-rw-r--r--  target/cris/qemu-cris/patches/4.4.6/crisv32_ethernet_driver.patch  4050
1 file changed, 4050 insertions(+), 0 deletions(-)
diff --git a/target/cris/qemu-cris/patches/4.4.6/crisv32_ethernet_driver.patch b/target/cris/qemu-cris/patches/4.4.6/crisv32_ethernet_driver.patch
new file mode 100644
index 000000000..80f03383c
--- /dev/null
+++ b/target/cris/qemu-cris/patches/4.4.6/crisv32_ethernet_driver.patch
@@ -0,0 +1,4050 @@
+diff -Nur linux-4.4.6.orig/arch/cris/arch-v32/drivers/Kconfig linux-4.4.6/arch/cris/arch-v32/drivers/Kconfig
+--- linux-4.4.6.orig/arch/cris/arch-v32/drivers/Kconfig 2016-03-16 16:43:17.000000000 +0100
++++ linux-4.4.6/arch/cris/arch-v32/drivers/Kconfig 2016-03-20 11:35:09.089964990 +0100
+@@ -8,9 +8,18 @@
+ This option enables the ETRAX FS built-in 10/100Mbit Ethernet
+ controller.
+
++config ETRAX_HAVE_PHY
++ bool "PHY present"
++ default y
++ help
++ Search and use the first PHY available on the MDIO bus. Fail
++ if none is found. Say Y here if you are not in a switched
++ environment (single port device).
++
+ config ETRAX_NO_PHY
+ bool "PHY not present"
+ depends on ETRAX_ETHERNET
++ default n
+ help
+ This option disables all MDIO communication with an ethernet
+ transceiver connected to the MII interface. This option shall
+@@ -18,6 +27,70 @@
+ switch. This option should normally be disabled. If enabled,
+ speed and duplex will be locked to 100 Mbit and full duplex.
+
++config ETRAX_PHY_FALLBACK
++ bool "Fixed PHY fallback"
++ depends on ETRAX_ETHERNET
++ default n
++ help
++ If no PHY is found on the MDIO bus, fall back on a fixed
++ 100/Full PHY. Say Y here if you need dynamic PHY
++ presence detection (switch connection where some but not
++ all ports have integrated PHYs), otherwise say N.
++
++config ETRAX_ETHERNET_IFACE0
++ depends on ETRAX_ETHERNET
++ bool "Enable network interface 0"
++
++config ETRAX_ETHERNET_IFACE1
++ depends on (ETRAX_ETHERNET && ETRAXFS)
++ bool "Enable network interface 1 (uses DMA6 and DMA7)"
++
++choice
++ prompt "Eth0 led group"
++ depends on ETRAX_ETHERNET_IFACE0
++ default ETRAX_ETH0_USE_LEDGRP0
++
++config ETRAX_ETH0_USE_LEDGRP0
++ bool "Use LED grp 0"
++ depends on ETRAX_NBR_LED_GRP_ONE || ETRAX_NBR_LED_GRP_TWO
++ help
++ Use LED grp 0 for eth0
++
++config ETRAX_ETH0_USE_LEDGRP1
++ bool "Use LED grp 1"
++ depends on ETRAX_NBR_LED_GRP_TWO
++ help
++ Use LED grp 1 for eth0
++
++config ETRAX_ETH0_USE_LEDGRPNULL
++ bool "Use no LEDs for eth0"
++ help
++ Use no LEDs for eth0
++endchoice
++
++choice
++ prompt "Eth1 led group"
++ depends on ETRAX_ETHERNET_IFACE1
++ default ETRAX_ETH1_USE_LEDGRP1
++
++config ETRAX_ETH1_USE_LEDGRP0
++ bool "Use LED grp 0"
++ depends on ETRAX_NBR_LED_GRP_ONE || ETRAX_NBR_LED_GRP_TWO
++ help
++ Use LED grp 0 for eth1
++
++config ETRAX_ETH1_USE_LEDGRP1
++ bool "Use LED grp 1"
++ depends on ETRAX_NBR_LED_GRP_TWO
++ help
++ Use LED grp 1 for eth1
++
++config ETRAX_ETH1_USE_LEDGRPNULL
++ bool "Use no LEDs for eth1"
++ help
++ Use no LEDs for eth1
++endchoice
++
+ config ETRAXFS_SERIAL
+ bool "Serial-port support"
+ depends on ETRAX_ARCH_V32
+diff -Nur linux-4.4.6.orig/arch/cris/include/arch-v32/arch/hwregs/eth_defs.h linux-4.4.6/arch/cris/include/arch-v32/arch/hwregs/eth_defs.h
+--- linux-4.4.6.orig/arch/cris/include/arch-v32/arch/hwregs/eth_defs.h 2016-03-16 16:43:17.000000000 +0100
++++ linux-4.4.6/arch/cris/include/arch-v32/arch/hwregs/eth_defs.h 2016-03-20 11:35:09.089964990 +0100
+@@ -2,69 +2,64 @@
+ #define __eth_defs_h
+
+ /*
+- * This file is autogenerated from
+- * file: eth.r
+- * id: eth_regs.r,v 1.16 2005/05/20 15:41:22 perz Exp
+- * last modfied: Mon Jan 9 06:06:41 2006
+- *
+- * by /n/asic/design/tools/rdesc/rdes2c eth.r
+- * id: $Id: eth_defs.h,v 1.7 2006/01/26 13:45:30 karljope Exp $
+- * Any changes here will be lost.
+- *
+- * -*- buffer-read-only: t -*-
++ * Note: Previously this was autogenerated code from the hardware
++ * implementation. However, to enable the same file to be used
++ * for both ARTPEC-3 and ETRAX FS this file is now hand-edited.
++ * Be careful.
+ */
++
+ /* Main access macros */
+ #ifndef REG_RD
+ #define REG_RD( scope, inst, reg ) \
+- REG_READ( reg_##scope##_##reg, \
+- (inst) + REG_RD_ADDR_##scope##_##reg )
++ REG_READ( reg_##scope##_##reg, \
++ (inst) + REG_RD_ADDR_##scope##_##reg )
+ #endif
+
+ #ifndef REG_WR
+ #define REG_WR( scope, inst, reg, val ) \
+- REG_WRITE( reg_##scope##_##reg, \
+- (inst) + REG_WR_ADDR_##scope##_##reg, (val) )
++ REG_WRITE( reg_##scope##_##reg, \
++ (inst) + REG_WR_ADDR_##scope##_##reg, (val) )
+ #endif
+
+ #ifndef REG_RD_VECT
+ #define REG_RD_VECT( scope, inst, reg, index ) \
+- REG_READ( reg_##scope##_##reg, \
+- (inst) + REG_RD_ADDR_##scope##_##reg + \
+- (index) * STRIDE_##scope##_##reg )
++ REG_READ( reg_##scope##_##reg, \
++ (inst) + REG_RD_ADDR_##scope##_##reg + \
++ (index) * STRIDE_##scope##_##reg )
+ #endif
+
+ #ifndef REG_WR_VECT
+ #define REG_WR_VECT( scope, inst, reg, index, val ) \
+- REG_WRITE( reg_##scope##_##reg, \
+- (inst) + REG_WR_ADDR_##scope##_##reg + \
+- (index) * STRIDE_##scope##_##reg, (val) )
++ REG_WRITE( reg_##scope##_##reg, \
++ (inst) + REG_WR_ADDR_##scope##_##reg + \
++ (index) * STRIDE_##scope##_##reg, (val) )
+ #endif
+
+ #ifndef REG_RD_INT
+ #define REG_RD_INT( scope, inst, reg ) \
+- REG_READ( int, (inst) + REG_RD_ADDR_##scope##_##reg )
++ REG_READ( int, (inst) + REG_RD_ADDR_##scope##_##reg )
+ #endif
+
+ #ifndef REG_WR_INT
+ #define REG_WR_INT( scope, inst, reg, val ) \
+- REG_WRITE( int, (inst) + REG_WR_ADDR_##scope##_##reg, (val) )
++ REG_WRITE( int, (inst) + REG_WR_ADDR_##scope##_##reg, (val) )
+ #endif
+
+ #ifndef REG_RD_INT_VECT
+ #define REG_RD_INT_VECT( scope, inst, reg, index ) \
+- REG_READ( int, (inst) + REG_RD_ADDR_##scope##_##reg + \
+- (index) * STRIDE_##scope##_##reg )
++ REG_READ( int, (inst) + REG_RD_ADDR_##scope##_##reg + \
++ (index) * STRIDE_##scope##_##reg )
+ #endif
+
+ #ifndef REG_WR_INT_VECT
+ #define REG_WR_INT_VECT( scope, inst, reg, index, val ) \
+- REG_WRITE( int, (inst) + REG_WR_ADDR_##scope##_##reg + \
+- (index) * STRIDE_##scope##_##reg, (val) )
++ REG_WRITE( int, (inst) + REG_WR_ADDR_##scope##_##reg + \
++ (index) * STRIDE_##scope##_##reg, (val) )
+ #endif
+
+ #ifndef REG_TYPE_CONV
+ #define REG_TYPE_CONV( type, orgtype, val ) \
+- ( { union { orgtype o; type n; } r; r.o = val; r.n; } )
++ ( { union { orgtype o; type n; } r; r.o = val; r.n; } )
+ #endif
+
+ #ifndef reg_page_size
+@@ -73,306 +68,332 @@
+
+ #ifndef REG_ADDR
+ #define REG_ADDR( scope, inst, reg ) \
+- ( (inst) + REG_RD_ADDR_##scope##_##reg )
++ ( (inst) + REG_RD_ADDR_##scope##_##reg )
+ #endif
+
+ #ifndef REG_ADDR_VECT
+ #define REG_ADDR_VECT( scope, inst, reg, index ) \
+- ( (inst) + REG_RD_ADDR_##scope##_##reg + \
+- (index) * STRIDE_##scope##_##reg )
++ ( (inst) + REG_RD_ADDR_##scope##_##reg + \
++ (index) * STRIDE_##scope##_##reg )
+ #endif
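As a side note, the token-pasting in the access macros above can be exercised in isolation. The sketch below stubs REG_READ()/REG_WRITE() (which the kernel gets from hwregs/reg_rdwr.h) with plain pointer accesses and shows what REG_RD()/REG_WR() expand to for one eth register; it is illustrative only and not part of the patch.

#include <stdio.h>

#define REG_READ(type, addr)        (*(volatile type *) (addr))
#define REG_WRITE(type, addr, val)  (*(volatile type *) (addr) = (val))

#define REG_RD(scope, inst, reg) \
	REG_READ(reg_##scope##_##reg, (inst) + REG_RD_ADDR_##scope##_##reg)
#define REG_WR(scope, inst, reg, val) \
	REG_WRITE(reg_##scope##_##reg, (inst) + REG_WR_ADDR_##scope##_##reg, (val))

/* One register from the eth scope, as declared further down in this header. */
typedef struct {
	unsigned int addr : 32;
} reg_eth_rw_ma0_lo;
#define REG_RD_ADDR_eth_rw_ma0_lo 0
#define REG_WR_ADDR_eth_rw_ma0_lo 0

int main(void)
{
	unsigned int fake_regs[1] = { 0 };	/* stands in for the register bank */
	unsigned long regi_eth0 = (unsigned long) fake_regs;
	reg_eth_rw_ma0_lo lo = { .addr = 0xcd8c4000 };

	/* REG_WR(eth, ...) expands to:
	   *(volatile reg_eth_rw_ma0_lo *) (regi_eth0 + 0) = lo; */
	REG_WR(eth, regi_eth0, rw_ma0_lo, lo);
	lo = REG_RD(eth, regi_eth0, rw_ma0_lo);
	printf("rw_ma0_lo = 0x%08x\n", lo.addr);
	return 0;
}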
+
+ /* C-code for register scope eth */
+
+ /* Register rw_ma0_lo, scope eth, type rw */
+ typedef struct {
+- unsigned int addr : 32;
++ unsigned int addr : 32;
+ } reg_eth_rw_ma0_lo;
+ #define REG_RD_ADDR_eth_rw_ma0_lo 0
+ #define REG_WR_ADDR_eth_rw_ma0_lo 0
+
+ /* Register rw_ma0_hi, scope eth, type rw */
+ typedef struct {
+- unsigned int addr : 16;
+- unsigned int dummy1 : 16;
++ unsigned int addr : 16;
++ unsigned int dummy1 : 16;
+ } reg_eth_rw_ma0_hi;
+ #define REG_RD_ADDR_eth_rw_ma0_hi 4
+ #define REG_WR_ADDR_eth_rw_ma0_hi 4
+
+ /* Register rw_ma1_lo, scope eth, type rw */
+ typedef struct {
+- unsigned int addr : 32;
++ unsigned int addr : 32;
+ } reg_eth_rw_ma1_lo;
+ #define REG_RD_ADDR_eth_rw_ma1_lo 8
+ #define REG_WR_ADDR_eth_rw_ma1_lo 8
+
+ /* Register rw_ma1_hi, scope eth, type rw */
+ typedef struct {
+- unsigned int addr : 16;
+- unsigned int dummy1 : 16;
++ unsigned int addr : 16;
++ unsigned int dummy1 : 16;
+ } reg_eth_rw_ma1_hi;
+ #define REG_RD_ADDR_eth_rw_ma1_hi 12
+ #define REG_WR_ADDR_eth_rw_ma1_hi 12
+
+ /* Register rw_ga_lo, scope eth, type rw */
+ typedef struct {
+- unsigned int tbl : 32;
++ unsigned int table : 32;
+ } reg_eth_rw_ga_lo;
+ #define REG_RD_ADDR_eth_rw_ga_lo 16
+ #define REG_WR_ADDR_eth_rw_ga_lo 16
+
+ /* Register rw_ga_hi, scope eth, type rw */
+ typedef struct {
+- unsigned int tbl : 32;
++ unsigned int table : 32;
+ } reg_eth_rw_ga_hi;
+ #define REG_RD_ADDR_eth_rw_ga_hi 20
+ #define REG_WR_ADDR_eth_rw_ga_hi 20
+
+ /* Register rw_gen_ctrl, scope eth, type rw */
+ typedef struct {
+- unsigned int en : 1;
+- unsigned int phy : 2;
+- unsigned int protocol : 1;
+- unsigned int loopback : 1;
+- unsigned int flow_ctrl : 1;
+- unsigned int gtxclk_out : 1;
+- unsigned int phyrst_n : 1;
+- unsigned int dummy1 : 24;
++ unsigned int en : 1;
++ unsigned int phy : 2;
++ unsigned int protocol : 1;
++ unsigned int loopback : 1;
++ unsigned int flow_ctrl : 1;
++ unsigned int gtxclk_out : 1;
++ unsigned int phyrst_n : 1;
++ unsigned int dummy1 : 24;
+ } reg_eth_rw_gen_ctrl;
+ #define REG_RD_ADDR_eth_rw_gen_ctrl 24
+ #define REG_WR_ADDR_eth_rw_gen_ctrl 24
+
+ /* Register rw_rec_ctrl, scope eth, type rw */
+ typedef struct {
+- unsigned int ma0 : 1;
+- unsigned int ma1 : 1;
+- unsigned int individual : 1;
+- unsigned int broadcast : 1;
+- unsigned int undersize : 1;
+- unsigned int oversize : 1;
+- unsigned int bad_crc : 1;
+- unsigned int duplex : 1;
+- unsigned int max_size : 16;
+- unsigned int dummy1 : 8;
++ unsigned int ma0 : 1;
++ unsigned int ma1 : 1;
++ unsigned int individual : 1;
++ unsigned int broadcast : 1;
++ unsigned int undersize : 1;
++ unsigned int oversize : 1;
++ unsigned int bad_crc : 1;
++ unsigned int duplex : 1;
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ unsigned int max_size : 16;
++ unsigned int dummy1 : 8;
++#else
++ unsigned int max_size : 1;
++ unsigned int dummy1 : 23;
++#endif
+ } reg_eth_rw_rec_ctrl;
+ #define REG_RD_ADDR_eth_rw_rec_ctrl 28
+ #define REG_WR_ADDR_eth_rw_rec_ctrl 28
+
+ /* Register rw_tr_ctrl, scope eth, type rw */
+ typedef struct {
+- unsigned int crc : 1;
+- unsigned int pad : 1;
+- unsigned int retry : 1;
+- unsigned int ignore_col : 1;
+- unsigned int cancel : 1;
+- unsigned int hsh_delay : 1;
+- unsigned int ignore_crs : 1;
+- unsigned int carrier_ext : 1;
+- unsigned int dummy1 : 24;
++ unsigned int crc : 1;
++ unsigned int pad : 1;
++ unsigned int retry : 1;
++ unsigned int ignore_col : 1;
++ unsigned int cancel : 1;
++ unsigned int hsh_delay : 1;
++ unsigned int ignore_crs : 1;
++ unsigned int carrier_ext : 1;
++ unsigned int dummy1 : 24;
+ } reg_eth_rw_tr_ctrl;
+ #define REG_RD_ADDR_eth_rw_tr_ctrl 32
+ #define REG_WR_ADDR_eth_rw_tr_ctrl 32
+
+ /* Register rw_clr_err, scope eth, type rw */
+ typedef struct {
+- unsigned int clr : 1;
+- unsigned int dummy1 : 31;
++ unsigned int clr : 1;
++ unsigned int dummy1 : 31;
+ } reg_eth_rw_clr_err;
+ #define REG_RD_ADDR_eth_rw_clr_err 36
+ #define REG_WR_ADDR_eth_rw_clr_err 36
+
+ /* Register rw_mgm_ctrl, scope eth, type rw */
+ typedef struct {
+- unsigned int mdio : 1;
+- unsigned int mdoe : 1;
+- unsigned int mdc : 1;
+- unsigned int dummy1 : 29;
++ unsigned int mdio : 1;
++ unsigned int mdoe : 1;
++ unsigned int mdc : 1;
++ unsigned int phyclk : 1;
++ unsigned int txdata : 4;
++ unsigned int txen : 1;
++ unsigned int dummy1 : 23;
+ } reg_eth_rw_mgm_ctrl;
+ #define REG_RD_ADDR_eth_rw_mgm_ctrl 40
+ #define REG_WR_ADDR_eth_rw_mgm_ctrl 40
+
+ /* Register r_stat, scope eth, type r */
+ typedef struct {
+- unsigned int mdio : 1;
+- unsigned int exc_col : 1;
+- unsigned int urun : 1;
+- unsigned int clk_125 : 1;
+- unsigned int dummy1 : 28;
++ unsigned int mdio : 1;
++ unsigned int exc_col : 1;
++ unsigned int urun : 1;
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ unsigned int clk_125 : 1;
++#else
++ unsigned int phyclk : 1;
++#endif
++ unsigned int txdata : 4;
++ unsigned int txen : 1;
++ unsigned int col : 1;
++ unsigned int crs : 1;
++ unsigned int txclk : 1;
++ unsigned int rxdata : 4;
++ unsigned int rxer : 1;
++ unsigned int rxdv : 1;
++ unsigned int rxclk : 1;
++ unsigned int dummy1 : 13;
+ } reg_eth_r_stat;
+ #define REG_RD_ADDR_eth_r_stat 44
+
+ /* Register rs_rec_cnt, scope eth, type rs */
+ typedef struct {
+- unsigned int crc_err : 8;
+- unsigned int align_err : 8;
+- unsigned int oversize : 8;
+- unsigned int congestion : 8;
++ unsigned int crc_err : 8;
++ unsigned int align_err : 8;
++ unsigned int oversize : 8;
++ unsigned int congestion : 8;
+ } reg_eth_rs_rec_cnt;
+ #define REG_RD_ADDR_eth_rs_rec_cnt 48
+
+ /* Register r_rec_cnt, scope eth, type r */
+ typedef struct {
+- unsigned int crc_err : 8;
+- unsigned int align_err : 8;
+- unsigned int oversize : 8;
+- unsigned int congestion : 8;
++ unsigned int crc_err : 8;
++ unsigned int align_err : 8;
++ unsigned int oversize : 8;
++ unsigned int congestion : 8;
+ } reg_eth_r_rec_cnt;
+ #define REG_RD_ADDR_eth_r_rec_cnt 52
+
+ /* Register rs_tr_cnt, scope eth, type rs */
+ typedef struct {
+- unsigned int single_col : 8;
+- unsigned int mult_col : 8;
+- unsigned int late_col : 8;
+- unsigned int deferred : 8;
++ unsigned int single_col : 8;
++ unsigned int mult_col : 8;
++ unsigned int late_col : 8;
++ unsigned int deferred : 8;
+ } reg_eth_rs_tr_cnt;
+ #define REG_RD_ADDR_eth_rs_tr_cnt 56
+
+ /* Register r_tr_cnt, scope eth, type r */
+ typedef struct {
+- unsigned int single_col : 8;
+- unsigned int mult_col : 8;
+- unsigned int late_col : 8;
+- unsigned int deferred : 8;
++ unsigned int single_col : 8;
++ unsigned int mult_col : 8;
++ unsigned int late_col : 8;
++ unsigned int deferred : 8;
+ } reg_eth_r_tr_cnt;
+ #define REG_RD_ADDR_eth_r_tr_cnt 60
+
+ /* Register rs_phy_cnt, scope eth, type rs */
+ typedef struct {
+- unsigned int carrier_loss : 8;
+- unsigned int sqe_err : 8;
+- unsigned int dummy1 : 16;
++ unsigned int carrier_loss : 8;
++ unsigned int sqe_err : 8;
++ unsigned int dummy1 : 16;
+ } reg_eth_rs_phy_cnt;
+ #define REG_RD_ADDR_eth_rs_phy_cnt 64
+
+ /* Register r_phy_cnt, scope eth, type r */
+ typedef struct {
+- unsigned int carrier_loss : 8;
+- unsigned int sqe_err : 8;
+- unsigned int dummy1 : 16;
++ unsigned int carrier_loss : 8;
++ unsigned int sqe_err : 8;
++ unsigned int dummy1 : 16;
+ } reg_eth_r_phy_cnt;
+ #define REG_RD_ADDR_eth_r_phy_cnt 68
+
+ /* Register rw_test_ctrl, scope eth, type rw */
+ typedef struct {
+- unsigned int snmp_inc : 1;
+- unsigned int snmp : 1;
+- unsigned int backoff : 1;
+- unsigned int dummy1 : 29;
++ unsigned int snmp_inc : 1;
++ unsigned int snmp : 1;
++ unsigned int backoff : 1;
++ unsigned int dummy1 : 29;
+ } reg_eth_rw_test_ctrl;
+ #define REG_RD_ADDR_eth_rw_test_ctrl 72
+ #define REG_WR_ADDR_eth_rw_test_ctrl 72
+
+ /* Register rw_intr_mask, scope eth, type rw */
+ typedef struct {
+- unsigned int crc : 1;
+- unsigned int align : 1;
+- unsigned int oversize : 1;
+- unsigned int congestion : 1;
+- unsigned int single_col : 1;
+- unsigned int mult_col : 1;
+- unsigned int late_col : 1;
+- unsigned int deferred : 1;
+- unsigned int carrier_loss : 1;
+- unsigned int sqe_test_err : 1;
+- unsigned int orun : 1;
+- unsigned int urun : 1;
+- unsigned int exc_col : 1;
+- unsigned int mdio : 1;
+- unsigned int dummy1 : 18;
++ unsigned int crc : 1;
++ unsigned int align : 1;
++ unsigned int oversize : 1;
++ unsigned int congestion : 1;
++ unsigned int single_col : 1;
++ unsigned int mult_col : 1;
++ unsigned int late_col : 1;
++ unsigned int deferred : 1;
++ unsigned int carrier_loss : 1;
++ unsigned int sqe_test_err : 1;
++ unsigned int orun : 1;
++ unsigned int urun : 1;
++ unsigned int exc_col : 1;
++ unsigned int mdio : 1;
++ unsigned int dummy1 : 18;
+ } reg_eth_rw_intr_mask;
+ #define REG_RD_ADDR_eth_rw_intr_mask 76
+ #define REG_WR_ADDR_eth_rw_intr_mask 76
+
+ /* Register rw_ack_intr, scope eth, type rw */
+ typedef struct {
+- unsigned int crc : 1;
+- unsigned int align : 1;
+- unsigned int oversize : 1;
+- unsigned int congestion : 1;
+- unsigned int single_col : 1;
+- unsigned int mult_col : 1;
+- unsigned int late_col : 1;
+- unsigned int deferred : 1;
+- unsigned int carrier_loss : 1;
+- unsigned int sqe_test_err : 1;
+- unsigned int orun : 1;
+- unsigned int urun : 1;
+- unsigned int exc_col : 1;
+- unsigned int mdio : 1;
+- unsigned int dummy1 : 18;
++ unsigned int crc : 1;
++ unsigned int align : 1;
++ unsigned int oversize : 1;
++ unsigned int congestion : 1;
++ unsigned int single_col : 1;
++ unsigned int mult_col : 1;
++ unsigned int late_col : 1;
++ unsigned int deferred : 1;
++ unsigned int carrier_loss : 1;
++ unsigned int sqe_test_err : 1;
++ unsigned int orun : 1;
++ unsigned int urun : 1;
++ unsigned int exc_col : 1;
++ unsigned int mdio : 1;
++ unsigned int dummy1 : 18;
+ } reg_eth_rw_ack_intr;
+ #define REG_RD_ADDR_eth_rw_ack_intr 80
+ #define REG_WR_ADDR_eth_rw_ack_intr 80
+
+ /* Register r_intr, scope eth, type r */
+ typedef struct {
+- unsigned int crc : 1;
+- unsigned int align : 1;
+- unsigned int oversize : 1;
+- unsigned int congestion : 1;
+- unsigned int single_col : 1;
+- unsigned int mult_col : 1;
+- unsigned int late_col : 1;
+- unsigned int deferred : 1;
+- unsigned int carrier_loss : 1;
+- unsigned int sqe_test_err : 1;
+- unsigned int orun : 1;
+- unsigned int urun : 1;
+- unsigned int exc_col : 1;
+- unsigned int mdio : 1;
+- unsigned int dummy1 : 18;
++ unsigned int crc : 1;
++ unsigned int align : 1;
++ unsigned int oversize : 1;
++ unsigned int congestion : 1;
++ unsigned int single_col : 1;
++ unsigned int mult_col : 1;
++ unsigned int late_col : 1;
++ unsigned int deferred : 1;
++ unsigned int carrier_loss : 1;
++ unsigned int sqe_test_err : 1;
++ unsigned int orun : 1;
++ unsigned int urun : 1;
++ unsigned int exc_col : 1;
++ unsigned int mdio : 1;
++ unsigned int dummy1 : 18;
+ } reg_eth_r_intr;
+ #define REG_RD_ADDR_eth_r_intr 84
+
+ /* Register r_masked_intr, scope eth, type r */
+ typedef struct {
+- unsigned int crc : 1;
+- unsigned int align : 1;
+- unsigned int oversize : 1;
+- unsigned int congestion : 1;
+- unsigned int single_col : 1;
+- unsigned int mult_col : 1;
+- unsigned int late_col : 1;
+- unsigned int deferred : 1;
+- unsigned int carrier_loss : 1;
+- unsigned int sqe_test_err : 1;
+- unsigned int orun : 1;
+- unsigned int urun : 1;
+- unsigned int exc_col : 1;
+- unsigned int mdio : 1;
+- unsigned int dummy1 : 18;
++ unsigned int crc : 1;
++ unsigned int align : 1;
++ unsigned int oversize : 1;
++ unsigned int congestion : 1;
++ unsigned int single_col : 1;
++ unsigned int mult_col : 1;
++ unsigned int late_col : 1;
++ unsigned int deferred : 1;
++ unsigned int carrier_loss : 1;
++ unsigned int sqe_test_err : 1;
++ unsigned int orun : 1;
++ unsigned int urun : 1;
++ unsigned int exc_col : 1;
++ unsigned int mdio : 1;
++ unsigned int dummy1 : 18;
+ } reg_eth_r_masked_intr;
+ #define REG_RD_ADDR_eth_r_masked_intr 88
+
+-
+ /* Constants */
+ enum {
+- regk_eth_discard = 0x00000000,
+- regk_eth_ether = 0x00000000,
+- regk_eth_full = 0x00000001,
+- regk_eth_gmii = 0x00000003,
+- regk_eth_gtxclk = 0x00000001,
+- regk_eth_half = 0x00000000,
+- regk_eth_hsh = 0x00000001,
+- regk_eth_mii = 0x00000001,
+- regk_eth_mii_arec = 0x00000002,
+- regk_eth_mii_clk = 0x00000000,
+- regk_eth_no = 0x00000000,
+- regk_eth_phyrst = 0x00000000,
+- regk_eth_rec = 0x00000001,
+- regk_eth_rw_ga_hi_default = 0x00000000,
+- regk_eth_rw_ga_lo_default = 0x00000000,
+- regk_eth_rw_gen_ctrl_default = 0x00000000,
+- regk_eth_rw_intr_mask_default = 0x00000000,
+- regk_eth_rw_ma0_hi_default = 0x00000000,
+- regk_eth_rw_ma0_lo_default = 0x00000000,
+- regk_eth_rw_ma1_hi_default = 0x00000000,
+- regk_eth_rw_ma1_lo_default = 0x00000000,
+- regk_eth_rw_mgm_ctrl_default = 0x00000000,
+- regk_eth_rw_test_ctrl_default = 0x00000000,
+- regk_eth_size1518 = 0x000005ee,
+- regk_eth_size1522 = 0x000005f2,
+- regk_eth_yes = 0x00000001
++ regk_eth_discard = 0x00000000,
++ regk_eth_ether = 0x00000000,
++ regk_eth_full = 0x00000001,
++ regk_eth_gmii = 0x00000003,
++ regk_eth_gtxclk = 0x00000001,
++ regk_eth_half = 0x00000000,
++ regk_eth_hsh = 0x00000001,
++ regk_eth_mii = 0x00000001,
++ regk_eth_mii_arec = 0x00000002,
++ regk_eth_mii_clk = 0x00000000,
++ regk_eth_no = 0x00000000,
++ regk_eth_phyrst = 0x00000000,
++ regk_eth_rec = 0x00000001,
++ regk_eth_rw_ga_hi_default = 0x00000000,
++ regk_eth_rw_ga_lo_default = 0x00000000,
++ regk_eth_rw_gen_ctrl_default = 0x00000000,
++ regk_eth_rw_intr_mask_default = 0x00000000,
++ regk_eth_rw_ma0_hi_default = 0x00000000,
++ regk_eth_rw_ma0_lo_default = 0x00000000,
++ regk_eth_rw_ma1_hi_default = 0x00000000,
++ regk_eth_rw_ma1_lo_default = 0x00000000,
++ regk_eth_rw_mgm_ctrl_default = 0x00000000,
++ regk_eth_rw_test_ctrl_default = 0x00000000,
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ regk_eth_size1518 = 0x000005ee,
++ regk_eth_size1522 = 0x000005f2,
++#else
++ regk_eth_size1518 = 0x00000000,
++ regk_eth_size1522 = 0x00000001,
++#endif
++ regk_eth_yes = 0x00000001
+ };
++
+ #endif /* __eth_defs_h */
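A note on the max_size changes above: on ARTPEC-3 the rw_rec_ctrl.max_size field is 16 bits wide and the regk_eth_size1518/regk_eth_size1522 constants appear to be byte counts (0x5ee is 1518, 0x5f2 is 1522), whereas on ETRAX FS the field is a single bit and the same constant names become a 0/1 selector between the two frame sizes. A trivial stand-alone check of the ARTPEC-3 values, for illustration only:

#include <stdio.h>

int main(void)
{
	/* ARTPEC-3 encodes the maximum frame size directly in bytes. */
	printf("regk_eth_size1518 = %d bytes\n", 0x5ee);	/* 1518 */
	printf("regk_eth_size1522 = %d bytes\n", 0x5f2);	/* 1522 */
	return 0;
}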
+diff -Nur linux-4.4.6.orig/drivers/net/cris/eth_v32.c linux-4.4.6/drivers/net/cris/eth_v32.c
+--- linux-4.4.6.orig/drivers/net/cris/eth_v32.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.4.6/drivers/net/cris/eth_v32.c 2016-03-20 15:09:58.183871830 +0100
+@@ -0,0 +1,3062 @@
++/*
++ * Driver for the ETRAX FS/Artpec-3 network controller.
++ *
++ * Copyright (c) 2003-2008 Axis Communications AB.
++ *
++ * TODO:
++ * * Decrease the amount of code running with interrupts disabled.
++ * * Rework the error handling so that we do not need to touch the tx
++ * ring from the error interrupts. When done, we should be able to
++ * do tx completion from the NAPI loop without disabling interrupts.
++ * * Remove the gigabit code. It's probably never going to be used.
++ */
++
++#include <linux/module.h>
++
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/delay.h>
++#include <linux/types.h>
++#include <linux/fcntl.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/ethtool.h>
++#include <linux/mii.h>
++
++#include <asm/io.h> /* CRIS_LED_* I/O functions */
++#include <asm/irq.h>
++#include <hwregs/reg_map.h>
++#include <hwregs/reg_rdwr.h>
++#include <hwregs/dma.h>
++#include <hwregs/eth_defs.h>
++#ifdef CONFIG_ETRAXFS
++#include <hwregs/config_defs.h>
++#else
++#include <hwregs/clkgen_defs.h>
++#endif
++#include <hwregs/intr_vect_defs.h>
++#include <hwregs/strmux_defs.h>
++#include <asm/bitops.h>
++#include <asm/ethernet.h>
++#include <mach/dma.h>
++#include <pinmux.h>
++
++#include "eth_v32.h"
++
++#ifndef CONFIG_ETRAXFS
++#define ETH0_INTR_VECT ETH_INTR_VECT
++#define ETH1_INTR_VECT ETH_INTR_VECT
++#define regi_eth0 regi_eth
++#define regi_eth1 regi_
++#endif
++
++#define DEBUG(x)
++#define GET_BIT(bit,val) (((val) >> (bit)) & 0x01)
++
++#if defined(CONFIG_ETRAX_HAVE_PHY) || defined(CONFIG_ETRAX_PHY_FALLBACK)
++#define RESET_PHY 1
++#else
++#define RESET_PHY 0
++#endif
++
++enum {
++ HAVE_PHY,
++ NO_PHY,
++ FALLBACK_PHY,
++};
++#if defined(CONFIG_ETRAX_PHY_FALLBACK)
++#define PHY_MODE (FALLBACK_PHY)
++#elif defined(CONFIG_ETRAX_NO_PHY)
++#define PHY_MODE (NO_PHY)
++#elif defined(CONFIG_ETRAX_HAVE_PHY)
++#define PHY_MODE (HAVE_PHY)
++#else
++#error Unknown PHY behaviour
++#endif
++
++static struct {
++ const char str[ETH_GSTRING_LEN];
++} const ethtool_stats_keys[] = {
++ { "tx_dma_restarts" },
++ { "tx_mac_resets" },
++ { "rx_dma_restarts" },
++ { "rx_dma_timeouts" },
++ { " dropped_rx" }
++};
++
++static void crisv32_eth_check_speed(unsigned long idev);
++static void crisv32_eth_check_duplex(unsigned long idev);
++static void update_rx_stats(struct crisv32_ethernet_local *np);
++static void update_tx_stats(struct crisv32_ethernet_local *np);
++static int crisv32_eth_poll(struct napi_struct *napi, int budget);
++static void crisv32_eth_setup_controller(struct net_device *dev);
++static int crisv32_eth_request_irqdma(struct net_device *dev);
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++static void
++crisv32_eth_restart_rx_dma(struct net_device* dev,
++ struct crisv32_ethernet_local *np);
++#endif
++#if 0
++static void crisv32_ethernet_bug(struct net_device *dev);
++#endif
++
++/*
++ * The name of the card. Is used for messages and in the requests for
++ * io regions, irqs and dma channels.
++ */
++#ifdef CONFIG_ETRAXFS
++static const char cardname[] = "ETRAX FS built-in ethernet controller";
++#else
++static const char cardname[] = "ARTPEC-3 built-in ethernet controller";
++#endif
++
++/* Some chipsets need special care. */
++#ifndef CONFIG_ETRAX_NO_PHY
++struct transceiver_ops transceivers[] = {
++ {0x1018, broadcom_check_speed, broadcom_check_duplex},
++ {0x50EF, broadcom_check_speed, broadcom_check_duplex},
++ /* TDK 2120 and TDK 2120C */
++ {0xC039, tdk_check_speed, tdk_check_duplex},
++ {0x039C, tdk_check_speed, tdk_check_duplex},
++ /* Intel LXT972A*/
++ {0x04de, intel_check_speed, intel_check_duplex},
++ /* National Semiconductor DP83865 */
++ {0x0017, national_check_speed, national_check_duplex},
++ /* Vitesse VCS8641 */
++ {0x01c1, vitesse_check_speed, vitesse_check_duplex},
++ /* Davicom DM9161 */
++ {0x606E, davicom_check_speed, davicom_check_duplex},
++ /* Generic, must be last. */
++ {0x0000, generic_check_speed, generic_check_duplex}
++};
++#endif
++
++static struct net_device *crisv32_dev[2];
++static struct crisv32_eth_leds *crisv32_leds[3];
++
++/* Default MAC address for interface 0.
++ * The real one will be set later. */
++static struct sockaddr default_mac_iface0 =
++ {0, {0x00, 0x40, 0x8C, 0xCD, 0x00, 0x00}};
++
++#ifdef CONFIG_CPU_FREQ
++static int
++crisv32_ethernet_freq_notifier(struct notifier_block *nb, unsigned long val,
++ void *data);
++
++static struct notifier_block crisv32_ethernet_freq_notifier_block = {
++ .notifier_call = crisv32_ethernet_freq_notifier
++};
++#endif
++
++static void receive_timeout(unsigned long arg);
++static void receive_timeout_work(struct work_struct* work);
++static void transmit_timeout(unsigned long arg);
++
++/*
++ * mask in and out tx/rx interrupts.
++ */
++static inline void crisv32_disable_tx_ints(struct crisv32_ethernet_local *np)
++{
++ reg_dma_rw_intr_mask intr_mask_tx = { .data = regk_dma_no };
++ REG_WR(dma, np->dma_out_inst, rw_intr_mask, intr_mask_tx);
++}
++
++static inline void crisv32_enable_tx_ints(struct crisv32_ethernet_local *np)
++{
++ reg_dma_rw_intr_mask intr_mask_tx = { .data = regk_dma_yes };
++ REG_WR(dma, np->dma_out_inst, rw_intr_mask, intr_mask_tx);
++}
++
++static inline void crisv32_disable_rx_ints(struct crisv32_ethernet_local *np)
++{
++ reg_dma_rw_intr_mask intr_mask_rx = { .in_eop = regk_dma_no };
++ REG_WR(dma, np->dma_in_inst, rw_intr_mask, intr_mask_rx);
++}
++
++static inline void crisv32_enable_rx_ints(struct crisv32_ethernet_local *np)
++{
++ reg_dma_rw_intr_mask intr_mask_rx = { .in_eop = regk_dma_yes };
++ REG_WR(dma, np->dma_in_inst, rw_intr_mask, intr_mask_rx);
++}
++
++static inline void crisv32_disable_eth_ints(struct crisv32_ethernet_local *np)
++{
++ int intr_mask_nw = 0x0;
++ REG_WR_INT(eth, np->eth_inst, rw_intr_mask, intr_mask_nw);
++}
++
++static inline void crisv32_enable_eth_ints(struct crisv32_ethernet_local *np)
++{
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ /* For Artpec-3 we use overrun to workaround voodoo TR 87 */
++ int intr_mask_nw = 0x1c00;
++#else
++ int intr_mask_nw = 0x1800;
++#endif
++ REG_WR_INT(eth, np->eth_inst, rw_intr_mask, intr_mask_nw);
++}
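The magic values used here (and again in crisv32_eth_open()) follow the field order of reg_eth_rw_intr_mask in eth_defs.h: 0x1800 is urun | exc_col (bits 11 and 12), and the ARTPEC-3 value 0x1c00 additionally sets orun (bit 10) for the TR 87 workaround. The stand-alone sketch below rebuilds the masks from those bit positions; the ETH_INTR_* names are invented for illustration and do not exist in the driver.

#include <stdio.h>

/* Bit positions follow the field order of reg_eth_rw_intr_mask. */
enum {
	ETH_INTR_CRC, ETH_INTR_ALIGN, ETH_INTR_OVERSIZE, ETH_INTR_CONGESTION,
	ETH_INTR_SINGLE_COL, ETH_INTR_MULT_COL, ETH_INTR_LATE_COL,
	ETH_INTR_DEFERRED, ETH_INTR_CARRIER_LOSS, ETH_INTR_SQE_TEST_ERR,
	ETH_INTR_ORUN, ETH_INTR_URUN, ETH_INTR_EXC_COL, ETH_INTR_MDIO
};

int main(void)
{
	int etraxfs_mask = (1 << ETH_INTR_URUN) | (1 << ETH_INTR_EXC_COL);
	int artpec3_mask = etraxfs_mask | (1 << ETH_INTR_ORUN);

	printf("ETRAX FS mask: 0x%04x\n", etraxfs_mask);	/* 0x1800 */
	printf("ARTPEC-3 mask: 0x%04x\n", artpec3_mask);	/* 0x1c00 */
	return 0;
}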
++
++static inline int crisv32_eth_gigabit(struct crisv32_ethernet_local *np)
++{
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ return np->gigabit_mode;
++#else
++ return 0;
++#endif
++}
++
++static inline void crisv32_eth_set_gigabit(struct crisv32_ethernet_local *np,
++ int g)
++{
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ np->gigabit_mode = g;
++#endif
++}
++
++/* start/stop receiver */
++static inline void crisv32_start_receiver(struct crisv32_ethernet_local *np)
++{
++ reg_eth_rw_rec_ctrl rec_ctrl;
++
++ rec_ctrl = REG_RD(eth, np->eth_inst, rw_rec_ctrl);
++ rec_ctrl.ma0 = regk_eth_yes;
++ rec_ctrl.broadcast = regk_eth_rec;
++ REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
++}
++
++static inline void crisv32_stop_receiver(struct crisv32_ethernet_local *np)
++{
++ reg_eth_rw_rec_ctrl rec_ctrl;
++
++ rec_ctrl = REG_RD(eth, np->eth_inst, rw_rec_ctrl);
++ rec_ctrl.ma0 = regk_eth_no;
++ rec_ctrl.broadcast = regk_eth_discard;
++ REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
++}
++
++static inline void crisv32_eth_reset(struct crisv32_ethernet_local *np)
++{
++ reg_eth_rw_gen_ctrl gen_ctrl = { 0 };
++
++ gen_ctrl = REG_RD(eth, np->eth_inst, rw_gen_ctrl);
++ gen_ctrl.en = regk_eth_no;
++ REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
++ gen_ctrl.en = regk_eth_yes;
++ REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
++}
++
++static void crisv32_eth_tx_cancel_frame(struct crisv32_ethernet_local *np)
++{
++ reg_eth_rw_tr_ctrl tr_ctrl;
++
++ /* Cancel any pending transmits. This should bring us to the
++ excessive collisions state but it doesn't always do it. */
++ tr_ctrl = REG_RD(eth, np->eth_inst, rw_tr_ctrl);
++ tr_ctrl.cancel = 1;
++ REG_WR(eth, np->eth_inst, rw_tr_ctrl, tr_ctrl);
++ tr_ctrl.cancel = 0;
++ REG_WR(eth, np->eth_inst, rw_tr_ctrl, tr_ctrl);
++}
++
++/*
++ * Hack to disconnect/reconnect the dma from the ethernet block while we reset
++ * things. TODO: verify that we don't need to disconnect out channels and
++ * remove that code.
++ *
++ * ARTPEC-3 has only a single ethernet block so np->eth_inst is always eth0.
++ * The strmux values are named slightly different, redefine to avoid #ifdefs
++ * in the code blocks. For artpec3 only regk_strmux_eth0 and channel 0/1
++ * should be used.
++ */
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++#define regk_strmux_eth0 regk_strmux_eth
++#define regk_strmux_eth1 regk_strmux_eth
++#endif
++static inline void
++crisv32_disconnect_eth_tx_dma(struct crisv32_ethernet_local *np)
++{
++ reg_strmux_rw_cfg strmux_cfg;
++
++ strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg);
++ if (np->eth_inst == regi_eth0)
++ strmux_cfg.dma0 = regk_strmux_off;
++ else
++ strmux_cfg.dma6 = regk_strmux_off;
++ REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg);
++}
++
++static inline void crisv32_connect_eth_tx_dma(struct crisv32_ethernet_local *np)
++{
++ reg_strmux_rw_cfg strmux_cfg;
++
++ strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg);
++ if (np->eth_inst == regi_eth0)
++ strmux_cfg.dma0 = regk_strmux_eth0;
++ else
++ strmux_cfg.dma6 = regk_strmux_eth1;
++ REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg);
++}
++
++static inline void
++crisv32_disconnect_eth_rx_dma(struct crisv32_ethernet_local *np)
++{
++ reg_strmux_rw_cfg strmux_cfg;
++
++ strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg);
++ if (np->eth_inst == regi_eth0)
++ strmux_cfg.dma1 = regk_strmux_off;
++ else
++ strmux_cfg.dma7 = regk_strmux_off;
++ REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg);
++}
++
++static inline void crisv32_connect_eth_rx_dma(struct crisv32_ethernet_local *np)
++{
++ reg_strmux_rw_cfg strmux_cfg;
++
++ strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg);
++ if (np->eth_inst == regi_eth0)
++ strmux_cfg.dma1 = regk_strmux_eth0;
++ else
++ strmux_cfg.dma7 = regk_strmux_eth1;
++ REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg);
++}
++
++static int dma_wait_busy(int inst, int timeout)
++{
++ reg_dma_rw_stream_cmd dma_sc;
++
++ do {
++ dma_sc = REG_RD(dma, inst, rw_stream_cmd);
++ } while (timeout-- > 0 && dma_sc.busy);
++ return dma_sc.busy;
++}
++
++static int __init crisv32_eth_request_irqdma(struct net_device *dev)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ /* Allocate IRQs and DMAs. */
++ if (np->eth_inst == regi_eth0) {
++ if (request_irq(DMA0_INTR_VECT, crisv32tx_eth_interrupt,
++ 0, "Ethernet TX", dev)) {
++ return -EAGAIN;
++ }
++
++ if (request_irq(DMA1_INTR_VECT, crisv32rx_eth_interrupt,
++ 0, "Ethernet RX", dev))
++ goto err0_1;
++
++ if (crisv32_request_dma(0, cardname, DMA_VERBOSE_ON_ERROR,
++ 12500000, dma_eth0))
++ goto err0_2;
++
++ if (crisv32_request_dma(1, cardname, DMA_VERBOSE_ON_ERROR,
++ 12500000, dma_eth0))
++ goto err0_3;
++
++ if (request_irq(ETH0_INTR_VECT, crisv32nw_eth_interrupt, 0,
++ cardname, dev)) {
++ crisv32_free_dma(1);
++err0_3:
++ crisv32_free_dma(0);
++err0_2:
++ free_irq(DMA1_INTR_VECT, dev);
++err0_1:
++ free_irq(DMA0_INTR_VECT, dev);
++ return -EAGAIN;
++ }
++ } else {
++ if (request_irq(DMA6_INTR_VECT, crisv32tx_eth_interrupt,
++ 0, cardname, dev))
++ return -EAGAIN;
++
++ if (request_irq(DMA7_INTR_VECT, crisv32rx_eth_interrupt,
++ 0, cardname, dev))
++ goto err1_1;
++
++ if (crisv32_request_dma(6, cardname, DMA_VERBOSE_ON_ERROR,
++ 0, dma_eth1))
++ goto err1_2;
++
++ if (crisv32_request_dma(7, cardname, DMA_VERBOSE_ON_ERROR,
++ 0, dma_eth1))
++ goto err1_3;
++
++ if (request_irq(ETH1_INTR_VECT, crisv32nw_eth_interrupt, 0,
++ cardname, dev)) {
++ crisv32_free_dma(7);
++err1_3:
++ crisv32_free_dma(6);
++err1_2:
++ free_irq(DMA7_INTR_VECT, dev);
++err1_1:
++ free_irq(DMA6_INTR_VECT, dev);
++ return -EAGAIN;
++ }
++ }
++ return 0;
++}
++
++static int __init crisv32_eth_init_phy(struct net_device *dev)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ struct timer_list timer_init = TIMER_INITIALIZER(NULL, 0, 0);
++
++ if (RESET_PHY) {
++#ifdef CONFIG_ETRAXFS
++ reg_config_rw_pad_ctrl pad_ctrl;
++ pad_ctrl = REG_RD(config, regi_config, rw_pad_ctrl);
++ pad_ctrl.phyrst_n = 0;
++ REG_WR(config, regi_config, rw_pad_ctrl, pad_ctrl);
++
++ udelay(500); /* RESET_LEN */
++
++ pad_ctrl.phyrst_n = 1;
++ REG_WR(config, regi_config, rw_pad_ctrl, pad_ctrl);
++#else
++ reg_eth_rw_gen_ctrl gen_ctrl = REG_RD(eth, np->eth_inst, rw_gen_ctrl);
++ gen_ctrl.phyrst_n = 0;
++ REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
++
++ udelay(500); /* RESET_LEN */
++
++ gen_ctrl.phyrst_n = 1;
++ REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
++#endif
++
++ udelay(200); /* RESET_WAIT */
++ }
++
++ switch (PHY_MODE) {
++ case FALLBACK_PHY:
++ /* Fall back on using fixed iff there is no PHY on */
++ /* the MDIO bus */
++ np->fixed_phy = crisv32_eth_probe_transceiver(dev) != 0;
++ if (np->fixed_phy)
++ printk(KERN_WARNING
++ "eth: No transciever found, falling back "
++ "to fixed phy mode\n");
++ break;
++
++ case NO_PHY:
++ /* Don't even bother looking for a PHY, always rely */
++ /* on fixed PHY */
++ np->fixed_phy = 1;
++ break;
++
++ default: /* HAVE_PHY */
++ /* Look for a PHY and abort if there is none, */
++ /* otherwise just carry on */
++ if (crisv32_eth_probe_transceiver(dev)) {
++ printk(KERN_WARNING
++ "eth: No transceiver found, "
++ "removing interface\n");
++ return -ENODEV;
++ }
++ np->fixed_phy = 0;
++ }
++
++ if (np->fixed_phy) {
++ reg_eth_rw_rec_ctrl rec_ctrl;
++
++ /* speed */
++ np->current_speed = 100;
++ np->current_speed_selection = 100; /* Auto. */
++
++ /* duplex */
++ np->full_duplex = 1;
++ np->current_duplex = full;
++
++ rec_ctrl = REG_RD(eth, np->eth_inst, rw_rec_ctrl);
++ rec_ctrl.duplex = regk_eth_full;
++ REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
++ } else {
++ np->mii_if.supports_gmii = mii_check_gmii_support(&np->mii_if);
++
++ /* speed */
++ np->current_speed = 10;
++ np->current_speed_selection = 0; /* Auto. */
++ np->speed_timer = timer_init;
++ np->speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
++ np->speed_timer.data = (unsigned long) dev;
++ np->speed_timer.function = crisv32_eth_check_speed;
++
++ /* duplex */
++ np->full_duplex = 0;
++ np->current_duplex = autoneg;
++ np->duplex_timer = timer_init;
++ np->duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
++ np->duplex_timer.data = (unsigned long) dev;
++ np->duplex_timer.function = crisv32_eth_check_duplex;
++ }
++
++ return 0;
++}
++
++static void __init crisv32_eth_setup_controller(struct net_device *dev)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ reg_eth_rw_gen_ctrl gen_ctrl;
++
++ reg_eth_rw_tr_ctrl tr_ctrl = {
++ /* SW retransmits to avoid transmitter bugs. */
++ .retry = regk_eth_no,
++ .pad = regk_eth_yes,
++ .crc = regk_eth_yes
++ };
++
++ reg_eth_rw_rec_ctrl rec_ctrl = {
++ .ma0 = regk_eth_no, /* enable at open() */
++ .broadcast = regk_eth_no,
++ .max_size = regk_eth_size1522
++ };
++
++ reg_eth_rw_ga_lo ga_lo = { 0 };
++ reg_eth_rw_ga_hi ga_hi = { 0 };
++
++ /*
++ * Initialize group address registers to make sure that no
++ * unwanted addresses are matched.
++ */
++ REG_WR(eth, np->eth_inst, rw_ga_lo, ga_lo);
++ REG_WR(eth, np->eth_inst, rw_ga_hi, ga_hi);
++
++ /* Configure receiver and transmitter */
++ REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
++ REG_WR(eth, np->eth_inst, rw_tr_ctrl, tr_ctrl);
++
++ /*
++ * Read from rw_gen_ctrl so that we don't override any previous
++ * configuration.
++ */
++ gen_ctrl = REG_RD(eth, np->eth_inst, rw_gen_ctrl);
++ gen_ctrl.phy = regk_eth_mii_clk;
++#ifdef CONFIG_ETRAXFS
++ /* On ETRAX FS, this bit has reversed meaning */
++ gen_ctrl.flow_ctrl = regk_eth_no;
++#else
++ gen_ctrl.flow_ctrl = regk_eth_yes;
++#endif
++
++ /* Enable ethernet controller with mii clk. */
++ REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
++ gen_ctrl.en = regk_eth_yes;
++ REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl);
++}
++
++static void crisv32_eth_reset_rx_ring(struct net_device *dev)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ int i;
++
++ /* cleanup the rx-ring */
++ for (i = 0; i < NBR_RX_DESC; i++) {
++ struct sk_buff *skb;
++ skb = np->dma_rx_descr_list[i].skb;
++ if (!skb
++ || (np->dma_rx_descr_list[i].descr.buf !=
++ (void *)virt_to_phys(skb->data))) {
++ if (skb)
++ dev_kfree_skb(skb);
++ skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE);
++ np->dma_rx_descr_list[i].skb = skb;
++ np->dma_rx_descr_list[i].descr.buf =
++ (char*)virt_to_phys(skb->data);
++ }
++ if (np->dma_rx_descr_list[i].descr.in_eop)
++ np->rx_restarts_dropped++;
++ np->dma_rx_descr_list[i].descr.after =
++ (char*)virt_to_phys(skb->data
++ + MAX_MEDIA_DATA_SIZE);
++ np->dma_rx_descr_list[i].descr.eol = 0;
++ np->dma_rx_descr_list[i].descr.in_eop = 0;
++ /* Workaround cache bug */
++ flush_dma_descr(&np->dma_rx_descr_list[i].descr, 1);
++ }
++
++ /* reset rx-ring */
++ np->active_rx_desc = &np->dma_rx_descr_list[0];
++ np->prev_rx_desc = &np->dma_rx_descr_list[NBR_RX_DESC - 1];
++ np->last_rx_desc = np->prev_rx_desc;
++ np->dma_rx_descr_list[NBR_RX_DESC - 1].descr.eol = 1;
++ flush_dma_descr(&np->dma_rx_descr_list[NBR_RX_DESC - 1].descr, 0);
++ /* ready to accept new packets. */
++ np->new_rx_package = 1;
++
++ /* Fill context descriptors. */
++ np->ctxt_in.next = 0;
++ np->ctxt_in.saved_data =
++ (void *)virt_to_phys(&np->active_rx_desc->descr);
++ np->ctxt_in.saved_data_buf = np->active_rx_desc->descr.buf;
++}
++
++static inline int crisv32_eth_tx_ring_full(struct crisv32_ethernet_local *np)
++{
++ crisv32_eth_descr *active = np->active_tx_desc;
++
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ active = phys_to_virt((unsigned long)active->descr.next);
++#endif
++ if (active == np->catch_tx_desc)
++ return 1;
++ return 0;
++}
++
++static void crisv32_eth_reset_tx_ring(struct net_device *dev)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ /* free un-handled tx packets */
++ while (np->txpackets || np->catch_tx_desc != np->active_tx_desc) {
++ np->txpackets--;
++ if (np->catch_tx_desc->skb)
++ dev_kfree_skb(np->catch_tx_desc->skb);
++
++ np->catch_tx_desc->skb = 0;
++ np->catch_tx_desc =
++ phys_to_virt((int)np->catch_tx_desc->descr.next);
++ }
++
++ WARN_ON(np->txpackets != 0);
++ np->txpackets = 0;
++
++ /* reset tx-ring */
++ np->dma_tx_descr_list[0].descr.buf =
++ np->dma_tx_descr_list[0].descr.after = 0;
++ np->dma_tx_descr_list[0].descr.eol = 1;
++
++ np->active_tx_desc = &np->dma_tx_descr_list[0];
++ np->prev_tx_desc = &np->dma_tx_descr_list[NBR_TX_DESC - 1];
++ np->catch_tx_desc = &np->dma_tx_descr_list[0];
++
++ np->ctxt_out.next = 0;
++ np->ctxt_out.saved_data =
++ (void *)virt_to_phys(&np->dma_tx_descr_list[0].descr);
++
++}
++
++static void crisv32_eth_reset_rings(struct net_device *dev)
++{
++ crisv32_eth_reset_tx_ring(dev);
++ crisv32_eth_reset_rx_ring(dev);
++}
++
++/*
++ * Really advance the receive ring. RX interrupts must be off.
++ */
++static void __crisv32_eth_rx_ring_advance(struct crisv32_ethernet_local *np)
++{
++ if (np->newbuf)
++ np->active_rx_desc->descr.buf = (void *) np->newbuf;
++ np->active_rx_desc->descr.after =
++ np->active_rx_desc->descr.buf + MAX_MEDIA_DATA_SIZE;
++ np->active_rx_desc->descr.eol = 1;
++ np->active_rx_desc->descr.in_eop = 0;
++ np->active_rx_desc = phys_to_virt((int)np->active_rx_desc->descr.next);
++ barrier();
++ np->prev_rx_desc->descr.eol = 0;
++
++ /* Workaround cache bug. */
++ flush_dma_descr(&np->prev_rx_desc->descr, 0);
++ np->prev_rx_desc = phys_to_virt((int)np->prev_rx_desc->descr.next);
++ flush_dma_descr(&np->prev_rx_desc->descr, 1);
++}
++
++/*
++ * Advance the receive ring. RX interrupts must be off.
++ */
++static inline void
++crisv32_eth_rx_ring_advance(struct crisv32_ethernet_local *np)
++{
++ /*
++ * When the input DMA reaches eol precaution must be taken, otherwise
++ * the DMA could stop. The problem occurs if the eol flag is re-placed
++ * on the descriptor that the DMA stands on before the DMA proceed to
++ * the next descriptor. This case could, for example, happen if there
++ * is a traffic burst and then the network goes silent. To prevent this
++ * we make sure that we do not set the eol flag on the descriptor that
++ * the DMA stands on.
++ */
++ unsigned long dma_pos;
++
++ /* Get the current input dma position. */
++ dma_pos = REG_RD_INT(dma, np->dma_in_inst, rw_saved_data);
++
++ if (virt_to_phys(&np->active_rx_desc->descr) != dma_pos) {
++ crisv32_eth_descr *cur, *nxt;
++
++ /* Now really advance the ring one step. */
++ __crisv32_eth_rx_ring_advance(np);
++
++ cur = np->active_rx_desc;
++ nxt = (void *)phys_to_virt((unsigned long)cur->descr.next);
++ flush_dma_descr(&cur->descr, 0);
++ flush_dma_descr(&nxt->descr, 0);
++ if (!cur->descr.in_eop && nxt->descr.in_eop) {
++ /* TODO: Investigate this more. The DMA seems to have
++ skipped a descriptor, possibly due to incoherence
++ between the CPU L1 cache and the DMA updates to the
++ descriptor. */
++ np->newbuf = (unsigned long) np->active_rx_desc->descr.buf;
++ __crisv32_eth_rx_ring_advance(np);
++ }
++ /* flush after peek. */
++ flush_dma_descr(&cur->descr, 0);
++ flush_dma_descr(&nxt->descr, 0);
++ } else {
++ /* delay the advancing of the ring. */
++ np->new_rx_package = 0;
++ }
++}
++
++static void __init crisv32_eth_init_rings(struct net_device *dev)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ int i;
++
++ /* Initialise receive descriptors for interface. */
++ for (i = 0; i < NBR_RX_DESC; i++) {
++ struct sk_buff *skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE);
++
++ np->dma_rx_descr_list[i].skb = skb;
++ np->dma_rx_descr_list[i].descr.buf =
++ (char*)virt_to_phys(skb->data);
++ np->dma_rx_descr_list[i].descr.after =
++ (char*)virt_to_phys(skb->data + MAX_MEDIA_DATA_SIZE);
++
++ np->dma_rx_descr_list[i].descr.eol = 0;
++ np->dma_rx_descr_list[i].descr.in_eop = 0;
++ np->dma_rx_descr_list[i].descr.next =
++ (void *) virt_to_phys(&np->dma_rx_descr_list[i + 1].descr);
++ }
++ /* bend the list into a ring */
++ np->dma_rx_descr_list[NBR_RX_DESC - 1].descr.next =
++ (void *) virt_to_phys(&np->dma_rx_descr_list[0].descr);
++
++ /* Initialize transmit descriptors. */
++ for (i = 0; i < NBR_TX_DESC; i++) {
++ np->dma_tx_descr_list[i].descr.wait = 1;
++ np->dma_tx_descr_list[i].descr.eol = 0;
++ np->dma_tx_descr_list[i].descr.out_eop = 0;
++ np->dma_tx_descr_list[i].descr.next =
++ (void*)virt_to_phys(&np->dma_tx_descr_list[i+1].descr);
++ }
++ /* bend the list into a ring */
++ np->dma_tx_descr_list[NBR_TX_DESC - 1].descr.next =
++ (void *) virt_to_phys(&np->dma_tx_descr_list[0].descr);
++
++ crisv32_eth_reset_rings(dev);
++}
++
++static void __init crisv32_init_leds(int ledgrp, struct net_device *dev)
++{
++ struct timer_list timer_init = TIMER_INITIALIZER(NULL, 0, 0);
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ /* Use already allocated led grp if initialized */
++ if (crisv32_leds[ledgrp] != NULL) {
++ np->leds = crisv32_leds[ledgrp];
++ return;
++ }
++
++ crisv32_leds[ledgrp] =
++ kmalloc(sizeof(struct crisv32_eth_leds), GFP_KERNEL);
++
++ crisv32_leds[ledgrp]->ledgrp = ledgrp;
++ crisv32_leds[ledgrp]->led_active = 0;
++ crisv32_leds[ledgrp]->ifisup[0] = 0;
++ crisv32_leds[ledgrp]->ifisup[1] = 0;
++ /* NOTE: Should this value be set to zero as the jiffies timer
++ can wrap? */
++ crisv32_leds[ledgrp]->led_next_time = jiffies;
++
++ crisv32_leds[ledgrp]->clear_led_timer = timer_init;
++ crisv32_leds[ledgrp]->clear_led_timer.function =
++ crisv32_clear_network_leds;
++ crisv32_leds[ledgrp]->clear_led_timer.data = (unsigned long) dev;
++
++ spin_lock_init(&crisv32_leds[ledgrp]->led_lock);
++
++ np->leds = crisv32_leds[ledgrp];
++}
++
++static int __init crisv32_ethernet_init(void)
++{
++ struct crisv32_ethernet_local *np;
++ int ret = 0;
++
++#ifdef CONFIG_ETRAXFS
++ printk("ETRAX FS 10/100MBit ethernet v0.01 (c)"
++ " 2003 Axis Communications AB\n");
++#else
++ printk("ARTPEC-3 10/100 MBit ethernet (c)"
++ " 2003-2009 Axis Communications AB\n");
++#endif
++
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ {
++ reg_clkgen_rw_clk_ctrl clk_ctrl = REG_RD(clkgen, regi_clkgen,
++ rw_clk_ctrl);
++ clk_ctrl.eth = clk_ctrl.dma0_1_eth = regk_clkgen_yes;
++ REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl);
++ }
++#endif
++{
++ int iface0 = 0;
++
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ if (crisv32_pinmux_alloc_fixed(pinmux_eth))
++ panic("Eth pinmux\n");
++#endif
++
++ if (!(crisv32_dev[iface0] = alloc_etherdev(sizeof *np)))
++ return -ENOMEM;
++
++ ret |= crisv32_ethernet_device_init(crisv32_dev[iface0]);
++
++ crisv32_init_leds(CRIS_LED_GRP_NONE,crisv32_dev[iface0]);
++
++ np = (struct crisv32_ethernet_local *) netdev_priv(crisv32_dev[iface0]);
++ np->eth_inst = regi_eth0;
++ np->dma_out_inst = regi_dma0;
++ np->dma_in_inst = regi_dma1;
++
++ np->mii_if.dev = crisv32_dev[iface0];
++ np->mii_if.mdio_read = crisv32_eth_get_mdio_reg;
++ np->mii_if.mdio_write = crisv32_eth_set_mdio_reg;
++ np->mii_if.phy_id_mask = 0x1f;
++ np->mii_if.reg_num_mask = 0x1f;
++
++ np->use_leds = 1;
++ np->autoneg_normal = 1;
++
++
++ register_netdev(crisv32_dev[iface0]);
++
++ /* Set up default MAC address */
++ memcpy(crisv32_dev[iface0]->dev_addr, default_mac_iface0.sa_data, 6);
++ crisv32_eth_set_mac_address(crisv32_dev[iface0], &default_mac_iface0);
++ if (crisv32_eth_request_irqdma(crisv32_dev[iface0]))
++ printk("%s: eth0 unable to allocate IRQ and DMA resources\n",
++ __func__);
++ np->txpackets = 0;
++ crisv32_eth_init_rings(crisv32_dev[iface0]);
++ crisv32_eth_setup_controller(crisv32_dev[iface0]);
++ ret |= crisv32_eth_init_phy(crisv32_dev[iface0]);
++ if (ret) {
++ unregister_netdev(crisv32_dev[iface0]);
++ return ret;
++ }
++}
++
++#ifdef CONFIG_ETRAX_ETHERNET_IFACE1
++{
++ int iface1 = 0;
++ /* Default MAC address for interface 1.
++ * The real one will be set later. */
++ static struct sockaddr default_mac_iface1 =
++ {0, {0x00, 0x40, 0x8C, 0xCD, 0x00, 0x01}};
++
++ if (crisv32_pinmux_alloc_fixed(pinmux_eth1))
++ panic("Eth pinmux\n");
++
++ /* Increase index to device array if interface 0 is enabled as well.*/
++#ifdef CONFIG_ETRAX_ETHERNET_IFACE0
++ iface1++;
++#endif
++ if (!(crisv32_dev[iface1] = alloc_etherdev(sizeof *np)))
++ return -ENOMEM;
++
++ ret |= crisv32_ethernet_device_init(crisv32_dev[iface1]);
++
++ crisv32_init_leds(CRIS_LED_GRP_NONE,crisv32_dev[iface1]);
++
++ np = (struct crisv32_ethernet_local *) netdev_priv(crisv32_dev[iface1]);
++ np->eth_inst = regi_eth1;
++ np->dma_out_inst = regi_dma6;
++ np->dma_in_inst = regi_dma7;
++
++ np->mii_if.dev = crisv32_dev[iface1];
++ np->mii_if.mdio_read = crisv32_eth_get_mdio_reg;
++ np->mii_if.mdio_write = crisv32_eth_set_mdio_reg;
++ np->mii_if.phy_id_mask = 0x1f;
++ np->mii_if.reg_num_mask = 0x1f;
++
++
++ register_netdev(crisv32_dev[iface1]);
++
++ /* Set up default MAC address */
++ memcpy(crisv32_dev[iface1]->dev_addr, default_mac_iface1.sa_data, 6);
++ crisv32_eth_set_mac_address(crisv32_dev[iface1], &default_mac_iface1);
++
++ if (crisv32_eth_request_irqdma(crisv32_dev[iface1]))
++ printk("%s: eth1 unable to allocate IRQ and DMA resources\n",
++ __func__);
++ np->txpackets = 0;
++ crisv32_eth_init_rings(crisv32_dev[iface1]);
++ crisv32_eth_setup_controller(crisv32_dev[iface1]);
++ ret |= crisv32_eth_init_phy(crisv32_dev[iface1]);
++ if (ret) {
++ unregister_netdev(crisv32_dev[iface1]);
++ return ret;
++ }
++}
++#endif /* CONFIG_ETRAX_ETHERNET_IFACE1 */
++
++#ifdef CONFIG_CPU_FREQ
++ cpufreq_register_notifier(&crisv32_ethernet_freq_notifier_block,
++ CPUFREQ_TRANSITION_NOTIFIER);
++#endif
++
++ return ret;
++}
++
++static struct net_device_ops crisv32_netdev_ops = {
++ .ndo_open = crisv32_eth_open,
++ .ndo_stop = crisv32_eth_close,
++ .ndo_start_xmit = crisv32_eth_send_packet,
++ .ndo_set_rx_mode = crisv32_eth_set_rx_mode,
++ .ndo_validate_addr = eth_validate_addr,
++ .ndo_set_mac_address = crisv32_eth_set_mac_address,
++ .ndo_do_ioctl = crisv32_eth_ioctl,
++ .ndo_get_stats = crisv32_get_stats,
++ .ndo_tx_timeout = crisv32_eth_do_tx_recovery,
++ .ndo_set_config = crisv32_eth_set_config,
++};
++
++static int __init crisv32_ethernet_device_init(struct net_device *dev)
++{
++ struct crisv32_ethernet_local *np;
++ struct timer_list timer_init = TIMER_INITIALIZER(NULL, 0, 0);
++
++ dev->base_addr = 0; /* Just to have something to show. */
++
++ /* we do our own locking */
++ dev->features |= NETIF_F_LLTX;
++
++ /* We use several IRQs and DMAs so just report 0 here. */
++ dev->irq = 0;
++ dev->dma = 0;
++
++ /*
++ * Fill in our handlers so the network layer can talk to us in the
++ * future.
++ */
++ dev->netdev_ops = &crisv32_netdev_ops;
++ dev->ethtool_ops = &crisv32_ethtool_ops;
++ dev->watchdog_timeo = HZ * 10;
++#ifdef CONFIG_NET_POLL_CONTROLLER
++ dev->poll_controller = crisv32_netpoll;
++#endif
++ np = netdev_priv(dev);
++ np->dev = dev;
++
++ /*
++ * 8 skbs keeps the system very responsive even under high load.
++ * At 64 the system locks, pretty much the same way as without NAPI.
++ *
++ * TODO: measure with 2 interfaces
++ */
++ netif_napi_add(dev, &np->napi, crisv32_eth_poll, 8);
++
++ spin_lock_init(&np->lock);
++ spin_lock_init(&np->transceiver_lock);
++
++ np->receive_timer = timer_init;
++ np->receive_timer.data = (unsigned)dev;
++ np->receive_timer.function = receive_timeout;
++
++ INIT_WORK(&np->receive_work, receive_timeout_work);
++
++ np->transmit_timer = timer_init;
++ np->transmit_timer.data = (unsigned)dev;
++ np->transmit_timer.function = transmit_timeout;
++
++ return 0;
++}
++
++static int crisv32_eth_open(struct net_device *dev)
++{
++ struct sockaddr mac_addr;
++ reg_dma_rw_ack_intr ack_intr = { .data = 1, .in_eop = 1 };
++ reg_eth_rw_clr_err clr_err = {.clr = regk_eth_yes};
++ /*
++ * don't interrupt us at any stat counter thresholds, only at urun
++ * and exc_col.
++ */
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ /* For Artpec-3 we use overrun to workaround voodoo TR 87 */
++ int intr_mask_nw = 0x1c00;
++#else
++ int intr_mask_nw = 0x1800;
++#endif
++ int eth_ack_intr = 0xffff;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ spin_lock(&np->lock);
++ crisv32_eth_set_gigabit(np, 0);
++
++ crisv32_disable_tx_ints(np);
++ crisv32_disable_rx_ints(np);
++
++ REG_WR(eth, np->eth_inst, rw_clr_err, clr_err);
++ REG_WR_INT(eth, np->eth_inst, rw_ack_intr, eth_ack_intr);
++ REG_WR_INT(eth, np->eth_inst, rw_intr_mask, intr_mask_nw);
++ crisv32_eth_reset_rings(dev);
++
++ /* Give the hardware an idea of what MAC address we want. */
++ memcpy(mac_addr.sa_data, dev->dev_addr, dev->addr_len);
++ crisv32_eth_set_mac_address(dev, &mac_addr);
++
++ /* Enable irq and make sure that the irqs are cleared. */
++ REG_WR(dma, np->dma_out_inst, rw_ack_intr, ack_intr);
++ REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
++
++ crisv32_disconnect_eth_rx_dma(np);
++
++ /* Prepare input DMA. */
++ DMA_RESET(np->dma_in_inst);
++ DMA_ENABLE(np->dma_in_inst);
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ DMA_WR_CMD(np->dma_in_inst, regk_dma_set_w_size2);
++#endif
++ DMA_START_CONTEXT(np->dma_in_inst, virt_to_phys(&np->ctxt_in));
++ DMA_CONTINUE(np->dma_in_inst);
++ crisv32_enable_rx_ints(np);
++ crisv32_start_receiver(np);
++
++ /* Prepare output DMA. */
++ DMA_RESET(np->dma_out_inst);
++ DMA_ENABLE(np->dma_out_inst);
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ DMA_WR_CMD(np->dma_out_inst, regk_dma_set_w_size4);
++#endif
++ crisv32_connect_eth_rx_dma(np);
++
++ netif_start_queue(dev);
++ crisv32_enable_tx_ints(np);
++
++ if (!np->fixed_phy) {
++ /* Start duplex/speed timers */
++ if (!timer_pending(&np->speed_timer))
++ add_timer(&np->speed_timer);
++ if (!timer_pending(&np->duplex_timer))
++ add_timer(&np->duplex_timer);
++ }
++
++ spin_unlock(&np->lock);
++ /*
++ * We are now ready to accept transmit requests from the queueing
++ * layer of the networking stack.
++ */
++ np->link = 1;
++ netif_carrier_on(dev);
++ napi_enable(&np->napi);
++
++ return 0;
++}
++
++static int crisv32_eth_close(struct net_device *dev)
++{
++ reg_dma_rw_ack_intr ack_intr = {0};
++
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ unsigned long flags;
++
++ del_timer(&np->transmit_timer);
++ spin_lock_irqsave(&np->lock, flags);
++
++ /* stop the receiver before the DMA channels to avoid overruns. */
++ crisv32_disable_rx_ints(np);
++ napi_disable(&np->napi);
++ crisv32_stop_receiver(np);
++
++ netif_stop_queue(dev);
++
++ /* Reset the TX DMA in case it has hung on something. */
++ DMA_RESET(np->dma_in_inst);
++
++ /* Stop DMA */
++ DMA_STOP(np->dma_in_inst);
++ DMA_STOP(np->dma_out_inst);
++
++ /* Disable irq and make sure that the irqs are cleared. */
++ crisv32_disable_tx_ints(np);
++ ack_intr.data = 1;
++ REG_WR(dma, np->dma_out_inst, rw_ack_intr, ack_intr);
++
++ ack_intr.in_eop = 1;
++ REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
++
++ np->sender_started = 0;
++ spin_unlock_irqrestore(&np->lock, flags);
++
++ /* Update the statistics. */
++ update_rx_stats(np);
++ update_tx_stats(np);
++
++ if (!np->fixed_phy) {
++ /* Stop speed/duplex timers */
++ del_timer(&np->speed_timer);
++ del_timer(&np->duplex_timer);
++ }
++
++ return 0;
++}
++
++static int crisv32_eth_set_mac_address(struct net_device *dev, void *vpntr)
++{
++ int i;
++ static int first = 1;
++
++ unsigned char *addr = ((struct sockaddr*)vpntr)->sa_data;
++
++ reg_eth_rw_ma0_lo ma0_lo =
++ { addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24)};
++
++ reg_eth_rw_ma0_hi ma0_hi = { addr[4] | (addr[5] << 8) };
++
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ /* Remember the address. */
++ memcpy(dev->dev_addr, addr, dev->addr_len);
++
++ /*
++ * Write the address to the hardware.
++ * Note the way the address is wrapped:
++ * ma0_l0 = a0_0 | (a0_1 << 8) | (a0_2 << 16) | (a0_3 << 24);
++ * ma0_hi = a0_4 | (a0_5 << 8);
++ */
++ REG_WR(eth, np->eth_inst, rw_ma0_lo, ma0_lo);
++ REG_WR(eth, np->eth_inst, rw_ma0_hi, ma0_hi);
++
++ if (first) {
++ printk(KERN_INFO "%s: changed MAC to ", dev->name);
++
++ for (i = 0; i < 5; i++)
++ printk("%02X:", dev->dev_addr[i]);
++ printk("%02X\n", dev->dev_addr[i]);
++
++ first = 0;
++ }
++
++ return 0;
++}
++
++static irqreturn_t crisv32rx_eth_interrupt(int irq, void *dev_id)
++{
++ struct net_device *dev = (struct net_device *) dev_id;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ reg_dma_r_masked_intr masked_in;
++
++ masked_in = REG_RD(dma, np->dma_in_inst, r_masked_intr);
++
++ if (masked_in.in_eop) {
++ reg_dma_rw_ack_intr ack_intr = {0};
++
++ /*
++ * Ack the rx irq even if we are not prepared to start
++ * polling. This is needed to handle incoming packets
++ * during the stop sequence.
++ */
++ ack_intr.in_eop = 1;
++ REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
++
++ mod_timer(&np->receive_timer, jiffies + HZ);
++ np->do_rx_recovery = 0;
++
++ if (napi_schedule_prep(&np->napi)) {
++ crisv32_disable_rx_ints(np);
++ crisv32_disable_tx_ints(np);
++ /* put us onto the poll list */
++ __napi_schedule(&np->napi);
++ }
++ } else {
++ /* Unexpected, ACK it and hope for the best. */
++ reg_dma_rw_ack_intr ack_intr = {
++ .group = 1,
++ .ctxt = 1,
++ .data = 1,
++ .in_eop = 0,
++ .stream_cmd = 1,
++ .dummy1 = ~0
++ };
++ REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static inline void crisv32_eth_roll_tx_timer(struct crisv32_ethernet_local *np)
++{
++ /* If there are more packets in the ring, roll the tx timer. */
++ if (np->txpackets) {
++ /* Eth pause frames may halt us for up to 320ms (100mbit). */
++ unsigned long timeout = jiffies + (HZ / 3) + 1;
++ mod_timer(&np->transmit_timer, timeout);
++ }
++ else
++ del_timer(&np->transmit_timer);
++}
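The 320 ms figure in the comment above is in line with the worst case allowed by 802.3x flow control: a pause frame can request up to 0xffff pause quanta of 512 bit times each, a little over 335 ms at 100 Mbit/s, which is why the retransmit timer is rolled roughly a third of a second ahead. Rough arithmetic as a stand-alone sketch:

#include <stdio.h>

int main(void)
{
	/* Worst-case 802.3x pause: 0xffff quanta of 512 bit times each. */
	double bit_times = 65535.0 * 512.0;
	double max_pause_s = bit_times / 100e6;	/* on a 100 Mbit/s link */

	printf("max pause at 100 Mbit/s ~= %.0f ms\n", max_pause_s * 1000.0);
	return 0;
}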
++
++/* Call with np->lock held. */
++static void _crisv32_tx_ring_advance(struct crisv32_ethernet_local *np,
++ int cleanup)
++{
++ reg_dma_rw_stat stat;
++ dma_descr_data *dma_pos;
++ struct net_device *dev = np->dev;
++ int eol;
++
++ /* Get the current output dma position. */
++ dma_pos = phys_to_virt(REG_RD_INT(dma, np->dma_out_inst, rw_data));
++ stat = REG_RD(dma, np->dma_out_inst, rw_stat);
++ eol = stat.list_state == regk_dma_data_at_eol;
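++	/*
++	 * On cleanup, or when the channel reports end-of-list, the DMA is
++	 * assumed to have consumed everything handed to it, so point dma_pos
++	 * at active_tx_desc and let the loop below reclaim the whole ring.
++	 */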
++ if (cleanup || eol)
++ dma_pos = &np->active_tx_desc->descr;
++
++	/* Take care of transmitted dma descriptors and report sent packets. */
++ while (np->txpackets && (&np->catch_tx_desc->descr != dma_pos)) {
++ /* Update sent packet statistics. */
++ np->stats.tx_bytes += np->catch_tx_desc->skb->len;
++ np->stats.tx_packets++;
++
++ dev_kfree_skb_any(np->catch_tx_desc->skb);
++ np->catch_tx_desc->skb = 0;
++ np->txpackets--;
++ np->catch_tx_desc->descr.buf = 0;
++ np->catch_tx_desc =
++ phys_to_virt((int)np->catch_tx_desc->descr.next);
++ np->do_tx_recovery = 0;
++ np->retrans = 0;
++
++ netif_wake_queue(dev);
++ }
++}
++
++static inline void crisv32_tx_ring_advance(struct crisv32_ethernet_local *np)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&np->lock, flags);
++ _crisv32_tx_ring_advance(np, 0);
++ crisv32_eth_roll_tx_timer(np);
++ spin_unlock_irqrestore(&np->lock, flags);
++}
++
++static inline int crisv32_tx_complete(struct crisv32_ethernet_local *np)
++{
++ reg_dma_rw_ack_intr ack_intr = { .data = 1 };
++ reg_dma_r_intr ints;
++ int r = 0;
++
++ /* We are interested in the unmasked raw interrupt source here. When
++ polling with tx interrupts masked off we still want to do
++	   tx completion when the DMA makes progress. */
++ ints = REG_RD(dma, np->dma_out_inst, r_intr);
++	if (ints.data) {
++ /* ack the interrupt, if it was active */
++ REG_WR(dma, np->dma_out_inst, rw_ack_intr, ack_intr);
++ crisv32_tx_ring_advance(np);
++ r = 1;
++ }
++ return r;
++}
++
++static irqreturn_t crisv32tx_eth_interrupt(int irq, void *dev_id)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev_id);
++
++ crisv32_tx_complete(np);
++ return IRQ_HANDLED;
++}
++
++
++/* Update receive errors. */
++static void
++update_rx_stats(struct crisv32_ethernet_local *np)
++{
++ reg_eth_rs_rec_cnt r;
++
++ r = REG_RD(eth, np->eth_inst, rs_rec_cnt);
++
++ np->stats.rx_over_errors += r.congestion;
++ np->stats.rx_crc_errors += r.crc_err;
++ np->stats.rx_frame_errors += r.align_err;
++ np->stats.rx_length_errors += r.oversize;
++ np->stats.rx_errors += r.crc_err + r.align_err +
++ r.oversize + r.congestion;
++}
++
++/* Update transmit errors. */
++static void update_tx_stats(struct crisv32_ethernet_local *np)
++{
++ reg_eth_rs_tr_cnt r;
++ reg_eth_rs_phy_cnt rp;
++
++ r = REG_RD(eth, np->eth_inst, rs_tr_cnt);
++ rp = REG_RD(eth, np->eth_inst, rs_phy_cnt);
++
++ /* r.deferred is not good for counting collisions because it also
++ includes frames that have to wait for the interframe gap. That
++ means we get deferred frames even when in full duplex.
++ Here we don't actually count the number of collisions that
++	   occurred (ARTPEC-3 seems to lack such a counter), instead we count
++ the number of frames that collide once or more. */
++ np->stats.collisions += r.mult_col + r.single_col;
++ np->stats.tx_window_errors += r.late_col;
++ np->stats.tx_carrier_errors += rp.carrier_loss;
++
++ /* Ordinary collisions are not errors, they are just part of
++ ethernet's bus arbitration and congestion control mechanisms.
++ Late collisions are serious errors though. */
++ np->stats.tx_errors += r.late_col;
++}
++
++/* Get current statistics. */
++static struct net_device_stats *crisv32_get_stats(struct net_device *dev)
++{
++ unsigned long flags;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ spin_lock_irqsave(&np->lock, flags);
++
++ update_rx_stats(np);
++ update_tx_stats(np);
++
++ spin_unlock_irqrestore(&np->lock, flags);
++
++ return &np->stats;
++}
++
++/* Check for network errors. This acknowledges the received interrupts. */
++static irqreturn_t crisv32nw_eth_interrupt(int irq, void *dev_id)
++{
++ struct net_device *dev = (struct net_device *) dev_id;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ reg_eth_r_masked_intr intr_mask;
++ int ack_intr = 0xffff;
++ reg_eth_rw_clr_err clr_err;
++
++ intr_mask = REG_RD(eth, np->eth_inst, r_masked_intr);
++
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ /* Only apply the workaround if it is not already pending.
++ enable_eth_ints will re-enable the orun interrupt regardless
++ of pending_overrun. */
++ if (intr_mask.orun && !np->pending_overrun) {
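++		/*
++		 * Overrun workaround: save rec_ctrl and the group-address
++		 * filters, stop the rx DMA, and make the MAC drop all frames
++		 * (no ma0/broadcast/group match) with the orun interrupt
++		 * masked; __crisv32_eth_restart_rx_dma() restores the saved
++		 * state when the rx path is brought back up.
++		 */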
++ reg_eth_rw_rec_ctrl rec_ctrl =
++ REG_RD(eth, np->eth_inst, rw_rec_ctrl);
++ np->saved_rec_ctrl = rec_ctrl;
++ np->overrun_set = 1;
++ DMA_STOP(np->dma_in_inst);
++ rec_ctrl.ma0 = regk_eth_no;
++ rec_ctrl.broadcast = regk_eth_no;
++ REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
++ np->saved_ga_lo = REG_RD_INT(eth, np->eth_inst, rw_ga_lo);
++ np->saved_ga_hi = REG_RD_INT(eth, np->eth_inst, rw_ga_hi);
++ REG_WR_INT(eth, np->eth_inst, rw_ga_lo, 0);
++ REG_WR_INT(eth, np->eth_inst, rw_ga_hi, 0);
++ REG_WR_INT(eth, np->eth_inst, rw_intr_mask,
++ REG_RD_INT(eth, np->eth_inst, rw_intr_mask) & 0xfbff);
++ REG_WR_INT(eth, np->eth_inst, rw_ack_intr, 0x400);
++ intr_mask.orun = 0;
++ np->pending_overrun = 1;
++ if (!np->napi_processing)
++ crisv32_eth_restart_rx_dma(np->dev, np);
++
++ return IRQ_HANDLED;
++ }
++#endif
++
++ /*
++ * Check for underrun and/or excessive collisions. Note that the
++ * rw_clr_err register clears both underrun and excessive collision
++ * errors, so there's no need to check them separately.
++ */
++ if (np->sender_started
++ && (intr_mask.urun || intr_mask.exc_col)) {
++ unsigned long flags;
++
++		/* Underruns are counted as tx errors. */
++ np->stats.tx_errors += intr_mask.urun;
++ np->stats.tx_fifo_errors += intr_mask.urun;
++
++ /*
++ * Protect against the tx-interrupt messing with
++ * the tx-ring.
++ */
++ spin_lock_irqsave(&np->lock, flags);
++
++ /* DMA should have stopped now, eat from the ring before
++ removing anything due to tx errors. */
++ _crisv32_tx_ring_advance(np, 0);
++
++ /*
++ * Drop packets after 15 retries.
++ * TODO: Add backoff.
++ */
++ if (np->retrans > 15 && np->txpackets) {
++ dev_kfree_skb_irq(np->catch_tx_desc->skb);
++ np->catch_tx_desc->skb = 0;
++ np->catch_tx_desc->descr.buf = 0;
++ np->catch_tx_desc =
++ phys_to_virt((int)
++ np->catch_tx_desc->descr.next);
++ flush_dma_descr(&np->catch_tx_desc->descr, 0);
++
++ np->txpackets--;
++ np->retrans = 0;
++ netif_wake_queue(dev);
++ np->stats.tx_dropped++;
++ }
++ np->ctxt_out.next = 0;
++ if (np->txpackets) {
++ np->retrans++;
++ np->ctxt_out.saved_data = (void *)
++ virt_to_phys(&np->catch_tx_desc->descr);
++ np->ctxt_out.saved_data_buf =
++ np->catch_tx_desc->descr.buf;
++ WARN_ON(!np->ctxt_out.saved_data_buf);
++ flush_dma_descr(&np->catch_tx_desc->descr, 0);
++ cris_flush_cache_range(&np->ctxt_out,
++ sizeof np->ctxt_out);
++
++ /* restart the DMA */
++ DMA_START_CONTEXT(np->dma_out_inst,
++ (int) virt_to_phys(&np->ctxt_out));
++ np->sender_started = 1;
++		} else {
++ /* Load dummy context but do not load the data
++ descriptor nor start the burst. This brings the
++ buggy eth transmitter back in sync with the DMA
++ avoiding malformed frames. */
++ REG_WR(dma, np->dma_out_inst, rw_group_down,
++ (int) virt_to_phys(&np->ctxt_out));
++ DMA_WR_CMD(np->dma_out_inst, regk_dma_load_c);
++ np->sender_started = 0;
++ }
++ crisv32_eth_roll_tx_timer(np);
++ spin_unlock_irqrestore(&np->lock, flags);
++ }
++
++ ack_intr = *(u32 *)&intr_mask;
++ REG_WR_INT(eth, np->eth_inst, rw_ack_intr, ack_intr);
++ clr_err.clr = 1;
++ REG_WR(eth, np->eth_inst, rw_clr_err, clr_err);
++
++ update_rx_stats(np);
++ update_tx_stats(np);
++
++ return IRQ_HANDLED;
++}
++
++/* We have a good packet(s), get it/them out of the buffers. */
++static int crisv32_eth_receive_packet(struct net_device *dev)
++{
++ int length;
++ struct sk_buff *skb;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ struct sk_buff *tmp;
++ unsigned long flags;
++
++ DEBUG(printk("crisv32_receive_packet\n"));
++
++ /* Roll the rx bug timer. */
++ mod_timer(&np->receive_timer, jiffies + HZ);
++
++ /* Activate LED */
++ spin_lock_irqsave(&np->leds->led_lock, flags);
++ if (!np->leds->led_active && time_after(jiffies,
++ np->leds->led_next_time)) {
++ /* light the network leds depending on the current speed. */
++ crisv32_set_network_leds(CRIS_LED_ACTIVITY, dev);
++
++ /* Set the earliest time we may clear the LED */
++ np->leds->led_next_time = jiffies + NET_FLASH_TIME;
++ np->leds->led_active = 1;
++ np->leds->clear_led_timer.data = (unsigned long) dev;
++ mod_timer(&np->leds->clear_led_timer, jiffies + HZ/10);
++ }
++ spin_unlock_irqrestore(&np->leds->led_lock, flags);
++
++ /* Discard CRC (4 bytes). */
++ length = (np->active_rx_desc->descr.after) -
++ (np->active_rx_desc->descr.buf) - 4;
++
++ tmp = dev_alloc_skb(MAX_MEDIA_DATA_SIZE);
++ if (!tmp) {
++ np->stats.rx_errors++;
++ printk(KERN_NOTICE "%s: memory squeeze,"
++		       " dropping packet.\n",
++ dev->name);
++ return 0;
++ }
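++	/*
++	 * Buffer-swap receive: the filled skb is handed up the stack while
++	 * the freshly allocated one is parked in the descriptor; np->newbuf
++	 * records the new buffer's physical address, presumably picked up
++	 * when the ring is advanced below.
++	 */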
++ skb = np->active_rx_desc->skb;
++ np->active_rx_desc->skb = tmp;
++ skb_put(skb, length);
++
++ np->newbuf = virt_to_phys(np->active_rx_desc->skb->data);
++
++ skb->dev = dev;
++ skb->protocol = eth_type_trans(skb, dev);
++ skb->ip_summed = CHECKSUM_NONE;
++
++ np->stats.multicast += skb->pkt_type == PACKET_MULTICAST;
++ /* Send the packet to the upper layer. */
++ netif_receive_skb(skb);
++ np->last_rx_desc =
++ phys_to_virt((int)
++ np->last_rx_desc->descr.next);
++
++ /* Forward rotate the receive ring. */
++ crisv32_eth_rx_ring_advance(np);
++ return length;
++}
++
++/* Must be called with the np-lock held. */
++static void
++__crisv32_eth_restart_rx_dma(struct net_device* dev,
++ struct crisv32_ethernet_local *np)
++{
++ reg_dma_rw_ack_intr ack_intr = {0};
++ reg_dma_rw_stream_cmd dma_sc = {0};
++ reg_dma_rw_stat stat;
++ int resets = 0;
++ reg_eth_rw_intr_mask eth_intr_mask;
++
++ np->rx_dma_restarts++;
++
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ if (np->pending_overrun) {
++ np->pending_overrun = 0;
++ REG_WR_INT(eth, np->eth_inst, rw_ga_lo, np->saved_ga_lo);
++ REG_WR_INT(eth, np->eth_inst, rw_ga_hi, np->saved_ga_hi);
++ REG_WR(eth, np->eth_inst, rw_rec_ctrl, np->saved_rec_ctrl);
++ REG_WR_INT(eth, np->eth_inst, rw_intr_mask,
++ REG_RD_INT(eth, regi_eth, rw_intr_mask) | 0x400);
++ DMA_CONTINUE(np->dma_in_inst);
++ }
++#endif
++ /* Bring down the receiver. */
++ crisv32_disable_rx_ints(np);
++ crisv32_disconnect_eth_rx_dma(np);
++
++ /* Stop DMA and ack possible ints. */
++ DMA_STOP(np->dma_in_inst);
++ ack_intr.in_eop = 1;
++ REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
++
++ crisv32_stop_receiver(np);
++
++ /* Disable overrun interrupts while receive is shut off. */
++ eth_intr_mask = REG_RD(eth, np->eth_inst, rw_intr_mask);
++ eth_intr_mask.orun = regk_eth_no;
++ REG_WR(eth, np->eth_inst, rw_intr_mask, eth_intr_mask);
++ /* ACK overrun. */
++ REG_WR_INT(eth, np->eth_inst, rw_ack_intr, 0x400);
++
++ crisv32_eth_reset_rx_ring(dev);
++ reset:
++	/* TODO: if the number of resets grows too high we should reboot. */
++ if (resets++ > 0)
++ printk("reset DMA %d.\n", resets);
++
++ DMA_RESET(np->dma_in_inst);
++ /* Wait for the channel to reset. */
++ do {
++ stat = REG_RD(dma, np->dma_in_inst, rw_stat);
++ } while (stat.mode != regk_dma_rst);
++
++ /* Now bring the rx path back up. */
++ DMA_ENABLE(np->dma_in_inst);
++ if (dma_wait_busy(np->dma_in_inst, 100))
++ goto reset;
++
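++	/*
++	 * The commented-out DMA_* macro calls below appear to be expanded by
++	 * hand into rw_stream_cmd writes so that dma_wait_busy() can be
++	 * polled between each step of the restart sequence.
++	 */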
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++// DMA_WR_CMD(np->dma_in_inst, regk_dma_set_w_size2);
++ dma_sc.cmd = (regk_dma_set_w_size2);
++ REG_WR(dma, np->dma_in_inst, rw_stream_cmd, dma_sc);
++ if (dma_wait_busy(np->dma_in_inst, 100))
++ goto reset;
++#endif
++
++// DMA_START_CONTEXT(np->dma_in_inst, virt_to_phys(&np->ctxt_in));
++ REG_WR_INT(dma, np->dma_in_inst, rw_group_down, (int)&np->ctxt_in);
++
++// DMA_WR_CMD(np->dma_in_inst, regk_dma_load_c);
++ dma_sc.cmd = (regk_dma_load_c);
++ REG_WR(dma, np->dma_in_inst, rw_stream_cmd, dma_sc);
++ if (dma_wait_busy(np->dma_in_inst, 100))
++ goto reset;
++
++// DMA_WR_CMD(np->dma_in_inst, regk_dma_load_d | regk_dma_burst);
++ dma_sc.cmd = (regk_dma_load_d | regk_dma_burst);
++ REG_WR(dma, np->dma_in_inst, rw_stream_cmd, dma_sc);
++
++ if (dma_wait_busy(np->dma_in_inst, 100))
++ goto reset;
++
++ /* Now things get critical again. Don't give us any interrupts until
++ the following sequence is complete. */
++ DMA_CONTINUE(np->dma_in_inst);
++ np->overrun_set = 0;
++ crisv32_enable_rx_ints(np);
++ crisv32_start_receiver(np);
++
++ /* Reenable overrun interrupts when receive is started again. */
++ eth_intr_mask = REG_RD(eth, np->eth_inst, rw_intr_mask);
++ eth_intr_mask.orun = regk_eth_yes;
++ REG_WR(eth, np->eth_inst, rw_intr_mask, eth_intr_mask);
++
++ crisv32_connect_eth_rx_dma(np);
++}
++
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++static void
++crisv32_eth_restart_rx_dma(struct net_device* dev,
++ struct crisv32_ethernet_local *np)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&np->lock, flags);
++ __crisv32_eth_restart_rx_dma(dev, np);
++ spin_unlock_irqrestore(&np->lock, flags);
++}
++#endif
++
++/*
++ * Is there work to do in the rx-path?
++ */
++static inline int crisv32_has_rx_work(struct crisv32_ethernet_local *np,
++ dma_descr_data *active)
++{
++ int mw;
++ mw = (active->in_eop && np->new_rx_package);
++ return mw;
++}
++
++static void crisv32_eth_do_rx_recovery(struct net_device* dev,
++ struct crisv32_ethernet_local *np)
++{
++ unsigned long flags;
++ static int r = 0;
++
++ r++;
++
++ /* Bring down the receiver. */
++ spin_lock_irqsave(&np->lock, flags);
++ if (!np->do_rx_recovery)
++ goto done;
++
++ napi_disable(&np->napi);
++
++ np->rx_dma_timeouts++;
++
++ __crisv32_eth_restart_rx_dma(dev, np);
++
++ np->do_rx_recovery = 0;
++
++ napi_enable(&np->napi);
++ done:
++ spin_unlock_irqrestore(&np->lock, flags);
++
++ WARN_ON(r != 1);
++ r--;
++}
++
++static void receive_timeout_work(struct work_struct* work)
++{
++ struct dma_descr_data* descr;
++ struct dma_descr_data* descr2;
++ struct net_device* dev = crisv32_dev[0];
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ reg_eth_r_intr intr_mask;
++
++ descr = &np->active_rx_desc->descr;
++ descr2 = phys_to_virt(REG_RD_INT(dma, np->dma_in_inst, rw_data));
++
++ intr_mask = REG_RD(eth, np->eth_inst, r_intr);
++
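++	/*
++	 * Skip recovery unless something indicates a stalled rx path: a
++	 * latched overrun, a pending orun interrupt, or an end-of-packet
++	 * flag visible in either the driver's or the DMA's view of the ring.
++	 */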
++ if (!np->overrun_set
++ && !intr_mask.orun
++ && !descr->in_eop
++ && !descr2->in_eop)
++ return;
++
++ crisv32_eth_do_rx_recovery(dev, np);
++}
++
++static void receive_timeout(unsigned long arg)
++{
++ struct net_device* dev = (struct net_device*)arg;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ np->do_rx_recovery++;
++ schedule_work(&np->receive_work);
++ mod_timer(&np->receive_timer, jiffies + 1*HZ);
++}
++
++static void transmit_timeout(unsigned long arg)
++{
++ struct net_device* dev = (struct net_device*)arg;
++ crisv32_eth_do_tx_recovery(dev);
++}
++
++/*
++ * NAPI poll
++ *
++ * We are allowed to pull up to budget frames from the rx ring.
++ * If we are done, remove us from the poll list and re-enable rx interrupts.
++ * Always return the number of frames pulled from the rx ring.
++ */
++static int crisv32_eth_poll(struct napi_struct *napi, int budget)
++{
++ struct crisv32_ethernet_local *np;
++ int work_done = 0;
++ int morework;
++ int rx_bytes = 0;
++ reg_dma_rw_ack_intr ack_intr = {0};
++
++ np = container_of(napi, struct crisv32_ethernet_local, napi);
++ crisv32_disable_eth_ints(np);
++ np->napi_processing = 1;
++ ack_intr.in_eop = 1;
++
++ if (np->new_rx_package == 0) {
++ /*
++ * In the previous round we pulled a packet from the ring but
++		 * we didn't advance the ring due to a hw DMA bug. Try to do it
++ * now.
++ */
++ np->new_rx_package = 1;
++ crisv32_eth_rx_ring_advance(np);
++ }
++
++ morework = crisv32_has_rx_work(np, &np->active_rx_desc->descr);
++
++ /* See if tx needs attention. */
++ crisv32_tx_complete(np);
++
++ while (morework)
++ {
++ rx_bytes += crisv32_eth_receive_packet(np->dev);
++ work_done++;
++
++ /* Ack irq and restart rx dma */
++ REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
++ DMA_CONTINUE_DATA(np->dma_in_inst);
++
++ if (unlikely(work_done >= budget))
++ break;
++
++ /* See if tx needs attention. */
++ crisv32_tx_complete(np);
++
++ morework = crisv32_has_rx_work(np, &np->active_rx_desc->descr);
++ }
++ crisv32_enable_eth_ints(np);
++
++ if (!morework) {
++ np->napi_processing = 0;
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ if (np->pending_overrun) {
++ crisv32_eth_restart_rx_dma(np->dev, np);
++ }
++#endif
++ if (irqs_disabled())
++ printk("WARNING: %s irqs disabled!\n", __func__);
++
++ if (work_done < budget) {
++ /* first mark as done, then enable irq's */
++ napi_complete(napi);
++ crisv32_enable_rx_ints(np);
++ crisv32_enable_tx_ints(np);
++ }
++ }
++ np->napi_processing = 0;
++
++ np->stats.rx_bytes += rx_bytes;
++ np->stats.rx_packets += work_done;
++ update_rx_stats(np);
++ return work_done;
++}
++
++/*
++ * This function (i.e. hard_start_xmit) is protected from concurrent calls by a
++ * spinlock (xmit_lock) in the net_device structure.
++ */
++static int
++crisv32_eth_send_packet(struct sk_buff *skb, struct net_device *dev)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ unsigned char *buf = skb->data;
++ unsigned long flags;
++
++ /*
++	 * Disable interrupts so the tx interrupt cannot update the ring
++	 * pointers while this packet is being queued.
++ */
++ spin_lock_irqsave(&np->lock, flags);
++
++ np->active_tx_desc->skb = skb;
++ crisv32_eth_hw_send_packet(buf, skb->len, np);
++
++ dev->trans_start = jiffies;
++
++ /* Stop queue if full. */
++ if (crisv32_eth_tx_ring_full(np))
++ netif_stop_queue(dev);
++
++ np->txpackets++;
++ crisv32_eth_roll_tx_timer(np);
++ spin_unlock_irqrestore(&np->lock, flags);
++
++ spin_lock_irqsave(&np->leds->led_lock, flags);
++ if (!np->leds->led_active && time_after(jiffies,
++ np->leds->led_next_time)) {
++ /* light the network leds depending on the current speed. */
++ crisv32_set_network_leds(CRIS_LED_ACTIVITY, dev);
++
++ /* Set the earliest time we may clear the LED */
++ np->leds->led_next_time = jiffies + NET_FLASH_TIME;
++ np->leds->led_active = 1;
++ np->leds->clear_led_timer.data = (unsigned long) dev;
++ mod_timer(&np->leds->clear_led_timer, jiffies + HZ/10);
++ }
++ spin_unlock_irqrestore(&np->leds->led_lock, flags);
++
++ return 0;
++}
++
++
++static void
++crisv32_eth_hw_send_packet(unsigned char *buf, int length, void *priv)
++{
++ struct crisv32_ethernet_local *np =
++ (struct crisv32_ethernet_local *) priv;
++
++ /* Configure the tx dma descriptor. */
++ np->active_tx_desc->descr.buf = (unsigned char *)virt_to_phys(buf);
++
++ np->active_tx_desc->descr.after = np->active_tx_desc->descr.buf +
++ length;
++ np->active_tx_desc->descr.intr = 1;
++ np->active_tx_desc->descr.out_eop = 1;
++
++ /* Move eol. */
++ np->active_tx_desc->descr.eol = 1;
++ flush_dma_descr(&np->active_tx_desc->descr, 1);
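++	/*
++	 * The new descriptor was marked end-of-list above before eol is
++	 * cleared on the previous one, so the DMA should never see a ring
++	 * without a terminator.
++	 */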
++
++ if (np->sender_started)
++ WARN_ON(!np->prev_tx_desc->descr.eol);
++ np->prev_tx_desc->descr.eol = 0;
++ flush_dma_descr(&np->prev_tx_desc->descr, 0);
++
++ /* Update pointers. */
++ np->prev_tx_desc = np->active_tx_desc;
++ np->active_tx_desc = phys_to_virt((int)np->active_tx_desc->descr.next);
++
++ /* Start DMA. */
++ crisv32_start_dma_out(np);
++}
++
++static void crisv32_start_dma_out(struct crisv32_ethernet_local *np)
++{
++ if (!np->sender_started) {
++ /* Start DMA for the first time. */
++ np->ctxt_out.saved_data =
++ (void *)virt_to_phys(&np->prev_tx_desc->descr);
++ np->ctxt_out.saved_data_buf = np->prev_tx_desc->descr.buf;
++ WARN_ON(!np->ctxt_out.saved_data_buf);
++
++ cris_flush_cache_range(&np->ctxt_out, sizeof np->ctxt_out);
++ REG_WR(dma, np->dma_out_inst, rw_group_down,
++ (int) virt_to_phys(&np->ctxt_out));
++ DMA_WR_CMD(np->dma_out_inst, regk_dma_load_c);
++ DMA_WR_CMD(np->dma_out_inst, regk_dma_load_d | regk_dma_burst);
++ np->sender_started = 1;
++ } else {
++ DMA_CONTINUE_DATA(np->dma_out_inst);
++ }
++}
++
++/*
++ * Bring the transmitter back to life.
++ */
++static void
++crisv32_eth_do_tx_recovery(struct net_device *dev)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ reg_eth_rw_clr_err clr_err;
++ reg_dma_rw_stat stat = {0};
++ unsigned long flags;
++ /* ACK urun and exc_col. */
++ int ack_intr = 0x1800;
++ int do_full;
++
++ /* Give the tx recovery some time without link state polling. */
++ if (!np->fixed_phy)
++ mod_timer(&np->speed_timer, jiffies + 4 * HZ);
++
++ np->tx_dma_restarts++;
++
++ spin_lock_irqsave(&np->lock, flags);
++
++ do_full = 1;
++ update_tx_stats(np);
++
++ /* Cancel ongoing frame. */
++ crisv32_eth_tx_cancel_frame(np);
++
++ /* In case TR 125 just hit us. */
++ DMA_WR_CMD(np->dma_out_inst, regk_dma_ack_pkt);
++ dma_wait_busy(np->dma_out_inst, 100);
++
++ /* At this point, the transmit block should be idle or waiting for us
++ to clear the excessive collision error. Let's reset the DMA. */
++ DMA_STOP(np->dma_out_inst);
++
++ crisv32_disconnect_eth_tx_dma(np);
++
++ /* Eat from the tx ring. */
++ _crisv32_tx_ring_advance(np, 1);
++ np->do_tx_recovery++;
++
++ DMA_RESET(np->dma_out_inst);
++ do {
++ stat = REG_RD(dma, np->dma_out_inst, rw_stat);
++ } while (stat.mode != regk_dma_rst);
++
++ /* Next packet will restart output DMA. */
++ np->sender_started = 0;
++
++ crisv32_enable_tx_ints(np);
++
++ DMA_ENABLE(np->dma_out_inst);
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ DMA_WR_CMD(np->dma_out_inst, regk_dma_set_w_size4);
++#endif
++ DMA_CONTINUE(np->dma_out_inst);
++
++ /* Clear pending errors. */
++ REG_WR_INT(eth, np->eth_inst, rw_ack_intr, ack_intr);
++ clr_err.clr = 1;
++ REG_WR(eth, np->eth_inst, rw_clr_err, clr_err);
++
++ /* Do a full reset of the MAC block. */
++ if (do_full) {
++ np->tx_mac_resets++;
++ crisv32_eth_reset(np);
++ }
++
++ crisv32_connect_eth_tx_dma(np);
++
++ if (np->txpackets) {
++ WARN_ON(!np->catch_tx_desc->skb);
++ np->catch_tx_desc->descr.intr = 1;
++ np->catch_tx_desc->descr.out_eop = 1;
++
++ /* Start DMA for the first time. */
++ np->ctxt_out.saved_data =
++ (void *)virt_to_phys(&np->catch_tx_desc->descr);
++ np->ctxt_out.saved_data_buf = np->catch_tx_desc->descr.buf;
++ WARN_ON(!np->ctxt_out.saved_data_buf);
++ flush_dma_descr(&np->catch_tx_desc->descr, 0);
++ cris_flush_cache_range(&np->ctxt_out, sizeof np->ctxt_out);
++
++ REG_WR(dma, np->dma_out_inst, rw_group_down,
++ (int) virt_to_phys(&np->ctxt_out));
++ DMA_WR_CMD(np->dma_out_inst, regk_dma_load_c);
++ DMA_WR_CMD(np->dma_out_inst, regk_dma_load_d | regk_dma_burst);
++ crisv32_eth_roll_tx_timer(np);
++ np->sender_started = 1;
++ }
++
++ if (np->txpackets && crisv32_eth_tx_ring_full(np))
++ netif_stop_queue(dev);
++ else
++ netif_wake_queue(dev);
++
++ spin_unlock_irqrestore(&np->lock, flags);
++}
++
++/*
++ * Set or clear the multicast filter for this adaptor.
++ * num_addrs == -1 Promiscuous mode, receive all packets
++ * num_addrs == 0 Normal mode, clear multicast list
++ * num_addrs > 0 Multicast mode, receive normal and MC packets,
++ * and do best-effort filtering.
++ */
++static void crisv32_eth_set_rx_mode(struct net_device *dev)
++{
++ int num_addr = netdev_mc_count(dev);
++ unsigned long int lo_bits;
++ unsigned long int hi_bits;
++ reg_eth_rw_rec_ctrl rec_ctrl = {0};
++ reg_eth_rw_ga_lo ga_lo = {0};
++ reg_eth_rw_ga_hi ga_hi = {0};
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ if (dev->flags & IFF_PROMISC) {
++ /* Promiscuous mode. */
++ lo_bits = 0xfffffffful;
++ hi_bits = 0xfffffffful;
++
++ /* Enable individual receive. */
++ rec_ctrl = (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst,
++ rw_rec_ctrl);
++ rec_ctrl.individual = regk_eth_yes;
++ REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
++ } else if (dev->flags & IFF_ALLMULTI) {
++ /* Enable all multicasts. */
++ lo_bits = 0xfffffffful;
++ hi_bits = 0xfffffffful;
++
++ /* Disable individual receive */
++ rec_ctrl =
++ (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst, rw_rec_ctrl);
++ rec_ctrl.individual = regk_eth_no;
++ REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
++ } else if (num_addr == 0) {
++ /* Normal, clear the mc list. */
++ lo_bits = 0x00000000ul;
++ hi_bits = 0x00000000ul;
++
++ /* Disable individual receive */
++ rec_ctrl =
++ (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst, rw_rec_ctrl);
++ rec_ctrl.individual = regk_eth_no;
++ REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
++ } else {
++ /* MC mode, receive normal and MC packets. */
++ char hash_ix;
++ struct netdev_hw_addr *ha;
++ char *baddr;
++ lo_bits = 0x00000000ul;
++ hi_bits = 0x00000000ul;
++
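++		/*
++		 * Fold each 48-bit multicast address into a 6-bit hash index
++		 * by XOR-ing it together six bits at a time; indices 0-31
++		 * select a bit in ga_lo and 32-63 a bit in ga_hi.
++		 */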
++ netdev_for_each_mc_addr(ha, dev) {
++ /* Calculate the hash index for the GA registers. */
++ hash_ix = 0;
++ baddr = ha->addr;
++ hash_ix ^= (*baddr) & 0x3f;
++ hash_ix ^= ((*baddr) >> 6) & 0x03;
++ ++baddr;
++ hash_ix ^= ((*baddr) << 2) & 0x03c;
++ hash_ix ^= ((*baddr) >> 4) & 0xf;
++ ++baddr;
++ hash_ix ^= ((*baddr) << 4) & 0x30;
++ hash_ix ^= ((*baddr) >> 2) & 0x3f;
++ ++baddr;
++ hash_ix ^= (*baddr) & 0x3f;
++ hash_ix ^= ((*baddr) >> 6) & 0x03;
++ ++baddr;
++ hash_ix ^= ((*baddr) << 2) & 0x03c;
++ hash_ix ^= ((*baddr) >> 4) & 0xf;
++ ++baddr;
++ hash_ix ^= ((*baddr) << 4) & 0x30;
++ hash_ix ^= ((*baddr) >> 2) & 0x3f;
++
++ hash_ix &= 0x3f;
++
++			if (hash_ix >= 32)
++ hi_bits |= (1 << (hash_ix - 32));
++ else
++ lo_bits |= (1 << hash_ix);
++ }
++
++ /* Disable individual receive. */
++ rec_ctrl =
++ (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst, rw_rec_ctrl);
++ rec_ctrl.individual = regk_eth_no;
++ REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
++ }
++
++ ga_lo.table = (unsigned int) lo_bits;
++ ga_hi.table = (unsigned int) hi_bits;
++
++ REG_WR(eth, np->eth_inst, rw_ga_lo, ga_lo);
++ REG_WR(eth, np->eth_inst, rw_ga_hi, ga_hi);
++}
++
++static int
++crisv32_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
++{
++ struct mii_ioctl_data *data = if_mii(ifr);
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ int old_autoneg;
++ int rc = 0;
++
++ spin_lock(&np->lock); /* Preempt protection */
++ switch (cmd) {
++ case SET_ETH_ENABLE_LEDS:
++ np->use_leds = 1;
++ break;
++ case SET_ETH_DISABLE_LEDS:
++ np->use_leds = 0;
++ break;
++ case SET_ETH_AUTONEG:
++ old_autoneg = np->autoneg_normal;
++ np->autoneg_normal = *(int*)data;
++ if (np->autoneg_normal != old_autoneg)
++ crisv32_eth_negotiate(dev);
++ break;
++ default:
++ rc = generic_mii_ioctl(&np->mii_if,
++ if_mii(ifr), cmd, NULL);
++ break;
++ }
++ spin_unlock(&np->lock);
++ return rc;
++}
++
++static int crisv32_eth_get_settings(struct net_device *dev,
++ struct ethtool_cmd *cmd)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ int err;
++
++ spin_lock_irq(&np->lock);
++ err = mii_ethtool_gset(&np->mii_if, cmd);
++ spin_unlock_irq(&np->lock);
++
++	/* The PHY may support 1000BASE-T, but the ETRAX FS does not. */
++ cmd->supported &= ~(SUPPORTED_1000baseT_Half
++ | SUPPORTED_1000baseT_Full);
++ return err;
++}
++
++static int crisv32_eth_set_settings(struct net_device *dev,
++ struct ethtool_cmd *ecmd)
++{
++ if (ecmd->autoneg == AUTONEG_ENABLE) {
++ crisv32_eth_set_duplex(dev, autoneg);
++ crisv32_eth_set_speed(dev, 0);
++ } else {
++ crisv32_eth_set_duplex(dev, ecmd->duplex);
++ crisv32_eth_set_speed(dev, ecmd->speed);
++ }
++
++ return 0;
++}
++
++static void crisv32_eth_get_drvinfo(struct net_device *dev,
++ struct ethtool_drvinfo *info)
++{
++#ifdef CONFIG_ETRAXFS
++ strncpy(info->driver, "ETRAX FS", sizeof(info->driver) - 1);
++#else
++ strncpy(info->driver, "ARTPEC-3", sizeof(info->driver) - 1);
++#endif
++ strncpy(info->version, "$Revision: 1.197 $", sizeof(info->version) - 1);
++ strncpy(info->fw_version, "N/A", sizeof(info->fw_version) - 1);
++ strncpy(info->bus_info, "N/A", sizeof(info->bus_info) - 1);
++}
++
++static int crisv32_eth_get_ethtool_sset_count(struct net_device *dev,
++ int stringset)
++{
++ if (stringset != ETH_SS_STATS)
++ return -EINVAL;
++
++ return ARRAY_SIZE(ethtool_stats_keys);
++}
++
++static void crisv32_eth_get_ethtool_stats(struct net_device *dev,
++ struct ethtool_stats *stats,
++ u64 *data)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ data[0] = np->tx_dma_restarts;
++ data[1] = np->tx_mac_resets;
++ data[2] = np->rx_dma_restarts;
++ data[3] = np->rx_dma_timeouts;
++ data[4] = np->rx_restarts_dropped;
++}
++
++static void crisv32_eth_get_strings(struct net_device *dev,
++ u32 stringset, u8 *data)
++{
++ switch (stringset) {
++ case ETH_SS_STATS:
++ memcpy(data, &ethtool_stats_keys,
++ sizeof(ethtool_stats_keys));
++ break;
++ default:
++ WARN_ON(1);
++ break;
++ }
++}
++
++static int crisv32_eth_nway_reset(struct net_device *dev)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ if (np->current_duplex == autoneg && np->current_speed_selection == 0)
++ crisv32_eth_negotiate(dev);
++ return 0;
++}
++/* The FS/A3 ethernet block has 23 32-bit config registers. */
++/* plus 2 dma_descr_context */
++/* plus 2 sets of ring pointers (active, prev, last) */
++/* plus 2 sets of DMA registers 40*4 bytes = 0xA0 */
++#define ETRAX_ETH_REGDUMP_LEN (23 * 4 + 2 * sizeof (dma_descr_context) + 2*3*4 + 2*0xA0)
++static int crisv32_eth_get_regs_len(struct net_device *dev)
++{
++ return ETRAX_ETH_REGDUMP_LEN;
++}
++
++static void crisv32_eth_get_regs(struct net_device *dev,
++ struct ethtool_regs *regs, void *_p)
++{
++ u32 *p = _p;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ int i;
++
++ /* Let's call this major version 0, minor version 1 with some
++ * undecided field separation in the version data. Previously
++ * only the eth regs were dumped (version=0: maj 0, min 0).*/
++ regs->version = 1;
++ memset(p, 0, ETRAX_ETH_REGDUMP_LEN);
++
++#define GET_REG32_LOOP(base, len) \
++ do { \
++ for (i = 0; i < len; i += 4) \
++ *(p)++ = REG_READ(u32, (base) + i); \
++ } while (0)
++
++ GET_REG32_LOOP(np->eth_inst, 0x30);
++ /* Do not dump registers with read side effects. */
++ GET_REG32_LOOP(np->eth_inst + 0x34, 1);
++ GET_REG32_LOOP(np->eth_inst + 0x3c, 1);
++ GET_REG32_LOOP(np->eth_inst + 0x44, 0x5c - 0x44);
++
++
++ memcpy(p, &np->ctxt_out, sizeof (dma_descr_context));
++ p += sizeof (dma_descr_context)/4;
++ *(p++) = (u32) np->active_tx_desc;
++ *(p++) = (u32) np->prev_tx_desc;
++ *(p++) = (u32) np->catch_tx_desc;
++
++ GET_REG32_LOOP(np->dma_out_inst, 0xa0);
++
++ memcpy(p, &np->ctxt_in, sizeof (dma_descr_context));
++ p += sizeof (dma_descr_context)/4;
++ *(p++) = (u32)np->active_rx_desc;
++ *(p++) = (u32)np->prev_rx_desc;
++ *(p++) = (u32)np->last_rx_desc;
++
++ GET_REG32_LOOP(np->dma_in_inst, 0xa0);
++#undef GET_REG32_LOOP
++}
++
++static struct ethtool_ops crisv32_ethtool_ops = {
++ .get_settings = crisv32_eth_get_settings,
++ .set_settings = crisv32_eth_set_settings,
++ .get_drvinfo = crisv32_eth_get_drvinfo,
++ .get_regs_len = crisv32_eth_get_regs_len,
++ .get_regs = crisv32_eth_get_regs,
++ .nway_reset = crisv32_eth_nway_reset,
++ .get_link = ethtool_op_get_link,
++ .get_strings = crisv32_eth_get_strings,
++ .get_ethtool_stats = crisv32_eth_get_ethtool_stats,
++ .get_sset_count = crisv32_eth_get_ethtool_sset_count
++};
++
++/* Is this function really needed? Use ethtool instead? */
++static int crisv32_eth_set_config(struct net_device *dev, struct ifmap *map)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ spin_lock(&np->lock); /* Preempt protection */
++
++ switch (map->port) {
++ case IF_PORT_UNKNOWN:
++ /* Use autoneg */
++ crisv32_eth_set_speed(dev, 0);
++ crisv32_eth_set_duplex(dev, autoneg);
++ break;
++ case IF_PORT_10BASET:
++ crisv32_eth_set_speed(dev, 10);
++ crisv32_eth_set_duplex(dev, autoneg);
++ break;
++ case IF_PORT_100BASET:
++ case IF_PORT_100BASETX:
++ crisv32_eth_set_speed(dev, 100);
++ crisv32_eth_set_duplex(dev, autoneg);
++ break;
++ case IF_PORT_100BASEFX:
++ case IF_PORT_10BASE2:
++ case IF_PORT_AUI:
++ spin_unlock(&np->lock);
++ return -EOPNOTSUPP;
++ break;
++ default:
++ printk(KERN_ERR "%s: Invalid media selected",
++ dev->name);
++ spin_unlock(&np->lock);
++ return -EINVAL;
++ }
++ spin_unlock(&np->lock);
++ return 0;
++}
++
++static void crisv32_eth_negotiate(struct net_device *dev)
++{
++ unsigned short data;
++ unsigned short ctrl1000;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
++ ctrl1000 = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
++ MII_CTRL1000);
++
++ /* Make all capabilities available */
++ data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
++ ADVERTISE_100HALF | ADVERTISE_100FULL;
++ ctrl1000 |= ADVERTISE_1000HALF | ADVERTISE_1000FULL;
++
++	/* Remove the speed capabilities that we do not want */
++ switch (np->current_speed_selection) {
++ case 10 :
++ data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL);
++ ctrl1000 &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
++ break;
++ case 100 :
++ data &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL);
++ ctrl1000 &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
++ break;
++ case 1000 :
++ data &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
++ ADVERTISE_100HALF | ADVERTISE_100FULL);
++ break;
++ }
++
++	/* Remove the duplex capabilities that we do not want */
++ if (np->current_duplex == full) {
++ data &= ~(ADVERTISE_10HALF | ADVERTISE_100HALF);
++ ctrl1000 &= ~(ADVERTISE_1000HALF);
++	} else if (np->current_duplex == half) {
++ data &= ~(ADVERTISE_10FULL | ADVERTISE_100FULL);
++ ctrl1000 &= ~(ADVERTISE_1000FULL);
++ }
++
++ crisv32_eth_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ crisv32_eth_set_mdio_reg(dev, np->mii_if.phy_id,
++ MII_CTRL1000, ctrl1000);
++#endif
++
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
++ if (np->autoneg_normal) {
++ /* Renegotiate with link partner */
++ data |= BMCR_ANENABLE | BMCR_ANRESTART;
++ } else {
++		/* Don't negotiate speed or duplex */
++ data &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
++
++		/* Set speed and duplex statically */
++ if (np->current_speed_selection == 10) {
++ data &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
++ }
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ else if (np->current_speed_selection == 1000) {
++ data &= ~BMCR_SPEED100;
++ data |= BMCR_SPEED1000;
++ }
++#endif
++ else {
++ data |= BMCR_SPEED100;
++ data &= ~BMCR_SPEED1000;
++ }
++
++ if (np->current_duplex != full) {
++ data &= ~BMCR_FULLDPLX;
++ } else {
++ data |= BMCR_FULLDPLX;
++ }
++ }
++ crisv32_eth_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
++}
++
++static void crisv32_eth_check_speed(unsigned long idev)
++{
++#ifndef CONFIG_ETRAX_NO_PHY
++ static int led_initiated = 0;
++ struct net_device *dev = (struct net_device *) idev;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ unsigned long data;
++ int old_speed;
++ unsigned long flags;
++
++ BUG_ON(!np);
++ BUG_ON(!np->transceiver);
++
++ spin_lock(&np->transceiver_lock);
++
++ old_speed = np->current_speed;
++
++ /* Do a fake read. This is needed for DM9161, otherwise the link will
++ * go up and down all the time.
++ */
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR);
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR);
++
++ if (!(data & BMSR_LSTATUS))
++ np->current_speed = 0;
++ else
++ np->transceiver->check_speed(dev);
++
++ spin_lock_irqsave(&np->leds->led_lock, flags);
++ if ((old_speed != np->current_speed) || !led_initiated) {
++ led_initiated = 1;
++ np->leds->clear_led_timer.data = (unsigned long) dev;
++ if (np->current_speed) {
++ if (!np->link)
++ netif_carrier_on(dev);
++ crisv32_set_network_leds(CRIS_LED_LINK, dev);
++ np->link = 1;
++ } else {
++ if (np->link)
++ netif_carrier_off(dev);
++ crisv32_set_network_leds(CRIS_LED_NOLINK, dev);
++ np->link = 0;
++ }
++ }
++ spin_unlock_irqrestore(&np->leds->led_lock, flags);
++
++ /* Reinitialize the timer. */
++ np->speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
++ add_timer(&np->speed_timer);
++
++ spin_unlock(&np->transceiver_lock);
++#endif
++}
++
++static void crisv32_eth_set_speed(struct net_device *dev, unsigned long speed)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ spin_lock(&np->transceiver_lock);
++ if (np->current_speed_selection != speed) {
++ np->current_speed_selection = speed;
++ crisv32_eth_negotiate(dev);
++ }
++ spin_unlock(&np->transceiver_lock);
++}
++
++static void crisv32_eth_check_duplex(unsigned long idev)
++{
++#ifndef CONFIG_ETRAX_NO_PHY
++ struct net_device *dev = (struct net_device *) idev;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ reg_eth_rw_rec_ctrl rec_ctrl;
++ int old_duplex = np->full_duplex;
++
++ np->transceiver->check_duplex(dev);
++
++ if (old_duplex != np->full_duplex) {
++ /* Duplex changed. */
++ rec_ctrl = (reg_eth_rw_rec_ctrl) REG_RD(eth, np->eth_inst,
++ rw_rec_ctrl);
++ rec_ctrl.duplex = np->full_duplex;
++ REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl);
++ }
++
++ /* Reinitialize the timer. */
++ np->duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
++ add_timer(&np->duplex_timer);
++#endif
++}
++
++static void
++crisv32_eth_set_duplex(struct net_device *dev, enum duplex new_duplex)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ spin_lock(&np->transceiver_lock);
++ if (np->current_duplex != new_duplex) {
++ np->current_duplex = new_duplex;
++ crisv32_eth_negotiate(dev);
++ }
++ spin_unlock(&np->transceiver_lock);
++}
++
++static int crisv32_eth_probe_transceiver(struct net_device *dev)
++{
++#ifndef CONFIG_ETRAX_NO_PHY
++ unsigned int phyid_high;
++ unsigned int phyid_low;
++ unsigned int oui;
++ struct transceiver_ops *ops = NULL;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ /* Probe MDIO physical address. */
++ for (np->mii_if.phy_id = 0;
++ np->mii_if.phy_id <= 31; np->mii_if.phy_id++) {
++ if (crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR)
++ != 0xffff)
++ break;
++ }
++
++ if (np->mii_if.phy_id == 32)
++ return -ENODEV;
++
++ /* Get manufacturer. */
++ phyid_high = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
++ MII_PHYSID1);
++ phyid_low = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
++ MII_PHYSID2);
++
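++	/*
++	 * PHYSID1 holds OUI bits 3..18 and the top six bits of PHYSID2 hold
++	 * bits 19..24, so this reassembles a 22-bit vendor id that can be
++	 * matched against the transceivers[] table.
++	 */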
++ oui = (phyid_high << 6) | (phyid_low >> 10);
++
++ for (ops = &transceivers[0]; ops->oui; ops++) {
++ if (ops->oui == oui)
++ break;
++ }
++
++ np->transceiver = ops;
++
++ if (oui == DM9161_OUI) {
++ /* Do not bypass the scrambler/descrambler, this is needed
++ * to make 10Mbit work.
++ */
++ crisv32_eth_set_mdio_reg(dev, np->mii_if.phy_id,
++ MII_DM9161_SCR,MII_DM9161_SCR_INIT);
++ /* Clear 10BTCSR to default */
++ crisv32_eth_set_mdio_reg(dev, np->mii_if.phy_id,
++ MII_DM9161_10BTCSR,
++ MII_DM9161_10BTCSR_INIT);
++ }
++ return 0;
++#else
++ return -ENODEV;
++#endif
++}
++
++#ifndef CONFIG_ETRAX_NO_PHY
++static void generic_check_speed(struct net_device *dev)
++{
++ unsigned long data;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
++ if ((data & ADVERTISE_100FULL) ||
++ (data & ADVERTISE_100HALF))
++ np->current_speed = 100;
++ else
++ np->current_speed = 10;
++}
++
++static void generic_check_duplex(struct net_device *dev)
++{
++ unsigned long data;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
++ if ((data & ADVERTISE_10FULL) ||
++ (data & ADVERTISE_100FULL))
++ np->full_duplex = 1;
++ else
++ np->full_duplex = 0;
++}
++
++static void broadcom_check_speed(struct net_device *dev)
++{
++ unsigned long data;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
++ MDIO_AUX_CTRL_STATUS_REG);
++ np->current_speed = (data & MDIO_BC_SPEED ? 100 : 10);
++}
++
++static void broadcom_check_duplex(struct net_device *dev)
++{
++ unsigned long data;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
++ MDIO_AUX_CTRL_STATUS_REG);
++ np->full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
++}
++
++static void tdk_check_speed(struct net_device *dev)
++{
++ unsigned long data;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
++ MDIO_TDK_DIAGNOSTIC_REG);
++ np->current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10);
++}
++
++static void tdk_check_duplex(struct net_device *dev)
++{
++ unsigned long data;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
++ MDIO_TDK_DIAGNOSTIC_REG);
++ np->full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
++
++}
++
++static void intel_check_speed(struct net_device *dev)
++{
++ unsigned long data;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
++ MDIO_INT_STATUS_REG_2);
++ np->current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
++}
++
++static void intel_check_duplex(struct net_device *dev)
++{
++ unsigned long data;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
++ MDIO_INT_STATUS_REG_2);
++ np->full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
++}
++
++static void national_check_speed(struct net_device *dev)
++{
++ unsigned long data;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
++ MDIO_NAT_LINK_AN_REG);
++ if (data & MDIO_NAT_1000)
++ np->current_speed = 1000;
++ else if (data & MDIO_NAT_100)
++ np->current_speed = 100;
++ else
++ np->current_speed = 10;
++}
++
++static void national_check_duplex(struct net_device *dev)
++{
++ unsigned long data;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
++ MDIO_NAT_LINK_AN_REG);
++ if (data & MDIO_NAT_FULL_DUPLEX_IND)
++ np->full_duplex = 1;
++ else
++ np->full_duplex = 0;
++}
++
++static void vitesse_check_speed(struct net_device *dev)
++{
++ unsigned long data;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
++ MDIO_VIT_AUX_STAT);
++ if ((data & 0x18) == MDIO_VIT_1000)
++ np->current_speed = 1000;
++ else if ((data & 0x18) == MDIO_VIT_100)
++ np->current_speed = 100;
++ else
++ np->current_speed = 10;
++}
++
++static void vitesse_check_duplex(struct net_device *dev)
++{
++ unsigned long data;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
++ MDIO_VIT_AUX_STAT);
++ if (data & 0x20)
++ np->full_duplex = 1;
++ else
++ np->full_duplex = 0;
++}
++
++static void davicom_check_speed(struct net_device *dev)
++{
++ unsigned long data;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
++ np->current_speed = (data & BMCR_SPEED100) ? 100 : 10;
++}
++
++static void davicom_check_duplex(struct net_device *dev)
++{
++ unsigned long data;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
++ np->full_duplex = (data & BMCR_FULLDPLX) ? 1 : 0;
++}
++#endif
++
++#if 0
++static void crisv32_eth_reset_tranceiver(struct net_device *dev)
++{
++ int i;
++ unsigned short cmd;
++ unsigned short data;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
++
++ cmd = (MDIO_START << 14)
++ | (MDIO_WRITE << 12)
++ | (np->mii_if.phy_id << 7)
++ | (MII_BMCR << 2);
++
++ crisv32_eth_send_mdio_cmd(dev, cmd, 1);
++
++ data |= 0x8000;
++
++ /* Magic value is number of bits. */
++ for (i = 15; i >= 0; i--)
++ crisv32_eth_send_mdio_bit(dev, GET_BIT(i, data));
++}
++#endif
++
++static int
++crisv32_eth_get_mdio_reg(struct net_device *dev, int phyid, int reg_num)
++{
++ int i;
++ unsigned short cmd; /* Data to be sent on MDIO port. */
++ unsigned short data; /* Data read from MDIO. */
++
++#ifdef CONFIG_ETRAX_NO_PHY
++ return 0;
++#endif
++
++ /* Start of frame, OP Code, Physical Address, Register Address. */
++ cmd = (MDIO_START << 14)
++ | (MDIO_READ << 12)
++ | (phyid << 7)
++ | (reg_num << 2);
++
++ crisv32_eth_send_mdio_cmd(dev, cmd, 0);
++
++ data = 0;
++
++ /* Receive data. Magic value is number of bits. */
++ for (i = 15; i >= 0; i--)
++ data |= (crisv32_eth_receive_mdio_bit(dev) << i);
++
++ return data;
++}
++
++static void
++crisv32_eth_set_mdio_reg(struct net_device *dev, int phyid, int reg, int value)
++{
++ int bitCounter;
++ unsigned short cmd;
++
++#ifdef CONFIG_ETRAX_NO_PHY
++ return;
++#endif
++ cmd = (MDIO_START << 14)
++ | (MDIO_WRITE << 12)
++ | (phyid << 7)
++ | (reg << 2);
++
++ crisv32_eth_send_mdio_cmd(dev, cmd, 1);
++
++ /* Data... */
++	for (bitCounter = 15; bitCounter >= 0; bitCounter--) {
++ crisv32_eth_send_mdio_bit(dev, GET_BIT(bitCounter, value));
++ }
++}
++
++static void
++crisv32_eth_send_mdio_cmd(struct net_device *dev, unsigned short cmd,
++ int write_cmd)
++{
++ int i;
++ unsigned char data = 0x2;
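++	/* 0x2 is the two-bit '10' turnaround pattern driven on writes; on
++	   reads the bus is instead released for those two clock cycles. */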
++
++ /* Preamble. Magic value is number of bits. */
++ for (i = 31; i >= 0; i--)
++ crisv32_eth_send_mdio_bit(dev, GET_BIT(i, MDIO_PREAMBLE));
++
++ for (i = 15; i >= 2; i--)
++ crisv32_eth_send_mdio_bit(dev, GET_BIT(i, cmd));
++
++ /* Turnaround. */
++ for (i = 1; i >= 0; i--)
++ if (write_cmd)
++ crisv32_eth_send_mdio_bit(dev, GET_BIT(i, data));
++ else
++ crisv32_eth_receive_mdio_bit(dev);
++}
++
++static void crisv32_eth_send_mdio_bit(struct net_device *dev, unsigned char bit)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ reg_eth_rw_mgm_ctrl mgm_ctrl = {
++ .mdoe = regk_eth_yes,
++ .mdio = bit & 1
++ };
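++	/*
++	 * Bit-banged MDIO: present the data bit with MDC low, wait, then
++	 * raise MDC.  At roughly 1 us per half cycle this should stay well
++	 * below the usual 2.5 MHz MDC limit.
++	 */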
++
++ REG_WR(eth, np->eth_inst, rw_mgm_ctrl, mgm_ctrl);
++
++ udelay(1);
++
++ mgm_ctrl.mdc = 1;
++ REG_WR(eth, np->eth_inst, rw_mgm_ctrl, mgm_ctrl);
++
++ udelay(1);
++}
++
++static unsigned char crisv32_eth_receive_mdio_bit(struct net_device *dev)
++{
++ reg_eth_r_stat stat;
++ reg_eth_rw_mgm_ctrl mgm_ctrl = {0};
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++
++ REG_WR(eth, np->eth_inst, rw_mgm_ctrl, mgm_ctrl);
++ stat = REG_RD(eth, np->eth_inst, r_stat);
++
++ udelay(1);
++
++ mgm_ctrl.mdc = 1;
++ REG_WR(eth, np->eth_inst, rw_mgm_ctrl, mgm_ctrl);
++
++ udelay(1);
++ return stat.mdio;
++}
++
++static void crisv32_clear_network_leds(unsigned long priv)
++{
++ struct net_device *dev = (struct net_device *)priv;
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ unsigned long flags;
++
++ spin_lock_irqsave(&np->leds->led_lock, flags);
++ if (np->leds->led_active && time_after(jiffies,
++ np->leds->led_next_time)) {
++ crisv32_set_network_leds(CRIS_LED_NOACTIVITY, dev);
++
++ /* Set the earliest time we may set the LED */
++ np->leds->led_next_time = jiffies + NET_FLASH_PAUSE;
++ np->leds->led_active = 0;
++ }
++ spin_unlock_irqrestore(&np->leds->led_lock, flags);
++}
++
++static void crisv32_set_network_leds(int active, struct net_device *dev)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ int light_leds = 0;
++
++ if (np->leds->ledgrp == CRIS_LED_GRP_NONE)
++ return;
++
++ if (!np->use_leds)
++ return;
++
++ if (active == CRIS_LED_NOLINK) {
++ if (dev == crisv32_dev[0])
++ np->leds->ifisup[0] = 0;
++ else
++ np->leds->ifisup[1] = 0;
++ }
++ else if (active == CRIS_LED_LINK) {
++ if (dev == crisv32_dev[0])
++ np->leds->ifisup[0] = 1;
++ else
++ np->leds->ifisup[1] = 1;
++#if defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK)
++ light_leds = 1;
++ } else {
++ light_leds = (active == CRIS_LED_NOACTIVITY);
++#elif defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY)
++ light_leds = 0;
++ } else {
++ light_leds = (active == CRIS_LED_ACTIVITY);
++#else
++#error "Define either CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK or CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY"
++#endif
++ }
++}
++
++#ifdef CONFIG_NET_POLL_CONTROLLER
++static void crisv32_netpoll(struct net_device *netdev)
++{
++ crisv32rx_eth_interrupt(DMA0_INTR_VECT, netdev);
++}
++#endif
++
++#ifdef CONFIG_CPU_FREQ
++static int crisv32_ethernet_freq_notifier(struct notifier_block *nb,
++ unsigned long val, void *data)
++{
++ struct cpufreq_freqs *freqs = data;
++ int i;
++ if (val != CPUFREQ_POSTCHANGE)
++ return 0;
++
++ for (i = 0; i < 2; i++) {
++		struct net_device *dev = crisv32_dev[i];
++		struct crisv32_ethernet_local *np;
++		unsigned short data;
++		if (dev == NULL)
++			continue;
++		np = netdev_priv(dev);
++
++ data = crisv32_eth_get_mdio_reg(dev, np->mii_if.phy_id,
++ MII_BMCR);
++ if (freqs->new == 200000)
++ data &= ~BMCR_PDOWN;
++ else
++ data |= BMCR_PDOWN;
++ crisv32_eth_set_mdio_reg(dev, np->mii_if.phy_id,
++ MII_BMCR, data);
++ }
++ return 0;
++}
++#endif
++
++#if 0
++/*
++ * Must be called with the np->lock held.
++ */
++static void crisv32_ethernet_bug(struct net_device *dev)
++{
++ struct crisv32_ethernet_local *np = netdev_priv(dev);
++ dma_descr_data *dma_pos;
++ dma_descr_data *in_dma_pos;
++ reg_dma_rw_stat stat = {0};
++ reg_dma_rw_stat in_stat = {0};
++ int i;
++
++ /* Get the current output dma position. */
++ stat = REG_RD(dma, np->dma_out_inst, rw_stat);
++
++ dma_pos = phys_to_virt(REG_RD_INT(dma, np->dma_out_inst, rw_data));
++ in_dma_pos = phys_to_virt(REG_RD_INT(dma, np->dma_in_inst, rw_data));
++ in_stat = REG_RD(dma, np->dma_in_inst, rw_stat);
++
++ printk("%s:\n"
++ "stat.list_state=%x\n"
++ "stat.mode=%x\n"
++ "stat.stream_cmd_src=%x\n"
++ "dma_pos=%x\n"
++ "tx catch=%x active=%x\n"
++ "packets=%d queue=%d sender_started=%d\n"
++ "intr_vect.r_vect=%x\n"
++ "dma.r_masked_intr=%x dma.rw_ack_intr=%x "
++ "dma.r_intr=%x dma.rw_intr_masked=%x\n"
++ "eth.r_stat=%x\n",
++ __func__,
++ stat.list_state, stat.mode, stat.stream_cmd_src,
++ (unsigned int)dma_pos,
++ (unsigned int)&np->catch_tx_desc->descr,
++ (unsigned int)&np->active_tx_desc->descr,
++ np->txpackets,
++ netif_queue_stopped(dev), np->sender_started,
++ REG_RD_INT(intr_vect, regi_irq, r_vect),
++ REG_RD_INT(dma, np->dma_out_inst, r_masked_intr),
++ REG_RD_INT(dma, np->dma_out_inst, rw_ack_intr),
++ REG_RD_INT(dma, np->dma_out_inst, r_intr),
++ REG_RD_INT(dma, np->dma_out_inst, rw_intr_mask),
++ REG_RD_INT(eth, np->eth_inst, r_stat));
++
++ printk("in_stat.list_state=%x\n"
++ "in_stat.mode=%x\n"
++ "in_stat.stream_cmd_src=%x\n"
++ "in_dma_pos=%x\n"
++ "rx last=%x prev=%x active=%x\n",
++ in_stat.list_state, in_stat.mode, in_stat.stream_cmd_src,
++ (unsigned int)in_dma_pos,
++ (unsigned int)&np->last_rx_desc->descr,
++ (unsigned int)&np->prev_rx_desc->descr,
++ (unsigned int)&np->active_rx_desc->descr);
++
++#if 0
++ printk("rx-descriptors:\n");
++ for (i = 0; i < NBR_RX_DESC; i++) {
++ printk("rxdesc[%d]=0x%x\n", i, (unsigned int)
++ virt_to_phys(&np->dma_rx_descr_list[i].descr));
++ printk("rxdesc[%d].skb=0x%x\n", i,
++ (unsigned int)np->dma_rx_descr_list[i].skb);
++ printk("rxdesc[%d].buf=0x%x\n", i,
++ (unsigned int)np->dma_rx_descr_list[i].descr.buf);
++ printk("rxdesc[%d].after=0x%x\n", i,
++ (unsigned int)np->dma_rx_descr_list[i].descr.after);
++ printk("rxdesc[%d].intr=%x\n", i,
++ np->dma_rx_descr_list[i].descr.intr);
++ printk("rxdesc[%d].eol=%x\n", i,
++ np->dma_rx_descr_list[i].descr.eol);
++ printk("rxdesc[%d].out_eop=%x\n", i,
++ np->dma_rx_descr_list[i].descr.out_eop);
++ printk("rxdesc[%d].in_eop=%x\n", i,
++ np->dma_rx_descr_list[i].descr.in_eop);
++ printk("rxdesc[%d].wait=%x\n", i,
++ np->dma_rx_descr_list[i].descr.wait);
++ }
++#endif
++
++#if 1
++ printk("tx-descriptors:\n");
++ for (i = 0; i < NBR_TX_DESC; i++) {
++ printk("txdesc[%d]=0x%x\n", i, (unsigned int)
++ virt_to_phys(&np->dma_tx_descr_list[i].descr));
++ printk("txdesc[%d].skb=0x%x\n", i,
++ (unsigned int)np->dma_tx_descr_list[i].skb);
++ printk("txdesc[%d].buf=0x%x\n", i,
++ (unsigned int)np->dma_tx_descr_list[i].descr.buf);
++ printk("txdesc[%d].after=0x%x\n", i,
++ (unsigned int)np->dma_tx_descr_list[i].descr.after);
++ printk("txdesc[%d].intr=%x\n", i,
++ np->dma_tx_descr_list[i].descr.intr);
++ printk("txdesc[%d].eol=%x\n", i,
++ np->dma_tx_descr_list[i].descr.eol);
++ printk("txdesc[%d].out_eop=%x\n", i,
++ np->dma_tx_descr_list[i].descr.out_eop);
++ printk("txdesc[%d].in_eop=%x\n", i,
++ np->dma_tx_descr_list[i].descr.in_eop);
++ printk("txdesc[%d].wait=%x\n", i,
++ np->dma_tx_descr_list[i].descr.wait);
++ }
++#endif
++}
++#endif
++
++static int __init crisv32_boot_setup(char *str)
++{
++ struct sockaddr sa = {0};
++ int i;
++
++	/* Parse the colon-separated Ethernet station address */
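++	/* Expected format: aa:bb:cc:dd:ee:ff -- each byte starts three
++	   characters after the previous one in the string. */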
++ for (i = 0; i < ETH_ALEN; i++) {
++ unsigned int tmp;
++ if (sscanf(str + 3*i, "%2x", &tmp) != 1) {
++			printk(KERN_WARNING "Malformed station address\n");
++ return 0;
++ }
++ sa.sa_data[i] = (char)tmp;
++ }
++
++ default_mac_iface0 = sa;
++ return 1;
++}
++
++__setup("crisv32_eth=", crisv32_boot_setup);
++
++module_init(crisv32_ethernet_init);
+diff -Nur linux-4.4.6.orig/drivers/net/cris/eth_v32.h linux-4.4.6/drivers/net/cris/eth_v32.h
+--- linux-4.4.6.orig/drivers/net/cris/eth_v32.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-4.4.6/drivers/net/cris/eth_v32.h 2016-03-20 15:09:24.046562363 +0100
+@@ -0,0 +1,291 @@
++/*
++ * Definitions for ETRAX FS ethernet driver.
++ *
++ * Copyright (C) 2003, 2004, 2005 Axis Communications.
++ */
++
++#ifndef _ETRAX_ETHERNET_H_
++#define _ETRAX_ETHERNET_H_
++
++#include <hwregs/dma.h>
++
++#define MAX_MEDIA_DATA_SIZE 1522 /* Max packet size. */
++
++#define NBR_RX_DESC 128 /* Number of RX descriptors. */
++#define NBR_TX_DESC 16 /* Number of TX descriptors. */
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++#define NBR_INTMEM_RX_DESC 16 /* Number of RX descriptors in int. mem.
++ * when running in gigabit mode.
++					 * Should be less than NBR_RX_DESC
++ */
++#define NBR_INTMEM_TX_BUF 4 /* Number of TX buffers in int. mem
++ * when running in gigabit mode.
++ * Should be less than NBR_TX_DESC
++ */
++#endif
++
++/* Large packets are sent directly to upper layers while small packets
++ * are copied (to reduce memory waste). The following constant
++ * decides the breakpoint.
++ */
++#define RX_COPYBREAK (256)
++
++#define ETHER_HEAD_LEN (14)
++
++/*
++ * MDIO constants.
++ */
++#define MDIO_START 0x1
++#define MDIO_READ 0x2
++#define MDIO_WRITE 0x1
++#define MDIO_PREAMBLE 0xfffffffful
++
++/* Broadcom specific */
++#define MDIO_AUX_CTRL_STATUS_REG 0x18
++#define MDIO_BC_FULL_DUPLEX_IND 0x1
++#define MDIO_BC_SPEED 0x2
++
++/* TDK specific */
++#define MDIO_TDK_DIAGNOSTIC_REG 18
++#define MDIO_TDK_DIAGNOSTIC_RATE 0x400
++#define MDIO_TDK_DIAGNOSTIC_DPLX 0x800
++
++/*Intel LXT972A specific*/
++#define MDIO_INT_STATUS_REG_2 0x0011
++#define MDIO_INT_FULL_DUPLEX_IND ( 0x0001 << 9 )
++#define MDIO_INT_SPEED ( 0x0001 << 14 )
++
++/*National Semiconductor DP83865 specific*/
++#define MDIO_NAT_LINK_AN_REG 0x11
++#define MDIO_NAT_1000 (0x0001 << 4)
++#define MDIO_NAT_100 (0x0001 << 3)
++#define MDIO_NAT_FULL_DUPLEX_IND (0x0001 << 1)
++
++/* Vitesse VCS8641 specific */
++#define MDIO_VIT_AUX_STAT 0x1c
++#define MDIO_VIT_1000 (0x2 << 3)
++#define MDIO_VIT_100 (0x1 << 3)
++#define MDIO_VIT_10 0
++#define MDIO_VIT_FD (0x1 << 5)
++
++/* Davicom DM9161 specific */
++#define DM9161_OUI 0x606E
++#define MII_DM9161_SCR 0x10
++#define MII_DM9161_SCR_INIT 0x0610
++#define MII_DM9161_SCR_RMII 0x0100
++#define MII_DM9161_10BTCSR 0x12
++#define MII_DM9161_10BTCSR_INIT 0x7800
++
++/* Network flash constants */
++#define NET_FLASH_TIME (HZ/50) /* 20 ms */
++#define NET_FLASH_PAUSE (HZ/100) /* 10 ms */
++#define NET_LINK_UP_CHECK_INTERVAL (2*HZ) /* 2 seconds. */
++#define NET_DUPLEX_CHECK_INTERVAL (2*HZ) /* 2 seconds. */
++
++/* Duplex settings. */
++enum duplex {
++ half,
++ full,
++ autoneg
++};
++
++/* Some transceivers require special handling. */
++struct transceiver_ops {
++ unsigned int oui;
++ void (*check_speed) (struct net_device * dev);
++ void (*check_duplex) (struct net_device * dev);
++};
++
++typedef struct crisv32_eth_descr {
++ dma_descr_data descr __attribute__ ((__aligned__(32)));
++ struct sk_buff *skb;
++ unsigned char *linearized_packet;
++} crisv32_eth_descr;
++
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++struct tx_buffer_list {
++ struct tx_buffer_list *next;
++ unsigned char *buf;
++ char free;
++};
++#endif
++
++/* LED stuff */
++#define CRIS_LED_GRP_0 0
++#define CRIS_LED_GRP_1 1
++#define CRIS_LED_GRP_NONE 2
++
++#define CRIS_LED_ACTIVITY 0
++#define CRIS_LED_NOACTIVITY 1
++#define CRIS_LED_LINK 2
++#define CRIS_LED_NOLINK 3
++
++struct crisv32_eth_leds {
++ unsigned int ledgrp;
++ int led_active;
++ unsigned long led_next_time;
++ struct timer_list clear_led_timer;
++ spinlock_t led_lock; /* Protect LED state */
++ int ifisup[2];
++};
++
++/* Information that need to be kept for each device. */
++struct crisv32_ethernet_local {
++ /* FIXME: These align attributes don't really help. If they are really
++	 * needed, alignment has to be enforced at runtime, since these
++	 * objects are dynamically allocated. */
++ dma_descr_context ctxt_in __attribute__ ((__aligned__(32)));
++ dma_descr_context ctxt_out __attribute__ ((__aligned__(32)));
++
++ crisv32_eth_descr dma_rx_descr_list[NBR_RX_DESC];
++ crisv32_eth_descr dma_tx_descr_list[NBR_TX_DESC];
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++ struct tx_buffer_list tx_intmem_buf_list[NBR_INTMEM_TX_BUF];
++ struct tx_buffer_list *intmem_tx_buf_active;
++ struct tx_buffer_list *intmem_tx_buf_catch;
++ int gigabit_mode;
++#endif
++ /* Transmit data path. */
++ int dma_out_inst;
++ int sender_started;
++
++ /* TX-ring state. */
++ crisv32_eth_descr *active_tx_desc;
++ crisv32_eth_descr *prev_tx_desc;
++ crisv32_eth_descr *catch_tx_desc;
++ int txpackets;
++ int retrans;
++ int do_tx_recovery;
++ struct timer_list transmit_timer;
++
++ /* Receive data path. */
++ struct napi_struct napi;
++ int dma_in_inst;
++
++ /* RX-ring state. */
++ crisv32_eth_descr *active_rx_desc;
++ crisv32_eth_descr *prev_rx_desc;
++ crisv32_eth_descr *last_rx_desc;
++
++ unsigned long newbuf;
++ u8 new_rx_package;
++ u8 pending_overrun;
++ u8 overrun_set;
++ u8 link;
++ int napi_processing;
++ struct timer_list receive_timer;
++ struct work_struct receive_work;
++ reg_eth_rw_rec_ctrl saved_rec_ctrl;
++ int saved_ga_lo;
++ int saved_ga_hi;
++ int do_rx_recovery;
++
++ /* Control paths. */
++ spinlock_t lock;
++ struct net_device *dev;
++ int eth_inst;
++
++	/* Toggle network LED usage at runtime */
++ int use_leds;
++ struct crisv32_eth_leds *leds;
++
++ /* PHY control. */
++ int fixed_phy;
++ spinlock_t transceiver_lock; /* Protect transceiver state. */
++ struct transceiver_ops *transceiver;
++ struct mii_if_info mii_if;
++
++	/* Specifies whether we should do autonegotiation or not.
++	 * TODO: This ad-hoc hack should be removed; ethtool already supports
++	 * this kind of control.
++	 */
++ int autoneg_normal;
++
++ struct timer_list duplex_timer;
++ int full_duplex;
++ enum duplex current_duplex;
++
++ struct timer_list speed_timer;
++	int current_speed; /* Speed read from transceiver */
++ int current_speed_selection; /* Speed selected by user */
++
++ /* Statistics. */
++ u64 tx_dma_restarts;
++ u64 tx_mac_resets;
++ u64 rx_dma_restarts;
++ u64 rx_dma_timeouts;
++ u64 rx_restarts_dropped;
++
++ struct net_device_stats stats;
++};
++
++/* Function prototypes. */
++static int crisv32_ethernet_init(void);
++static int crisv32_ethernet_device_init(struct net_device *dev);
++static int crisv32_eth_open(struct net_device *dev);
++static int crisv32_eth_close(struct net_device *dev);
++static int crisv32_eth_set_mac_address(struct net_device *dev, void *vpntr);
++static irqreturn_t crisv32rx_eth_interrupt(int irq, void *dev_id);
++static irqreturn_t crisv32tx_eth_interrupt(int irq, void *dev_id);
++static irqreturn_t crisv32nw_eth_interrupt(int irq, void *dev_id);
++static int crisv32_eth_send_packet(struct sk_buff *skb, struct net_device *dev);
++static void crisv32_eth_hw_send_packet(unsigned char *buf, int length,
++ void *priv);
++static void crisv32_eth_do_tx_recovery(struct net_device *dev);
++static void crisv32_eth_set_rx_mode(struct net_device *dev);
++static int crisv32_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
++ int cmd);
++static int crisv32_eth_set_config(struct net_device *dev, struct ifmap *map);
++#ifdef CONFIG_CRIS_MACH_ARTPEC3
++static void crisv32_eth_switch_intmem_usage(struct net_device *dev);
++#endif
++static void crisv32_eth_negotiate(struct net_device *dev);
++static void crisv32_eth_set_speed(struct net_device *dev, unsigned long speed);
++#ifndef CONFIG_ETRAX_NO_PHY
++static void crisv32_eth_check_duplex(unsigned long idev);
++static void crisv32_eth_check_speed(unsigned long idev);
++#endif
++
++static void crisv32_eth_set_duplex(struct net_device *dev, enum duplex);
++static int crisv32_eth_probe_transceiver(struct net_device *dev);
++
++static struct ethtool_ops crisv32_ethtool_ops;
++
++#ifndef CONFIG_ETRAX_NO_PHY
++static void generic_check_speed(struct net_device *dev);
++static void generic_check_duplex(struct net_device *dev);
++static void broadcom_check_speed(struct net_device *dev);
++static void broadcom_check_duplex(struct net_device *dev);
++static void tdk_check_speed(struct net_device *dev);
++static void tdk_check_duplex(struct net_device *dev);
++static void intel_check_speed(struct net_device *dev);
++static void intel_check_duplex(struct net_device *dev);
++static void national_check_speed(struct net_device *dev);
++static void national_check_duplex(struct net_device *dev);
++static void vitesse_check_speed(struct net_device *dev);
++static void vitesse_check_duplex(struct net_device *dev);
++static void davicom_check_speed(struct net_device *dev);
++static void davicom_check_duplex(struct net_device *dev);
++#endif
++
++#ifdef CONFIG_NET_POLL_CONTROLLER
++static void crisv32_netpoll(struct net_device *dev);
++#endif
++
++static void crisv32_clear_network_leds(unsigned long dummy);
++static void crisv32_set_network_leds(int active, struct net_device *dev);
++
++static int crisv32_eth_get_mdio_reg(struct net_device *dev,
++ int phyid, int reg_num);
++static void crisv32_eth_set_mdio_reg(struct net_device *dev,
++ int phyid, int reg_num, int val);
++static void crisv32_eth_send_mdio_cmd(struct net_device *dev,
++ unsigned short cmd, int write_cmd);
++static void crisv32_eth_send_mdio_bit(struct net_device *dev,
++ unsigned char bit);
++static unsigned char crisv32_eth_receive_mdio_bit(struct net_device *dev);
++
++static struct net_device_stats *crisv32_get_stats(struct net_device *dev);
++static void crisv32_start_dma_out(struct crisv32_ethernet_local *np);
++
++#endif /* _ETRAX_ETHERNET_H_ */
+diff -Nur linux-4.4.6.orig/drivers/net/cris/Makefile linux-4.4.6/drivers/net/cris/Makefile
+--- linux-4.4.6.orig/drivers/net/cris/Makefile 2016-03-16 16:43:17.000000000 +0100
++++ linux-4.4.6/drivers/net/cris/Makefile 2016-03-20 11:35:09.089964990 +0100
+@@ -1 +1,2 @@
+ obj-$(CONFIG_ETRAX_ARCH_V10) += eth_v10.o
++obj-$(CONFIG_ETRAX_ARCH_V32) += eth_v32.o