diff -Nur linux-4.7.3.orig/arch/cris/arch-v32/drivers/Kconfig linux-4.7.3/arch/cris/arch-v32/drivers/Kconfig --- linux-4.7.3.orig/arch/cris/arch-v32/drivers/Kconfig 2016-09-07 08:35:12.000000000 +0200 +++ linux-4.7.3/arch/cris/arch-v32/drivers/Kconfig 2016-09-13 01:47:09.507717605 +0200 @@ -8,9 +8,18 @@ This option enables the ETRAX FS built-in 10/100Mbit Ethernet controller. +config ETRAX_HAVE_PHY + bool "PHY present" + default y + help + Search and use the first PHY available on the MDIO bus. Fail + if none is found. Say Y here if you are not in a switched + environment (single port device). + config ETRAX_NO_PHY bool "PHY not present" depends on ETRAX_ETHERNET + default n help This option disables all MDIO communication with an ethernet transceiver connected to the MII interface. This option shall @@ -18,6 +27,70 @@ switch. This option should normally be disabled. If enabled, speed and duplex will be locked to 100 Mbit and full duplex. +config ETRAX_PHY_FALLBACK + bool "Fixed PHY fallback" + depends on ETRAX_ETHERNET + default n + help + If no PHY is found on the MDIO bus, fall back on a fixed + 100/Full fixed PHY. Say Y here if you need dynamic PHY + presence detection (switch connection where some but not + all ports have integrated PHYs), otherwise say N. + +config ETRAX_ETHERNET_IFACE0 + depends on ETRAX_ETHERNET + bool "Enable network interface 0" + +config ETRAX_ETHERNET_IFACE1 + depends on (ETRAX_ETHERNET && ETRAXFS) + bool "Enable network interface 1 (uses DMA6 and DMA7)" + +choice + prompt "Eth0 led group" + depends on ETRAX_ETHERNET_IFACE0 + default ETRAX_ETH0_USE_LEDGRP0 + +config ETRAX_ETH0_USE_LEDGRP0 + bool "Use LED grp 0" + depends on ETRAX_NBR_LED_GRP_ONE || ETRAX_NBR_LED_GRP_TWO + help + Use LED grp 0 for eth0 + +config ETRAX_ETH0_USE_LEDGRP1 + bool "Use LED grp 1" + depends on ETRAX_NBR_LED_GRP_TWO + help + Use LED grp 1 for eth0 + +config ETRAX_ETH0_USE_LEDGRPNULL + bool "Use no LEDs for eth0" + help + Use no LEDs for eth0 +endchoice + +choice + prompt "Eth1 led group" + depends on ETRAX_ETHERNET_IFACE1 + default ETRAX_ETH1_USE_LEDGRP1 + +config ETRAX_ETH1_USE_LEDGRP0 + bool "Use LED grp 0" + depends on ETRAX_NBR_LED_GRP_ONE || ETRAX_NBR_LED_GRP_TWO + help + Use LED grp 0 for eth1 + +config ETRAX_ETH1_USE_LEDGRP1 + bool "Use LED grp 1" + depends on ETRAX_NBR_LED_GRP_TWO + help + Use LED grp 1 for eth1 + +config ETRAX_ETH1_USE_LEDGRPNULL + bool "Use no LEDs for eth1" + help + Use no LEDs for eth1 +endchoice + config ETRAXFS_SERIAL bool "Serial-port support" depends on ETRAX_ARCH_V32 diff -Nur linux-4.7.3.orig/arch/cris/include/arch-v32/arch/hwregs/eth_defs.h linux-4.7.3/arch/cris/include/arch-v32/arch/hwregs/eth_defs.h --- linux-4.7.3.orig/arch/cris/include/arch-v32/arch/hwregs/eth_defs.h 2016-09-07 08:35:12.000000000 +0200 +++ linux-4.7.3/arch/cris/include/arch-v32/arch/hwregs/eth_defs.h 2016-09-13 01:47:09.527718381 +0200 @@ -2,69 +2,64 @@ #define __eth_defs_h /* - * This file is autogenerated from - * file: eth.r - * id: eth_regs.r,v 1.16 2005/05/20 15:41:22 perz Exp - * last modfied: Mon Jan 9 06:06:41 2006 - * - * by /n/asic/design/tools/rdesc/rdes2c eth.r - * id: $Id: eth_defs.h,v 1.7 2006/01/26 13:45:30 karljope Exp $ - * Any changes here will be lost. - * - * -*- buffer-read-only: t -*- + * Note: Previously this was autogenerated code from the hardware + * implementation. However, to enable the same file to be used + * for both ARTPEC-3 and ETRAX FS this file is now hand-edited. + * Be careful. 
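+ *
+ * The register access macros below are functionally unchanged (only
+ * re-indented). As a worked example of the token pasting,
+ *   REG_RD(eth, regi_eth0, rw_rec_ctrl)
+ * expands to
+ *   REG_READ(reg_eth_rw_rec_ctrl, (regi_eth0) + REG_RD_ADDR_eth_rw_rec_ctrl),
+ * i.e. a reg_eth_rw_rec_ctrl-typed read at byte offset 28 of that
+ * ethernet instance.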
*/ + /* Main access macros */ #ifndef REG_RD #define REG_RD( scope, inst, reg ) \ - REG_READ( reg_##scope##_##reg, \ - (inst) + REG_RD_ADDR_##scope##_##reg ) + REG_READ( reg_##scope##_##reg, \ + (inst) + REG_RD_ADDR_##scope##_##reg ) #endif #ifndef REG_WR #define REG_WR( scope, inst, reg, val ) \ - REG_WRITE( reg_##scope##_##reg, \ - (inst) + REG_WR_ADDR_##scope##_##reg, (val) ) + REG_WRITE( reg_##scope##_##reg, \ + (inst) + REG_WR_ADDR_##scope##_##reg, (val) ) #endif #ifndef REG_RD_VECT #define REG_RD_VECT( scope, inst, reg, index ) \ - REG_READ( reg_##scope##_##reg, \ - (inst) + REG_RD_ADDR_##scope##_##reg + \ - (index) * STRIDE_##scope##_##reg ) + REG_READ( reg_##scope##_##reg, \ + (inst) + REG_RD_ADDR_##scope##_##reg + \ + (index) * STRIDE_##scope##_##reg ) #endif #ifndef REG_WR_VECT #define REG_WR_VECT( scope, inst, reg, index, val ) \ - REG_WRITE( reg_##scope##_##reg, \ - (inst) + REG_WR_ADDR_##scope##_##reg + \ - (index) * STRIDE_##scope##_##reg, (val) ) + REG_WRITE( reg_##scope##_##reg, \ + (inst) + REG_WR_ADDR_##scope##_##reg + \ + (index) * STRIDE_##scope##_##reg, (val) ) #endif #ifndef REG_RD_INT #define REG_RD_INT( scope, inst, reg ) \ - REG_READ( int, (inst) + REG_RD_ADDR_##scope##_##reg ) + REG_READ( int, (inst) + REG_RD_ADDR_##scope##_##reg ) #endif #ifndef REG_WR_INT #define REG_WR_INT( scope, inst, reg, val ) \ - REG_WRITE( int, (inst) + REG_WR_ADDR_##scope##_##reg, (val) ) + REG_WRITE( int, (inst) + REG_WR_ADDR_##scope##_##reg, (val) ) #endif #ifndef REG_RD_INT_VECT #define REG_RD_INT_VECT( scope, inst, reg, index ) \ - REG_READ( int, (inst) + REG_RD_ADDR_##scope##_##reg + \ - (index) * STRIDE_##scope##_##reg ) + REG_READ( int, (inst) + REG_RD_ADDR_##scope##_##reg + \ + (index) * STRIDE_##scope##_##reg ) #endif #ifndef REG_WR_INT_VECT #define REG_WR_INT_VECT( scope, inst, reg, index, val ) \ - REG_WRITE( int, (inst) + REG_WR_ADDR_##scope##_##reg + \ - (index) * STRIDE_##scope##_##reg, (val) ) + REG_WRITE( int, (inst) + REG_WR_ADDR_##scope##_##reg + \ + (index) * STRIDE_##scope##_##reg, (val) ) #endif #ifndef REG_TYPE_CONV #define REG_TYPE_CONV( type, orgtype, val ) \ - ( { union { orgtype o; type n; } r; r.o = val; r.n; } ) + ( { union { orgtype o; type n; } r; r.o = val; r.n; } ) #endif #ifndef reg_page_size @@ -73,306 +68,332 @@ #ifndef REG_ADDR #define REG_ADDR( scope, inst, reg ) \ - ( (inst) + REG_RD_ADDR_##scope##_##reg ) + ( (inst) + REG_RD_ADDR_##scope##_##reg ) #endif #ifndef REG_ADDR_VECT #define REG_ADDR_VECT( scope, inst, reg, index ) \ - ( (inst) + REG_RD_ADDR_##scope##_##reg + \ - (index) * STRIDE_##scope##_##reg ) + ( (inst) + REG_RD_ADDR_##scope##_##reg + \ + (index) * STRIDE_##scope##_##reg ) #endif /* C-code for register scope eth */ /* Register rw_ma0_lo, scope eth, type rw */ typedef struct { - unsigned int addr : 32; + unsigned int addr : 32; } reg_eth_rw_ma0_lo; #define REG_RD_ADDR_eth_rw_ma0_lo 0 #define REG_WR_ADDR_eth_rw_ma0_lo 0 /* Register rw_ma0_hi, scope eth, type rw */ typedef struct { - unsigned int addr : 16; - unsigned int dummy1 : 16; + unsigned int addr : 16; + unsigned int dummy1 : 16; } reg_eth_rw_ma0_hi; #define REG_RD_ADDR_eth_rw_ma0_hi 4 #define REG_WR_ADDR_eth_rw_ma0_hi 4 /* Register rw_ma1_lo, scope eth, type rw */ typedef struct { - unsigned int addr : 32; + unsigned int addr : 32; } reg_eth_rw_ma1_lo; #define REG_RD_ADDR_eth_rw_ma1_lo 8 #define REG_WR_ADDR_eth_rw_ma1_lo 8 /* Register rw_ma1_hi, scope eth, type rw */ typedef struct { - unsigned int addr : 16; - unsigned int dummy1 : 16; + unsigned int addr : 16; + unsigned int 
dummy1 : 16; } reg_eth_rw_ma1_hi; #define REG_RD_ADDR_eth_rw_ma1_hi 12 #define REG_WR_ADDR_eth_rw_ma1_hi 12 /* Register rw_ga_lo, scope eth, type rw */ typedef struct { - unsigned int tbl : 32; + unsigned int table : 32; } reg_eth_rw_ga_lo; #define REG_RD_ADDR_eth_rw_ga_lo 16 #define REG_WR_ADDR_eth_rw_ga_lo 16 /* Register rw_ga_hi, scope eth, type rw */ typedef struct { - unsigned int tbl : 32; + unsigned int table : 32; } reg_eth_rw_ga_hi; #define REG_RD_ADDR_eth_rw_ga_hi 20 #define REG_WR_ADDR_eth_rw_ga_hi 20 /* Register rw_gen_ctrl, scope eth, type rw */ typedef struct { - unsigned int en : 1; - unsigned int phy : 2; - unsigned int protocol : 1; - unsigned int loopback : 1; - unsigned int flow_ctrl : 1; - unsigned int gtxclk_out : 1; - unsigned int phyrst_n : 1; - unsigned int dummy1 : 24; + unsigned int en : 1; + unsigned int phy : 2; + unsigned int protocol : 1; + unsigned int loopback : 1; + unsigned int flow_ctrl : 1; + unsigned int gtxclk_out : 1; + unsigned int phyrst_n : 1; + unsigned int dummy1 : 24; } reg_eth_rw_gen_ctrl; #define REG_RD_ADDR_eth_rw_gen_ctrl 24 #define REG_WR_ADDR_eth_rw_gen_ctrl 24 /* Register rw_rec_ctrl, scope eth, type rw */ typedef struct { - unsigned int ma0 : 1; - unsigned int ma1 : 1; - unsigned int individual : 1; - unsigned int broadcast : 1; - unsigned int undersize : 1; - unsigned int oversize : 1; - unsigned int bad_crc : 1; - unsigned int duplex : 1; - unsigned int max_size : 16; - unsigned int dummy1 : 8; + unsigned int ma0 : 1; + unsigned int ma1 : 1; + unsigned int individual : 1; + unsigned int broadcast : 1; + unsigned int undersize : 1; + unsigned int oversize : 1; + unsigned int bad_crc : 1; + unsigned int duplex : 1; +#ifdef CONFIG_CRIS_MACH_ARTPEC3 + unsigned int max_size : 16; + unsigned int dummy1 : 8; +#else + unsigned int max_size : 1; + unsigned int dummy1 : 23; +#endif } reg_eth_rw_rec_ctrl; #define REG_RD_ADDR_eth_rw_rec_ctrl 28 #define REG_WR_ADDR_eth_rw_rec_ctrl 28 /* Register rw_tr_ctrl, scope eth, type rw */ typedef struct { - unsigned int crc : 1; - unsigned int pad : 1; - unsigned int retry : 1; - unsigned int ignore_col : 1; - unsigned int cancel : 1; - unsigned int hsh_delay : 1; - unsigned int ignore_crs : 1; - unsigned int carrier_ext : 1; - unsigned int dummy1 : 24; + unsigned int crc : 1; + unsigned int pad : 1; + unsigned int retry : 1; + unsigned int ignore_col : 1; + unsigned int cancel : 1; + unsigned int hsh_delay : 1; + unsigned int ignore_crs : 1; + unsigned int carrier_ext : 1; + unsigned int dummy1 : 24; } reg_eth_rw_tr_ctrl; #define REG_RD_ADDR_eth_rw_tr_ctrl 32 #define REG_WR_ADDR_eth_rw_tr_ctrl 32 /* Register rw_clr_err, scope eth, type rw */ typedef struct { - unsigned int clr : 1; - unsigned int dummy1 : 31; + unsigned int clr : 1; + unsigned int dummy1 : 31; } reg_eth_rw_clr_err; #define REG_RD_ADDR_eth_rw_clr_err 36 #define REG_WR_ADDR_eth_rw_clr_err 36 /* Register rw_mgm_ctrl, scope eth, type rw */ typedef struct { - unsigned int mdio : 1; - unsigned int mdoe : 1; - unsigned int mdc : 1; - unsigned int dummy1 : 29; + unsigned int mdio : 1; + unsigned int mdoe : 1; + unsigned int mdc : 1; + unsigned int phyclk : 1; + unsigned int txdata : 4; + unsigned int txen : 1; + unsigned int dummy1 : 23; } reg_eth_rw_mgm_ctrl; #define REG_RD_ADDR_eth_rw_mgm_ctrl 40 #define REG_WR_ADDR_eth_rw_mgm_ctrl 40 /* Register r_stat, scope eth, type r */ typedef struct { - unsigned int mdio : 1; - unsigned int exc_col : 1; - unsigned int urun : 1; - unsigned int clk_125 : 1; - unsigned int dummy1 : 28; + unsigned int mdio : 
1; + unsigned int exc_col : 1; + unsigned int urun : 1; +#ifdef CONFIG_CRIS_MACH_ARTPEC3 + unsigned int clk_125 : 1; +#else + unsigned int phyclk : 1; +#endif + unsigned int txdata : 4; + unsigned int txen : 1; + unsigned int col : 1; + unsigned int crs : 1; + unsigned int txclk : 1; + unsigned int rxdata : 4; + unsigned int rxer : 1; + unsigned int rxdv : 1; + unsigned int rxclk : 1; + unsigned int dummy1 : 13; } reg_eth_r_stat; #define REG_RD_ADDR_eth_r_stat 44 /* Register rs_rec_cnt, scope eth, type rs */ typedef struct { - unsigned int crc_err : 8; - unsigned int align_err : 8; - unsigned int oversize : 8; - unsigned int congestion : 8; + unsigned int crc_err : 8; + unsigned int align_err : 8; + unsigned int oversize : 8; + unsigned int congestion : 8; } reg_eth_rs_rec_cnt; #define REG_RD_ADDR_eth_rs_rec_cnt 48 /* Register r_rec_cnt, scope eth, type r */ typedef struct { - unsigned int crc_err : 8; - unsigned int align_err : 8; - unsigned int oversize : 8; - unsigned int congestion : 8; + unsigned int crc_err : 8; + unsigned int align_err : 8; + unsigned int oversize : 8; + unsigned int congestion : 8; } reg_eth_r_rec_cnt; #define REG_RD_ADDR_eth_r_rec_cnt 52 /* Register rs_tr_cnt, scope eth, type rs */ typedef struct { - unsigned int single_col : 8; - unsigned int mult_col : 8; - unsigned int late_col : 8; - unsigned int deferred : 8; + unsigned int single_col : 8; + unsigned int mult_col : 8; + unsigned int late_col : 8; + unsigned int deferred : 8; } reg_eth_rs_tr_cnt; #define REG_RD_ADDR_eth_rs_tr_cnt 56 /* Register r_tr_cnt, scope eth, type r */ typedef struct { - unsigned int single_col : 8; - unsigned int mult_col : 8; - unsigned int late_col : 8; - unsigned int deferred : 8; + unsigned int single_col : 8; + unsigned int mult_col : 8; + unsigned int late_col : 8; + unsigned int deferred : 8; } reg_eth_r_tr_cnt; #define REG_RD_ADDR_eth_r_tr_cnt 60 /* Register rs_phy_cnt, scope eth, type rs */ typedef struct { - unsigned int carrier_loss : 8; - unsigned int sqe_err : 8; - unsigned int dummy1 : 16; + unsigned int carrier_loss : 8; + unsigned int sqe_err : 8; + unsigned int dummy1 : 16; } reg_eth_rs_phy_cnt; #define REG_RD_ADDR_eth_rs_phy_cnt 64 /* Register r_phy_cnt, scope eth, type r */ typedef struct { - unsigned int carrier_loss : 8; - unsigned int sqe_err : 8; - unsigned int dummy1 : 16; + unsigned int carrier_loss : 8; + unsigned int sqe_err : 8; + unsigned int dummy1 : 16; } reg_eth_r_phy_cnt; #define REG_RD_ADDR_eth_r_phy_cnt 68 /* Register rw_test_ctrl, scope eth, type rw */ typedef struct { - unsigned int snmp_inc : 1; - unsigned int snmp : 1; - unsigned int backoff : 1; - unsigned int dummy1 : 29; + unsigned int snmp_inc : 1; + unsigned int snmp : 1; + unsigned int backoff : 1; + unsigned int dummy1 : 29; } reg_eth_rw_test_ctrl; #define REG_RD_ADDR_eth_rw_test_ctrl 72 #define REG_WR_ADDR_eth_rw_test_ctrl 72 /* Register rw_intr_mask, scope eth, type rw */ typedef struct { - unsigned int crc : 1; - unsigned int align : 1; - unsigned int oversize : 1; - unsigned int congestion : 1; - unsigned int single_col : 1; - unsigned int mult_col : 1; - unsigned int late_col : 1; - unsigned int deferred : 1; - unsigned int carrier_loss : 1; - unsigned int sqe_test_err : 1; - unsigned int orun : 1; - unsigned int urun : 1; - unsigned int exc_col : 1; - unsigned int mdio : 1; - unsigned int dummy1 : 18; + unsigned int crc : 1; + unsigned int align : 1; + unsigned int oversize : 1; + unsigned int congestion : 1; + unsigned int single_col : 1; + unsigned int mult_col : 1; + unsigned int 
late_col : 1; + unsigned int deferred : 1; + unsigned int carrier_loss : 1; + unsigned int sqe_test_err : 1; + unsigned int orun : 1; + unsigned int urun : 1; + unsigned int exc_col : 1; + unsigned int mdio : 1; + unsigned int dummy1 : 18; } reg_eth_rw_intr_mask; #define REG_RD_ADDR_eth_rw_intr_mask 76 #define REG_WR_ADDR_eth_rw_intr_mask 76 /* Register rw_ack_intr, scope eth, type rw */ typedef struct { - unsigned int crc : 1; - unsigned int align : 1; - unsigned int oversize : 1; - unsigned int congestion : 1; - unsigned int single_col : 1; - unsigned int mult_col : 1; - unsigned int late_col : 1; - unsigned int deferred : 1; - unsigned int carrier_loss : 1; - unsigned int sqe_test_err : 1; - unsigned int orun : 1; - unsigned int urun : 1; - unsigned int exc_col : 1; - unsigned int mdio : 1; - unsigned int dummy1 : 18; + unsigned int crc : 1; + unsigned int align : 1; + unsigned int oversize : 1; + unsigned int congestion : 1; + unsigned int single_col : 1; + unsigned int mult_col : 1; + unsigned int late_col : 1; + unsigned int deferred : 1; + unsigned int carrier_loss : 1; + unsigned int sqe_test_err : 1; + unsigned int orun : 1; + unsigned int urun : 1; + unsigned int exc_col : 1; + unsigned int mdio : 1; + unsigned int dummy1 : 18; } reg_eth_rw_ack_intr; #define REG_RD_ADDR_eth_rw_ack_intr 80 #define REG_WR_ADDR_eth_rw_ack_intr 80 /* Register r_intr, scope eth, type r */ typedef struct { - unsigned int crc : 1; - unsigned int align : 1; - unsigned int oversize : 1; - unsigned int congestion : 1; - unsigned int single_col : 1; - unsigned int mult_col : 1; - unsigned int late_col : 1; - unsigned int deferred : 1; - unsigned int carrier_loss : 1; - unsigned int sqe_test_err : 1; - unsigned int orun : 1; - unsigned int urun : 1; - unsigned int exc_col : 1; - unsigned int mdio : 1; - unsigned int dummy1 : 18; + unsigned int crc : 1; + unsigned int align : 1; + unsigned int oversize : 1; + unsigned int congestion : 1; + unsigned int single_col : 1; + unsigned int mult_col : 1; + unsigned int late_col : 1; + unsigned int deferred : 1; + unsigned int carrier_loss : 1; + unsigned int sqe_test_err : 1; + unsigned int orun : 1; + unsigned int urun : 1; + unsigned int exc_col : 1; + unsigned int mdio : 1; + unsigned int dummy1 : 18; } reg_eth_r_intr; #define REG_RD_ADDR_eth_r_intr 84 /* Register r_masked_intr, scope eth, type r */ typedef struct { - unsigned int crc : 1; - unsigned int align : 1; - unsigned int oversize : 1; - unsigned int congestion : 1; - unsigned int single_col : 1; - unsigned int mult_col : 1; - unsigned int late_col : 1; - unsigned int deferred : 1; - unsigned int carrier_loss : 1; - unsigned int sqe_test_err : 1; - unsigned int orun : 1; - unsigned int urun : 1; - unsigned int exc_col : 1; - unsigned int mdio : 1; - unsigned int dummy1 : 18; + unsigned int crc : 1; + unsigned int align : 1; + unsigned int oversize : 1; + unsigned int congestion : 1; + unsigned int single_col : 1; + unsigned int mult_col : 1; + unsigned int late_col : 1; + unsigned int deferred : 1; + unsigned int carrier_loss : 1; + unsigned int sqe_test_err : 1; + unsigned int orun : 1; + unsigned int urun : 1; + unsigned int exc_col : 1; + unsigned int mdio : 1; + unsigned int dummy1 : 18; } reg_eth_r_masked_intr; #define REG_RD_ADDR_eth_r_masked_intr 88 - /* Constants */ enum { - regk_eth_discard = 0x00000000, - regk_eth_ether = 0x00000000, - regk_eth_full = 0x00000001, - regk_eth_gmii = 0x00000003, - regk_eth_gtxclk = 0x00000001, - regk_eth_half = 0x00000000, - regk_eth_hsh = 0x00000001, - regk_eth_mii 
= 0x00000001,
- regk_eth_mii_arec = 0x00000002,
- regk_eth_mii_clk = 0x00000000,
- regk_eth_no = 0x00000000,
- regk_eth_phyrst = 0x00000000,
- regk_eth_rec = 0x00000001,
- regk_eth_rw_ga_hi_default = 0x00000000,
- regk_eth_rw_ga_lo_default = 0x00000000,
- regk_eth_rw_gen_ctrl_default = 0x00000000,
- regk_eth_rw_intr_mask_default = 0x00000000,
- regk_eth_rw_ma0_hi_default = 0x00000000,
- regk_eth_rw_ma0_lo_default = 0x00000000,
- regk_eth_rw_ma1_hi_default = 0x00000000,
- regk_eth_rw_ma1_lo_default = 0x00000000,
- regk_eth_rw_mgm_ctrl_default = 0x00000000,
- regk_eth_rw_test_ctrl_default = 0x00000000,
- regk_eth_size1518 = 0x000005ee,
- regk_eth_size1522 = 0x000005f2,
- regk_eth_yes = 0x00000001
+ regk_eth_discard = 0x00000000,
+ regk_eth_ether = 0x00000000,
+ regk_eth_full = 0x00000001,
+ regk_eth_gmii = 0x00000003,
+ regk_eth_gtxclk = 0x00000001,
+ regk_eth_half = 0x00000000,
+ regk_eth_hsh = 0x00000001,
+ regk_eth_mii = 0x00000001,
+ regk_eth_mii_arec = 0x00000002,
+ regk_eth_mii_clk = 0x00000000,
+ regk_eth_no = 0x00000000,
+ regk_eth_phyrst = 0x00000000,
+ regk_eth_rec = 0x00000001,
+ regk_eth_rw_ga_hi_default = 0x00000000,
+ regk_eth_rw_ga_lo_default = 0x00000000,
+ regk_eth_rw_gen_ctrl_default = 0x00000000,
+ regk_eth_rw_intr_mask_default = 0x00000000,
+ regk_eth_rw_ma0_hi_default = 0x00000000,
+ regk_eth_rw_ma0_lo_default = 0x00000000,
+ regk_eth_rw_ma1_hi_default = 0x00000000,
+ regk_eth_rw_ma1_lo_default = 0x00000000,
+ regk_eth_rw_mgm_ctrl_default = 0x00000000,
+ regk_eth_rw_test_ctrl_default = 0x00000000,
+#ifdef CONFIG_CRIS_MACH_ARTPEC3
+ regk_eth_size1518 = 0x000005ee,
+ regk_eth_size1522 = 0x000005f2,
+#else
+ regk_eth_size1518 = 0x00000000,
+ regk_eth_size1522 = 0x00000001,
+#endif
+ regk_eth_yes = 0x00000001
 };
+
 #endif /* __eth_defs_h */
diff -Nur linux-4.7.3.orig/drivers/net/cris/eth_v32.c linux-4.7.3/drivers/net/cris/eth_v32.c
--- linux-4.7.3.orig/drivers/net/cris/eth_v32.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-4.7.3/drivers/net/cris/eth_v32.c	2016-09-13 01:48:05.953910422 +0200
@@ -0,0 +1,3060 @@
+/*
+ * Driver for the ETRAX FS/Artpec-3 network controller.
+ *
+ * Copyright (c) 2003-2008 Axis Communications AB.
+ *
+ * TODO:
+ * * Decrease the amount of code running with interrupts disabled.
+ * * Rework the error handling so that we do not need to touch the tx
+ *   ring from the error interrupts. When done, we should be able to
+ *   do tx completion from the NAPI loop without disabling interrupts.
+ * * Remove the gigabit code. It's probably never going to be used.
+ */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include /* CRIS_LED_* I/O functions */ +#include +#include +#include +#include +#include +#ifdef CONFIG_ETRAXFS +#include +#else +#include +#endif +#include +#include +#include +#include +#include +#include + +#include "eth_v32.h" + +#ifndef CONFIG_ETRAXFS +#define ETH0_INTR_VECT ETH_INTR_VECT +#define ETH1_INTR_VECT ETH_INTR_VECT +#define regi_eth0 regi_eth +#define regi_eth1 regi_ +#endif + +#define DEBUG(x) +#define GET_BIT(bit,val) (((val) >> (bit)) & 0x01) + +#if defined(CONFIG_ETRAX_HAVE_PHY) || defined(CONFIG_ETRAX_PHY_FALLBACK) +#define RESET_PHY 1 +#else +#define RESET_PHY 0 +#endif + +enum { + HAVE_PHY, + NO_PHY, + FALLBACK_PHY, +}; +#if defined(CONFIG_ETRAX_PHY_FALLBACK) +#define PHY_MODE (FALLBACK_PHY) +#elif defined(CONFIG_ETRAX_NO_PHY) +#define PHY_MODE (NO_PHY) +#elif defined(CONFIG_ETRAX_HAVE_PHY) +#define PHY_MODE (HAVE_PHY) +#else +#error Unknown PHY behaviour +#endif + +static struct { + const char str[ETH_GSTRING_LEN]; +} const ethtool_stats_keys[] = { + { "tx_dma_restarts" }, + { "tx_mac_resets" }, + { "rx_dma_restarts" }, + { "rx_dma_timeouts" }, + { " dropped_rx" } +}; + +static void crisv32_eth_check_speed(unsigned long idev); +static void crisv32_eth_check_duplex(unsigned long idev); +static void update_rx_stats(struct crisv32_ethernet_local *np); +static void update_tx_stats(struct crisv32_ethernet_local *np); +static int crisv32_eth_poll(struct napi_struct *napi, int budget); +static void crisv32_eth_setup_controller(struct net_device *dev); +static int crisv32_eth_request_irqdma(struct net_device *dev); +#ifdef CONFIG_CRIS_MACH_ARTPEC3 +static void +crisv32_eth_restart_rx_dma(struct net_device* dev, + struct crisv32_ethernet_local *np); +#endif +#if 0 +static void crisv32_ethernet_bug(struct net_device *dev); +#endif + +/* + * The name of the card. Is used for messages and in the requests for + * io regions, irqs and dma channels. + */ +#ifdef CONFIG_ETRAXFS +static const char cardname[] = "ETRAX FS built-in ethernet controller"; +#else +static const char cardname[] = "ARTPEC-3 built-in ethernet controller"; +#endif + +/* Some chipset needs special care. */ +#ifndef CONFIG_ETRAX_NO_PHY +struct transceiver_ops transceivers[] = { + {0x1018, broadcom_check_speed, broadcom_check_duplex}, + {0x50EF, broadcom_check_speed, broadcom_check_duplex}, + /* TDK 2120 and TDK 2120C */ + {0xC039, tdk_check_speed, tdk_check_duplex}, + {0x039C, tdk_check_speed, tdk_check_duplex}, + /* Intel LXT972A*/ + {0x04de, intel_check_speed, intel_check_duplex}, + /* National Semiconductor DP83865 */ + {0x0017, national_check_speed, national_check_duplex}, + /* Vitesse VCS8641 */ + {0x01c1, vitesse_check_speed, vitesse_check_duplex}, + /* Davicom DM9161 */ + {0x606E, davicom_check_speed, davicom_check_duplex}, + /* Generic, must be last. */ + {0x0000, generic_check_speed, generic_check_duplex} +}; +#endif + +static struct net_device *crisv32_dev[2]; +static struct crisv32_eth_leds *crisv32_leds[3]; + +/* Default MAC address for interface 0. + * The real one will be set later. 
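+ * (The 00:40:8C prefix is the Axis Communications OUI.)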
*/ +static struct sockaddr default_mac_iface0 = + {0, {0x00, 0x40, 0x8C, 0xCD, 0x00, 0x00}}; + +#ifdef CONFIG_CPU_FREQ +static int +crisv32_ethernet_freq_notifier(struct notifier_block *nb, unsigned long val, + void *data); + +static struct notifier_block crisv32_ethernet_freq_notifier_block = { + .notifier_call = crisv32_ethernet_freq_notifier +}; +#endif + +static void receive_timeout(unsigned long arg); +static void receive_timeout_work(struct work_struct* work); +static void transmit_timeout(unsigned long arg); + +/* + * mask in and out tx/rx interrupts. + */ +static inline void crisv32_disable_tx_ints(struct crisv32_ethernet_local *np) +{ + reg_dma_rw_intr_mask intr_mask_tx = { .data = regk_dma_no }; + REG_WR(dma, np->dma_out_inst, rw_intr_mask, intr_mask_tx); +} + +static inline void crisv32_enable_tx_ints(struct crisv32_ethernet_local *np) +{ + reg_dma_rw_intr_mask intr_mask_tx = { .data = regk_dma_yes }; + REG_WR(dma, np->dma_out_inst, rw_intr_mask, intr_mask_tx); +} + +static inline void crisv32_disable_rx_ints(struct crisv32_ethernet_local *np) +{ + reg_dma_rw_intr_mask intr_mask_rx = { .in_eop = regk_dma_no }; + REG_WR(dma, np->dma_in_inst, rw_intr_mask, intr_mask_rx); +} + +static inline void crisv32_enable_rx_ints(struct crisv32_ethernet_local *np) +{ + reg_dma_rw_intr_mask intr_mask_rx = { .in_eop = regk_dma_yes }; + REG_WR(dma, np->dma_in_inst, rw_intr_mask, intr_mask_rx); +} + +static inline void crisv32_disable_eth_ints(struct crisv32_ethernet_local *np) +{ + int intr_mask_nw = 0x0; + REG_WR_INT(eth, np->eth_inst, rw_intr_mask, intr_mask_nw); +} + +static inline void crisv32_enable_eth_ints(struct crisv32_ethernet_local *np) +{ +#ifdef CONFIG_CRIS_MACH_ARTPEC3 + /* For Artpec-3 we use overrun to workaround voodoo TR 87 */ + int intr_mask_nw = 0x1c00; +#else + int intr_mask_nw = 0x1800; +#endif + REG_WR_INT(eth, np->eth_inst, rw_intr_mask, intr_mask_nw); +} + +static inline int crisv32_eth_gigabit(struct crisv32_ethernet_local *np) +{ +#ifdef CONFIG_CRIS_MACH_ARTPEC3 + return np->gigabit_mode; +#else + return 0; +#endif +} + +static inline void crisv32_eth_set_gigabit(struct crisv32_ethernet_local *np, + int g) +{ +#ifdef CONFIG_CRIS_MACH_ARTPEC3 + np->gigabit_mode = g; +#endif +} + +/* start/stop receiver */ +static inline void crisv32_start_receiver(struct crisv32_ethernet_local *np) +{ + reg_eth_rw_rec_ctrl rec_ctrl; + + rec_ctrl = REG_RD(eth, np->eth_inst, rw_rec_ctrl); + rec_ctrl.ma0 = regk_eth_yes; + rec_ctrl.broadcast = regk_eth_rec; + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl); +} + +static inline void crisv32_stop_receiver(struct crisv32_ethernet_local *np) +{ + reg_eth_rw_rec_ctrl rec_ctrl; + + rec_ctrl = REG_RD(eth, np->eth_inst, rw_rec_ctrl); + rec_ctrl.ma0 = regk_eth_no; + rec_ctrl.broadcast = regk_eth_discard; + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl); +} + +static inline void crisv32_eth_reset(struct crisv32_ethernet_local *np) +{ + reg_eth_rw_gen_ctrl gen_ctrl = { 0 }; + + gen_ctrl = REG_RD(eth, np->eth_inst, rw_gen_ctrl); + gen_ctrl.en = regk_eth_no; + REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl); + gen_ctrl.en = regk_eth_yes; + REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl); +} + +static void crisv32_eth_tx_cancel_frame(struct crisv32_ethernet_local *np) +{ + reg_eth_rw_tr_ctrl tr_ctrl; + + /* Cancel any pending transmits. This should bring us to the + excessive collisions state but it doesn't always do it. 
*/ + tr_ctrl = REG_RD(eth, np->eth_inst, rw_tr_ctrl); + tr_ctrl.cancel = 1; + REG_WR(eth, np->eth_inst, rw_tr_ctrl, tr_ctrl); + tr_ctrl.cancel = 0; + REG_WR(eth, np->eth_inst, rw_tr_ctrl, tr_ctrl); +} + +/* + * Hack to disconnect/reconnect the dma from the ethernet block while we reset + * things. TODO: verify that we don't need to disconnect out channels and + * remove that code. + * + * ARTPEC-3 has only a single ethernet block so np->eth_inst is always eth0. + * The strmux values are named slightly different, redefine to avoid #ifdefs + * in the code blocks. For artpec3 only regk_strmux_eth0 and channel 0/1 + * should be used. + */ +#ifdef CONFIG_CRIS_MACH_ARTPEC3 +#define regk_strmux_eth0 regk_strmux_eth +#define regk_strmux_eth1 regk_strmux_eth +#endif +static inline void +crisv32_disconnect_eth_tx_dma(struct crisv32_ethernet_local *np) +{ + reg_strmux_rw_cfg strmux_cfg; + + strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg); + if (np->eth_inst == regi_eth0) + strmux_cfg.dma0 = regk_strmux_off; + else + strmux_cfg.dma6 = regk_strmux_off; + REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg); +} + +static inline void crisv32_connect_eth_tx_dma(struct crisv32_ethernet_local *np) +{ + reg_strmux_rw_cfg strmux_cfg; + + strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg); + if (np->eth_inst == regi_eth0) + strmux_cfg.dma0 = regk_strmux_eth0; + else + strmux_cfg.dma6 = regk_strmux_eth1; + REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg); +} + +static inline void +crisv32_disconnect_eth_rx_dma(struct crisv32_ethernet_local *np) +{ + reg_strmux_rw_cfg strmux_cfg; + + strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg); + if (np->eth_inst == regi_eth0) + strmux_cfg.dma1 = regk_strmux_off; + else + strmux_cfg.dma7 = regk_strmux_off; + REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg); +} + +static inline void crisv32_connect_eth_rx_dma(struct crisv32_ethernet_local *np) +{ + reg_strmux_rw_cfg strmux_cfg; + + strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg); + if (np->eth_inst == regi_eth0) + strmux_cfg.dma1 = regk_strmux_eth0; + else + strmux_cfg.dma7 = regk_strmux_eth1; + REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg); +} + +static int dma_wait_busy(int inst, int timeout) +{ + reg_dma_rw_stream_cmd dma_sc; + + do { + dma_sc = REG_RD(dma, inst, rw_stream_cmd); + } while (timeout-- > 0 && dma_sc.busy); + return dma_sc.busy; +} + +static int __init crisv32_eth_request_irqdma(struct net_device *dev) +{ + struct crisv32_ethernet_local *np = netdev_priv(dev); + + /* Allocate IRQs and DMAs. 
*/ + if (np->eth_inst == regi_eth0) { + if (request_irq(DMA0_INTR_VECT, crisv32tx_eth_interrupt, + 0, "Ethernet TX", dev)) { + return -EAGAIN; + } + + if (request_irq(DMA1_INTR_VECT, crisv32rx_eth_interrupt, + 0, "Ethernet RX", dev)) + goto err0_1; + + if (crisv32_request_dma(0, cardname, DMA_VERBOSE_ON_ERROR, + 12500000, dma_eth0)) + goto err0_2; + + if (crisv32_request_dma(1, cardname, DMA_VERBOSE_ON_ERROR, + 12500000, dma_eth0)) + goto err0_3; + + if (request_irq(ETH0_INTR_VECT, crisv32nw_eth_interrupt, 0, + cardname, dev)) { + crisv32_free_dma(1); +err0_3: + crisv32_free_dma(0); +err0_2: + free_irq(DMA1_INTR_VECT, dev); +err0_1: + free_irq(DMA0_INTR_VECT, dev); + return -EAGAIN; + } + } else { + if (request_irq(DMA6_INTR_VECT, crisv32tx_eth_interrupt, + 0, cardname, dev)) + return -EAGAIN; + + if (request_irq(DMA7_INTR_VECT, crisv32rx_eth_interrupt, + 0, cardname, dev)) + goto err1_1; + + if (crisv32_request_dma(6, cardname, DMA_VERBOSE_ON_ERROR, + 0, dma_eth1)) + goto err1_2; + + if (crisv32_request_dma(7, cardname, DMA_VERBOSE_ON_ERROR, + 0, dma_eth1)) + goto err1_3; + + if (request_irq(ETH1_INTR_VECT, crisv32nw_eth_interrupt, 0, + cardname, dev)) { + crisv32_free_dma(7); +err1_3: + crisv32_free_dma(6); +err1_2: + free_irq(DMA7_INTR_VECT, dev); +err1_1: + free_irq(DMA6_INTR_VECT, dev); + return -EAGAIN; + } + } + return 0; +} + +static int __init crisv32_eth_init_phy(struct net_device *dev) +{ + struct crisv32_ethernet_local *np = netdev_priv(dev); + struct timer_list timer_init = TIMER_INITIALIZER(NULL, 0, 0); + + if (RESET_PHY) { +#ifdef CONFIG_ETRAXFS + reg_config_rw_pad_ctrl pad_ctrl; + pad_ctrl = REG_RD(config, regi_config, rw_pad_ctrl); + pad_ctrl.phyrst_n = 0; + REG_WR(config, regi_config, rw_pad_ctrl, pad_ctrl); + + udelay(500); /* RESET_LEN */ + + pad_ctrl.phyrst_n = 1; + REG_WR(config, regi_config, rw_pad_ctrl, pad_ctrl); +#else + reg_eth_rw_gen_ctrl gen_ctrl = REG_RD(eth, np->eth_inst, rw_gen_ctrl); + gen_ctrl.phyrst_n = 0; + REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl); + + udelay(500); /* RESET_LEN */ + + gen_ctrl.phyrst_n = 1; + REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl); +#endif + + udelay(200); /* RESET_WAIT */ + } + + switch (PHY_MODE) { + case FALLBACK_PHY: + /* Fall back on using fixed iff there is no PHY on */ + /* the MDIO bus */ + np->fixed_phy = crisv32_eth_probe_transceiver(dev) != 0; + if (np->fixed_phy) + printk(KERN_WARNING + "eth: No transciever found, falling back " + "to fixed phy mode\n"); + break; + + case NO_PHY: + /* Don't even bother looking for a PHY, always rely */ + /* on fixed PHY */ + np->fixed_phy = 1; + break; + + default: /* HAVE_PHY */ + /* Look for a PHY and abort if there is none, */ + /* otherwise just carry on */ + if (crisv32_eth_probe_transceiver(dev)) { + printk(KERN_WARNING + "eth: No transceiver found, " + "removing interface\n"); + return -ENODEV; + } + np->fixed_phy = 0; + } + + if (np->fixed_phy) { + reg_eth_rw_rec_ctrl rec_ctrl; + + /* speed */ + np->current_speed = 100; + np->current_speed_selection = 100; /* Auto. */ + + /* duplex */ + np->full_duplex = 1; + np->current_duplex = full; + + rec_ctrl = REG_RD(eth, np->eth_inst, rw_rec_ctrl); + rec_ctrl.duplex = regk_eth_full; + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl); + } else { + np->mii_if.supports_gmii = mii_check_gmii_support(&np->mii_if); + + /* speed */ + np->current_speed = 10; + np->current_speed_selection = 0; /* Auto. 
*/ + np->speed_timer = timer_init; + np->speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL; + np->speed_timer.data = (unsigned long) dev; + np->speed_timer.function = crisv32_eth_check_speed; + + /* duplex */ + np->full_duplex = 0; + np->current_duplex = autoneg; + np->duplex_timer = timer_init; + np->duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL; + np->duplex_timer.data = (unsigned long) dev; + np->duplex_timer.function = crisv32_eth_check_duplex; + } + + return 0; +} + +static void __init crisv32_eth_setup_controller(struct net_device *dev) +{ + struct crisv32_ethernet_local *np = netdev_priv(dev); + reg_eth_rw_gen_ctrl gen_ctrl; + + reg_eth_rw_tr_ctrl tr_ctrl = { + /* SW retransmits to avoid transmitter bugs. */ + .retry = regk_eth_no, + .pad = regk_eth_yes, + .crc = regk_eth_yes + }; + + reg_eth_rw_rec_ctrl rec_ctrl = { + .ma0 = regk_eth_no, /* enable at open() */ + .broadcast = regk_eth_no, + .max_size = regk_eth_size1522 + }; + + reg_eth_rw_ga_lo ga_lo = { 0 }; + reg_eth_rw_ga_hi ga_hi = { 0 }; + + /* + * Initialize group address registers to make sure that no + * unwanted addresses are matched. + */ + REG_WR(eth, np->eth_inst, rw_ga_lo, ga_lo); + REG_WR(eth, np->eth_inst, rw_ga_hi, ga_hi); + + /* Configure receiver and transmitter */ + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl); + REG_WR(eth, np->eth_inst, rw_tr_ctrl, tr_ctrl); + + /* + * Read from rw_gen_ctrl so that we don't override any previous + * configuration. + */ + gen_ctrl = REG_RD(eth, np->eth_inst, rw_gen_ctrl); + gen_ctrl.phy = regk_eth_mii_clk; +#ifdef CONFIG_ETRAXFS + /* On ETRAX FS, this bit has reversed meaning */ + gen_ctrl.flow_ctrl = regk_eth_no; +#else + gen_ctrl.flow_ctrl = regk_eth_yes; +#endif + + /* Enable ethernet controller with mii clk. */ + REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl); + gen_ctrl.en = regk_eth_yes; + REG_WR(eth, np->eth_inst, rw_gen_ctrl, gen_ctrl); +} + +static void crisv32_eth_reset_rx_ring(struct net_device *dev) +{ + struct crisv32_ethernet_local *np = netdev_priv(dev); + int i; + + /* cleanup the rx-ring */ + for (i = 0; i < NBR_RX_DESC; i++) { + struct sk_buff *skb; + skb = np->dma_rx_descr_list[i].skb; + if (!skb + || (np->dma_rx_descr_list[i].descr.buf != + (void *)virt_to_phys(skb->data))) { + if (skb) + dev_kfree_skb(skb); + skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE); + np->dma_rx_descr_list[i].skb = skb; + np->dma_rx_descr_list[i].descr.buf = + (char*)virt_to_phys(skb->data); + } + if (np->dma_rx_descr_list[i].descr.in_eop) + np->rx_restarts_dropped++; + np->dma_rx_descr_list[i].descr.after = + (char*)virt_to_phys(skb->data + + MAX_MEDIA_DATA_SIZE); + np->dma_rx_descr_list[i].descr.eol = 0; + np->dma_rx_descr_list[i].descr.in_eop = 0; + /* Workaround cache bug */ + flush_dma_descr(&np->dma_rx_descr_list[i].descr, 1); + } + + /* reset rx-ring */ + np->active_rx_desc = &np->dma_rx_descr_list[0]; + np->prev_rx_desc = &np->dma_rx_descr_list[NBR_RX_DESC - 1]; + np->last_rx_desc = np->prev_rx_desc; + np->dma_rx_descr_list[NBR_RX_DESC - 1].descr.eol = 1; + flush_dma_descr(&np->dma_rx_descr_list[NBR_RX_DESC - 1].descr, 0); + /* ready to accept new packets. */ + np->new_rx_package = 1; + + /* Fill context descriptors. 
*/ + np->ctxt_in.next = 0; + np->ctxt_in.saved_data = + (void *)virt_to_phys(&np->active_rx_desc->descr); + np->ctxt_in.saved_data_buf = np->active_rx_desc->descr.buf; +} + +static inline int crisv32_eth_tx_ring_full(struct crisv32_ethernet_local *np) +{ + crisv32_eth_descr *active = np->active_tx_desc; + +#ifdef CONFIG_CRIS_MACH_ARTPEC3 + active = phys_to_virt((unsigned long)active->descr.next); +#endif + if (active == np->catch_tx_desc) + return 1; + return 0; +} + +static void crisv32_eth_reset_tx_ring(struct net_device *dev) +{ + struct crisv32_ethernet_local *np = netdev_priv(dev); + + /* free un-handled tx packets */ + while (np->txpackets || np->catch_tx_desc != np->active_tx_desc) { + np->txpackets--; + if (np->catch_tx_desc->skb) + dev_kfree_skb(np->catch_tx_desc->skb); + + np->catch_tx_desc->skb = 0; + np->catch_tx_desc = + phys_to_virt((int)np->catch_tx_desc->descr.next); + } + + WARN_ON(np->txpackets != 0); + np->txpackets = 0; + + /* reset tx-ring */ + np->dma_tx_descr_list[0].descr.buf = + np->dma_tx_descr_list[0].descr.after = 0; + np->dma_tx_descr_list[0].descr.eol = 1; + + np->active_tx_desc = &np->dma_tx_descr_list[0]; + np->prev_tx_desc = &np->dma_tx_descr_list[NBR_TX_DESC - 1]; + np->catch_tx_desc = &np->dma_tx_descr_list[0]; + + np->ctxt_out.next = 0; + np->ctxt_out.saved_data = + (void *)virt_to_phys(&np->dma_tx_descr_list[0].descr); + +} + +static void crisv32_eth_reset_rings(struct net_device *dev) +{ + crisv32_eth_reset_tx_ring(dev); + crisv32_eth_reset_rx_ring(dev); +} + +/* + * Really advance the receive ring. RX interrupts must be off. + */ +static void __crisv32_eth_rx_ring_advance(struct crisv32_ethernet_local *np) +{ + if (np->newbuf) + np->active_rx_desc->descr.buf = (void *) np->newbuf; + np->active_rx_desc->descr.after = + np->active_rx_desc->descr.buf + MAX_MEDIA_DATA_SIZE; + np->active_rx_desc->descr.eol = 1; + np->active_rx_desc->descr.in_eop = 0; + np->active_rx_desc = phys_to_virt((int)np->active_rx_desc->descr.next); + barrier(); + np->prev_rx_desc->descr.eol = 0; + + /* Workaround cache bug. */ + flush_dma_descr(&np->prev_rx_desc->descr, 0); + np->prev_rx_desc = phys_to_virt((int)np->prev_rx_desc->descr.next); + flush_dma_descr(&np->prev_rx_desc->descr, 1); +} + +/* + * Advance the receive ring. RX interrupts must be off. + */ +static inline void +crisv32_eth_rx_ring_advance(struct crisv32_ethernet_local *np) +{ + /* + * When the input DMA reaches eol precaution must be taken, otherwise + * the DMA could stop. The problem occurs if the eol flag is re-placed + * on the descriptor that the DMA stands on before the DMA proceed to + * the next descriptor. This case could, for example, happen if there + * is a traffic burst and then the network goes silent. To prevent this + * we make sure that we do not set the eol flag on the descriptor that + * the DMA stands on. + */ + unsigned long dma_pos; + + /* Get the current input dma position. */ + dma_pos = REG_RD_INT(dma, np->dma_in_inst, rw_saved_data); + + if (virt_to_phys(&np->active_rx_desc->descr) != dma_pos) { + crisv32_eth_descr *cur, *nxt; + + /* Now really advance the ring one step. */ + __crisv32_eth_rx_ring_advance(np); + + cur = np->active_rx_desc; + nxt = (void *)phys_to_virt((unsigned long)cur->descr.next); + flush_dma_descr(&cur->descr, 0); + flush_dma_descr(&nxt->descr, 0); + if (!cur->descr.in_eop && nxt->descr.in_eop) { + /* TODO: Investigate this more. 
The DMA seems to have + skipped a descriptor, possibly due to incoherence + between the CPU L1 cache and the DMA updates to the + descriptor. */ + np->newbuf = (unsigned long) np->active_rx_desc->descr.buf; + __crisv32_eth_rx_ring_advance(np); + } + /* flush after peek. */ + flush_dma_descr(&cur->descr, 0); + flush_dma_descr(&nxt->descr, 0); + } else { + /* delay the advancing of the ring. */ + np->new_rx_package = 0; + } +} + +static void __init crisv32_eth_init_rings(struct net_device *dev) +{ + struct crisv32_ethernet_local *np = netdev_priv(dev); + int i; + + /* Initialise receive descriptors for interface. */ + for (i = 0; i < NBR_RX_DESC; i++) { + struct sk_buff *skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE); + + np->dma_rx_descr_list[i].skb = skb; + np->dma_rx_descr_list[i].descr.buf = + (char*)virt_to_phys(skb->data); + np->dma_rx_descr_list[i].descr.after = + (char*)virt_to_phys(skb->data + MAX_MEDIA_DATA_SIZE); + + np->dma_rx_descr_list[i].descr.eol = 0; + np->dma_rx_descr_list[i].descr.in_eop = 0; + np->dma_rx_descr_list[i].descr.next = + (void *) virt_to_phys(&np->dma_rx_descr_list[i + 1].descr); + } + /* bend the list into a ring */ + np->dma_rx_descr_list[NBR_RX_DESC - 1].descr.next = + (void *) virt_to_phys(&np->dma_rx_descr_list[0].descr); + + /* Initialize transmit descriptors. */ + for (i = 0; i < NBR_TX_DESC; i++) { + np->dma_tx_descr_list[i].descr.wait = 1; + np->dma_tx_descr_list[i].descr.eol = 0; + np->dma_tx_descr_list[i].descr.out_eop = 0; + np->dma_tx_descr_list[i].descr.next = + (void*)virt_to_phys(&np->dma_tx_descr_list[i+1].descr); + } + /* bend the list into a ring */ + np->dma_tx_descr_list[NBR_TX_DESC - 1].descr.next = + (void *) virt_to_phys(&np->dma_tx_descr_list[0].descr); + + crisv32_eth_reset_rings(dev); +} + +static void __init crisv32_init_leds(int ledgrp, struct net_device *dev) +{ + struct timer_list timer_init = TIMER_INITIALIZER(NULL, 0, 0); + struct crisv32_ethernet_local *np = netdev_priv(dev); + + /* Use already allocated led grp if initialized */ + if (crisv32_leds[ledgrp] != NULL) { + np->leds = crisv32_leds[ledgrp]; + return; + } + + crisv32_leds[ledgrp] = + kmalloc(sizeof(struct crisv32_eth_leds), GFP_KERNEL); + + crisv32_leds[ledgrp]->ledgrp = ledgrp; + crisv32_leds[ledgrp]->led_active = 0; + crisv32_leds[ledgrp]->ifisup[0] = 0; + crisv32_leds[ledgrp]->ifisup[1] = 0; + /* NOTE: Should this value be set to zero as the jiffies timer + can wrap? 
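+	   (Leaving it at jiffies is fine as long as led_next_time is only
+	   used in time_after()/time_before() comparisons, which are
+	   wrap-safe.)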
*/ + crisv32_leds[ledgrp]->led_next_time = jiffies; + + crisv32_leds[ledgrp]->clear_led_timer = timer_init; + crisv32_leds[ledgrp]->clear_led_timer.function = + crisv32_clear_network_leds; + crisv32_leds[ledgrp]->clear_led_timer.data = (unsigned long) dev; + + spin_lock_init(&crisv32_leds[ledgrp]->led_lock); + + np->leds = crisv32_leds[ledgrp]; +} + +static int __init crisv32_ethernet_init(void) +{ + struct crisv32_ethernet_local *np; + int ret = 0; + +#ifdef CONFIG_ETRAXFS + printk("ETRAX FS 10/100MBit ethernet v0.01 (c)" + " 2003 Axis Communications AB\n"); +#else + printk("ARTPEC-3 10/100 MBit ethernet (c)" + " 2003-2009 Axis Communications AB\n"); +#endif + +#ifdef CONFIG_CRIS_MACH_ARTPEC3 + { + reg_clkgen_rw_clk_ctrl clk_ctrl = REG_RD(clkgen, regi_clkgen, + rw_clk_ctrl); + clk_ctrl.eth = clk_ctrl.dma0_1_eth = regk_clkgen_yes; + REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl); + } +#endif +{ + int iface0 = 0; + +#ifdef CONFIG_CRIS_MACH_ARTPEC3 + if (crisv32_pinmux_alloc_fixed(pinmux_eth)) + panic("Eth pinmux\n"); +#endif + + if (!(crisv32_dev[iface0] = alloc_etherdev(sizeof *np))) + return -ENOMEM; + + ret |= crisv32_ethernet_device_init(crisv32_dev[iface0]); + + crisv32_init_leds(CRIS_LED_GRP_NONE,crisv32_dev[iface0]); + + np = (struct crisv32_ethernet_local *) netdev_priv(crisv32_dev[iface0]); + np->eth_inst = regi_eth0; + np->dma_out_inst = regi_dma0; + np->dma_in_inst = regi_dma1; + + np->mii_if.dev = crisv32_dev[iface0]; + np->mii_if.mdio_read = crisv32_eth_get_mdio_reg; + np->mii_if.mdio_write = crisv32_eth_set_mdio_reg; + np->mii_if.phy_id_mask = 0x1f; + np->mii_if.reg_num_mask = 0x1f; + + np->use_leds = 1; + np->autoneg_normal = 1; + + + register_netdev(crisv32_dev[iface0]); + + /* Set up default MAC address */ + memcpy(crisv32_dev[iface0]->dev_addr, default_mac_iface0.sa_data, 6); + crisv32_eth_set_mac_address(crisv32_dev[iface0], &default_mac_iface0); + if (crisv32_eth_request_irqdma(crisv32_dev[iface0])) + printk("%s: eth0 unable to allocate IRQ and DMA resources\n", + __func__); + np->txpackets = 0; + crisv32_eth_init_rings(crisv32_dev[iface0]); + crisv32_eth_setup_controller(crisv32_dev[iface0]); + ret |= crisv32_eth_init_phy(crisv32_dev[iface0]); + if (ret) { + unregister_netdev(crisv32_dev[iface0]); + return ret; + } +} + +#ifdef CONFIG_ETRAX_ETHERNET_IFACE1 +{ + int iface1 = 0; + /* Default MAC address for interface 1. + * The real one will be set later. 
*/ + static struct sockaddr default_mac_iface1 = + {0, {0x00, 0x40, 0x8C, 0xCD, 0x00, 0x01}}; + + if (crisv32_pinmux_alloc_fixed(pinmux_eth1)) + panic("Eth pinmux\n"); + + /* Increase index to device array if interface 0 is enabled as well.*/ +#ifdef CONFIG_ETRAX_ETHERNET_IFACE0 + iface1++; +#endif + if (!(crisv32_dev[iface1] = alloc_etherdev(sizeof *np))) + return -ENOMEM; + + ret |= crisv32_ethernet_device_init(crisv32_dev[iface1]); + + crisv32_init_leds(CRIS_LED_GRP_NONE,crisv32_dev[iface1]); + + np = (struct crisv32_ethernet_local *) netdev_priv(crisv32_dev[iface1]); + np->eth_inst = regi_eth1; + np->dma_out_inst = regi_dma6; + np->dma_in_inst = regi_dma7; + + np->mii_if.dev = crisv32_dev[iface1]; + np->mii_if.mdio_read = crisv32_eth_get_mdio_reg; + np->mii_if.mdio_write = crisv32_eth_set_mdio_reg; + np->mii_if.phy_id_mask = 0x1f; + np->mii_if.reg_num_mask = 0x1f; + + + register_netdev(crisv32_dev[iface1]); + + /* Set up default MAC address */ + memcpy(crisv32_dev[iface1]->dev_addr, default_mac_iface1.sa_data, 6); + crisv32_eth_set_mac_address(crisv32_dev[iface1], &default_mac_iface1); + + if (crisv32_eth_request_irqdma(crisv32_dev[iface1])) + printk("%s: eth1 unable to allocate IRQ and DMA resources\n", + __func__); + np->txpackets = 0; + crisv32_eth_init_rings(crisv32_dev[iface1]); + crisv32_eth_setup_controller(crisv32_dev[iface1]); + ret |= crisv32_eth_init_phy(crisv32_dev[iface1]); + if (ret) { + unregister_netdev(crisv32_dev[iface1]); + return ret; + } +} +#endif /* CONFIG_ETRAX_ETHERNET_IFACE1 */ + +#ifdef CONFIG_CPU_FREQ + cpufreq_register_notifier(&crisv32_ethernet_freq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); +#endif + + return ret; +} + +static struct net_device_ops crisv32_netdev_ops = { + .ndo_open = crisv32_eth_open, + .ndo_stop = crisv32_eth_close, + .ndo_start_xmit = crisv32_eth_send_packet, + .ndo_set_rx_mode = crisv32_eth_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = crisv32_eth_set_mac_address, + .ndo_do_ioctl =crisv32_eth_ioctl, + .ndo_get_stats = crisv32_get_stats, + .ndo_tx_timeout = crisv32_eth_do_tx_recovery, + .ndo_set_config = crisv32_eth_set_config, +}; + +static int __init crisv32_ethernet_device_init(struct net_device *dev) +{ + struct crisv32_ethernet_local *np; + struct timer_list timer_init = TIMER_INITIALIZER(NULL, 0, 0); + + dev->base_addr = 0; /* Just to have something to show. */ + + /* we do our own locking */ + dev->features |= NETIF_F_LLTX; + + /* We use several IRQs and DMAs so just report 0 here. */ + dev->irq = 0; + dev->dma = 0; + + /* + * Fill in our handlers so the network layer can talk to us in the + * future. + */ + dev->netdev_ops = &crisv32_netdev_ops; + dev->ethtool_ops = &crisv32_ethtool_ops; + dev->watchdog_timeo = HZ * 10; +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = crisv32_netpoll; +#endif + np = netdev_priv(dev); + np->dev = dev; + + /* + * 8 skbs keeps the system very reponsive even under high load. + * At 64 the system locks, pretty much the same way as without NAPI. 
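+	 * (The 8 passed to netif_napi_add() below is the NAPI weight,
+	 * i.e. the maximum number of packets processed in one poll.)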
+ * + * TODO: meassure with 2 interfaces + */ + netif_napi_add(dev, &np->napi, crisv32_eth_poll, 8); + + spin_lock_init(&np->lock); + spin_lock_init(&np->transceiver_lock); + + np->receive_timer = timer_init; + np->receive_timer.data = (unsigned)dev; + np->receive_timer.function = receive_timeout; + + INIT_WORK(&np->receive_work, receive_timeout_work); + + np->transmit_timer = timer_init; + np->transmit_timer.data = (unsigned)dev; + np->transmit_timer.function = transmit_timeout; + + return 0; +} + +static int crisv32_eth_open(struct net_device *dev) +{ + struct sockaddr mac_addr; + reg_dma_rw_ack_intr ack_intr = { .data = 1, .in_eop = 1 }; + reg_eth_rw_clr_err clr_err = {.clr = regk_eth_yes}; + /* + * dont interrupt us at any stat counter thresholds, only at urun + * and exc_col. + */ +#ifdef CONFIG_CRIS_MACH_ARTPEC3 + /* For Artpec-3 we use overrun to workaround voodoo TR 87 */ + int intr_mask_nw = 0x1c00; +#else + int intr_mask_nw = 0x1800; +#endif + int eth_ack_intr = 0xffff; + struct crisv32_ethernet_local *np = netdev_priv(dev); + + spin_lock(&np->lock); + crisv32_eth_set_gigabit(np, 0); + + crisv32_disable_tx_ints(np); + crisv32_disable_rx_ints(np); + + REG_WR(eth, np->eth_inst, rw_clr_err, clr_err); + REG_WR_INT(eth, np->eth_inst, rw_ack_intr, eth_ack_intr); + REG_WR_INT(eth, np->eth_inst, rw_intr_mask, intr_mask_nw); + crisv32_eth_reset_rings(dev); + + /* Give the hardware an idea of what MAC address we want. */ + memcpy(mac_addr.sa_data, dev->dev_addr, dev->addr_len); + crisv32_eth_set_mac_address(dev, &mac_addr); + + /* Enable irq and make sure that the irqs are cleared. */ + REG_WR(dma, np->dma_out_inst, rw_ack_intr, ack_intr); + REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr); + + crisv32_disconnect_eth_rx_dma(np); + + /* Prepare input DMA. */ + DMA_RESET(np->dma_in_inst); + DMA_ENABLE(np->dma_in_inst); +#ifdef CONFIG_CRIS_MACH_ARTPEC3 + DMA_WR_CMD(np->dma_in_inst, regk_dma_set_w_size2); +#endif + DMA_START_CONTEXT(np->dma_in_inst, virt_to_phys(&np->ctxt_in)); + DMA_CONTINUE(np->dma_in_inst); + crisv32_enable_rx_ints(np); + crisv32_start_receiver(np); + + /* Prepare output DMA. */ + DMA_RESET(np->dma_out_inst); + DMA_ENABLE(np->dma_out_inst); +#ifdef CONFIG_CRIS_MACH_ARTPEC3 + DMA_WR_CMD(np->dma_out_inst, regk_dma_set_w_size4); +#endif + crisv32_connect_eth_rx_dma(np); + + netif_start_queue(dev); + crisv32_enable_tx_ints(np); + + if (!np->fixed_phy) { + /* Start duplex/speed timers */ + if (!timer_pending(&np->speed_timer)) + add_timer(&np->speed_timer); + if (!timer_pending(&np->duplex_timer)) + add_timer(&np->duplex_timer); + } + + spin_unlock(&np->lock); + /* + * We are now ready to accept transmit requests from the queueing + * layer of the networking. + */ + np->link = 1; + netif_carrier_on(dev); + napi_enable(&np->napi); + + return 0; +} + +static int crisv32_eth_close(struct net_device *dev) +{ + reg_dma_rw_ack_intr ack_intr = {0}; + + struct crisv32_ethernet_local *np = netdev_priv(dev); + unsigned long flags; + + del_timer(&np->transmit_timer); + spin_lock_irqsave(&np->lock, flags); + + /* stop the receiver before the DMA channels to avoid overruns. */ + crisv32_disable_rx_ints(np); + napi_disable(&np->napi); + crisv32_stop_receiver(np); + + netif_stop_queue(dev); + + /* Reset the TX DMA in case it has hung on something. */ + DMA_RESET(np->dma_in_inst); + + /* Stop DMA */ + DMA_STOP(np->dma_in_inst); + DMA_STOP(np->dma_out_inst); + + /* Disable irq and make sure that the irqs are cleared. 
*/
+	crisv32_disable_tx_ints(np);
+	ack_intr.data = 1;
+	REG_WR(dma, np->dma_out_inst, rw_ack_intr, ack_intr);
+
+	ack_intr.in_eop = 1;
+	REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
+
+	np->sender_started = 0;
+	spin_unlock_irqrestore(&np->lock, flags);
+
+	/* Update the statistics. */
+	update_rx_stats(np);
+	update_tx_stats(np);
+
+	if (!np->fixed_phy) {
+		/* Stop speed/duplex timers */
+		del_timer(&np->speed_timer);
+		del_timer(&np->duplex_timer);
+	}
+
+	return 0;
+}
+
+static int crisv32_eth_set_mac_address(struct net_device *dev, void *vpntr)
+{
+	int i;
+	static int first = 1;
+
+	unsigned char *addr = ((struct sockaddr*)vpntr)->sa_data;
+
+	reg_eth_rw_ma0_lo ma0_lo =
+		{ addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24)};
+
+	reg_eth_rw_ma0_hi ma0_hi = { addr[4] | (addr[5] << 8) };
+
+	struct crisv32_ethernet_local *np = netdev_priv(dev);
+
+	/* Remember the address. */
+	memcpy(dev->dev_addr, addr, dev->addr_len);
+
+	/*
+	 * Write the address to the hardware.
+	 * Note the way the address is wrapped:
+	 * ma0_lo = a0_0 | (a0_1 << 8) | (a0_2 << 16) | (a0_3 << 24);
+	 * ma0_hi = a0_4 | (a0_5 << 8);
+	 */
+	REG_WR(eth, np->eth_inst, rw_ma0_lo, ma0_lo);
+	REG_WR(eth, np->eth_inst, rw_ma0_hi, ma0_hi);
+
+	if (first) {
+		printk(KERN_INFO "%s: changed MAC to ", dev->name);
+
+		for (i = 0; i < 5; i++)
+			printk("%02X:", dev->dev_addr[i]);
+		printk("%02X\n", dev->dev_addr[i]);
+
+		first = 0;
+	}
+
+	return 0;
+}
+
+static irqreturn_t crisv32rx_eth_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *) dev_id;
+	struct crisv32_ethernet_local *np = netdev_priv(dev);
+	reg_dma_r_masked_intr masked_in;
+
+	masked_in = REG_RD(dma, np->dma_in_inst, r_masked_intr);
+
+	if (masked_in.in_eop) {
+		reg_dma_rw_ack_intr ack_intr = {0};
+
+		/*
+		 * Ack the rx irq even if we are not prepared to start
+		 * polling. This is needed to handle incoming packets
+		 * during the stop sequence.
+		 */
+		ack_intr.in_eop = 1;
+		REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
+
+		mod_timer(&np->receive_timer, jiffies + HZ);
+		np->do_rx_recovery = 0;
+
+		if (napi_schedule_prep(&np->napi)) {
+			crisv32_disable_rx_ints(np);
+			crisv32_disable_tx_ints(np);
+			/* put us onto the poll list */
+			__napi_schedule(&np->napi);
+		}
+	} else {
+		/* Unexpected, ACK it and hope for the best. */
+		reg_dma_rw_ack_intr ack_intr = {
+			.group = 1,
+			.ctxt = 1,
+			.data = 1,
+			.in_eop = 0,
+			.stream_cmd = 1,
+			.dummy1 = ~0
+		};
+		REG_WR(dma, np->dma_in_inst, rw_ack_intr, ack_intr);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static inline void crisv32_eth_roll_tx_timer(struct crisv32_ethernet_local *np)
+{
+	/* If there are more packets in the ring, roll the tx timer. */
+	if (np->txpackets) {
+		/* Eth pause frames may halt us for up to 320ms (100mbit). */
+		unsigned long timeout = jiffies + (HZ / 3) + 1;
+		mod_timer(&np->transmit_timer, timeout);
+	}
+	else
+		del_timer(&np->transmit_timer);
+}
+
+/* Call with np->lock held. */
+static void _crisv32_tx_ring_advance(struct crisv32_ethernet_local *np,
+				     int cleanup)
+{
+	reg_dma_rw_stat stat;
+	dma_descr_data *dma_pos;
+	struct net_device *dev = np->dev;
+	int eol;
+
+	/* Get the current output dma position. */
+	dma_pos = phys_to_virt(REG_RD_INT(dma, np->dma_out_inst, rw_data));
+	stat = REG_RD(dma, np->dma_out_inst, rw_stat);
+	eol = stat.list_state == regk_dma_data_at_eol;
+	if (cleanup || eol)
+		dma_pos = &np->active_tx_desc->descr;
+
+	/* Take care of transmitted dma descriptors and report sent packets. */
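+	/* Free each completed skb and advance catch_tx_desc until it
+	   catches up with the DMA position computed above (on cleanup,
+	   until every outstanding packet has been freed). */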
+	while (np->txpackets && (&np->catch_tx_desc->descr != dma_pos)) {
+		/* Update sent packet statistics. */
+		np->stats.tx_bytes += np->catch_tx_desc->skb->len;
+		np->stats.tx_packets++;
+
+		dev_kfree_skb_any(np->catch_tx_desc->skb);
+		np->catch_tx_desc->skb = 0;
+		np->txpackets--;
+		np->catch_tx_desc->descr.buf = 0;
+		np->catch_tx_desc =
+			phys_to_virt((int)np->catch_tx_desc->descr.next);
+		np->do_tx_recovery = 0;
+		np->retrans = 0;
+
+		netif_wake_queue(dev);
+	}
+}
+
+static inline void crisv32_tx_ring_advance(struct crisv32_ethernet_local *np)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&np->lock, flags);
+	_crisv32_tx_ring_advance(np, 0);
+	crisv32_eth_roll_tx_timer(np);
+	spin_unlock_irqrestore(&np->lock, flags);
+}
+
+static inline int crisv32_tx_complete(struct crisv32_ethernet_local *np)
+{
+	reg_dma_rw_ack_intr ack_intr = { .data = 1 };
+	reg_dma_r_intr ints;
+	int r = 0;
+
+	/* We are interested in the unmasked raw interrupt source here. When
+	   polling with tx interrupts masked off we still want to do
+	   tx completion when the DMA makes progress. */
+	ints = REG_RD(dma, np->dma_out_inst, r_intr);
+	if (ints.data)
+	{
+		/* ack the interrupt, if it was active */
+		REG_WR(dma, np->dma_out_inst, rw_ack_intr, ack_intr);
+		crisv32_tx_ring_advance(np);
+		r = 1;
+	}
+	return r;
+}
+
+static irqreturn_t crisv32tx_eth_interrupt(int irq, void *dev_id)
+{
+	struct crisv32_ethernet_local *np = netdev_priv(dev_id);
+
+	crisv32_tx_complete(np);
+	return IRQ_HANDLED;
+}
+
+
+/* Update receive errors. */
+static void
+update_rx_stats(struct crisv32_ethernet_local *np)
+{
+	reg_eth_rs_rec_cnt r;
+
+	r = REG_RD(eth, np->eth_inst, rs_rec_cnt);
+
+	np->stats.rx_over_errors += r.congestion;
+	np->stats.rx_crc_errors += r.crc_err;
+	np->stats.rx_frame_errors += r.align_err;
+	np->stats.rx_length_errors += r.oversize;
+	np->stats.rx_errors += r.crc_err + r.align_err +
+		r.oversize + r.congestion;
+}
+
+/* Update transmit errors. */
+static void update_tx_stats(struct crisv32_ethernet_local *np)
+{
+	reg_eth_rs_tr_cnt r;
+	reg_eth_rs_phy_cnt rp;
+
+	r = REG_RD(eth, np->eth_inst, rs_tr_cnt);
+	rp = REG_RD(eth, np->eth_inst, rs_phy_cnt);
+
+	/* r.deferred is not good for counting collisions because it also
+	   includes frames that have to wait for the interframe gap. That
+	   means we get deferred frames even when in full duplex.
+	   Here we don't actually count the number of collisions that
+	   occurred (artpec3 seems to lack such a counter), instead we count
+	   the number of frames that collide once or more. */
+	np->stats.collisions += r.mult_col + r.single_col;
+	np->stats.tx_window_errors += r.late_col;
+	np->stats.tx_carrier_errors += rp.carrier_loss;
+
+	/* Ordinary collisions are not errors, they are just part of
+	   ethernet's bus arbitration and congestion control mechanisms.
+	   Late collisions are serious errors though. */
+	np->stats.tx_errors += r.late_col;
+}
+
+/* Get current statistics. */
+static struct net_device_stats *crisv32_get_stats(struct net_device *dev)
+{
+	unsigned long flags;
+	struct crisv32_ethernet_local *np = netdev_priv(dev);
+
+	spin_lock_irqsave(&np->lock, flags);
+
+	update_rx_stats(np);
+	update_tx_stats(np);
+
+	spin_unlock_irqrestore(&np->lock, flags);
+
+	return &np->stats;
+}
+
+/* Check for network errors. This acknowledges the received interrupt.
*/ +static irqreturn_t crisv32nw_eth_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct crisv32_ethernet_local *np = netdev_priv(dev); + reg_eth_r_masked_intr intr_mask; + int ack_intr = 0xffff; + reg_eth_rw_clr_err clr_err; + + intr_mask = REG_RD(eth, np->eth_inst, r_masked_intr); + +#ifdef CONFIG_CRIS_MACH_ARTPEC3 + /* Only apply the workaround if it is not already pending. + enable_eth_ints will re-enable the orun interrupt regardless + of pending_overrun. */ + if (intr_mask.orun && !np->pending_overrun) { + reg_eth_rw_rec_ctrl rec_ctrl = + REG_RD(eth, np->eth_inst, rw_rec_ctrl); + np->saved_rec_ctrl = rec_ctrl; + np->overrun_set = 1; + DMA_STOP(np->dma_in_inst); + rec_ctrl.ma0 = regk_eth_no; + rec_ctrl.broadcast = regk_eth_no; + REG_WR(eth, np->eth_inst, rw_rec_ctrl, rec_ctrl); + np->saved_ga_lo = REG_RD_INT(eth, np->eth_inst, rw_ga_lo); + np->saved_ga_hi = REG_RD_INT(eth, np->eth_inst, rw_ga_hi); + REG_WR_INT(eth, np->eth_inst, rw_ga_lo, 0); + REG_WR_INT(eth, np->eth_inst, rw_ga_hi, 0); + REG_WR_INT(eth, np->eth_inst, rw_intr_mask, + REG_RD_INT(eth, np->eth_inst, rw_intr_mask) & 0xfbff); + REG_WR_INT(eth, np->eth_inst, rw_ack_intr, 0x400); + intr_mask.orun = 0; + np->pending_overrun = 1; + if (!np->napi_processing) + crisv32_eth_restart_rx_dma(np->dev, np); + + return IRQ_HANDLED; + } +#endif + + /* + * Check for underrun and/or excessive collisions. Note that the + * rw_clr_err register clears both underrun and excessive collision + * errors, so there's no need to check them separately. + */ + if (np->sender_started + && (intr_mask.urun || intr_mask.exc_col)) { + unsigned long flags; + + /* Underrun are considered to be tx-errors. */ + np->stats.tx_errors += intr_mask.urun; + np->stats.tx_fifo_errors += intr_mask.urun; + + /* + * Protect against the tx-interrupt messing with + * the tx-ring. + */ + spin_lock_irqsave(&np->lock, flags); + + /* DMA should have stopped now, eat from the ring before + removing anything due to tx errors. */ + _crisv32_tx_ring_advance(np, 0); + + /* + * Drop packets after 15 retries. + * TODO: Add backoff. + */ + if (np->retrans > 15 && np->txpackets) { + dev_kfree_skb_irq(np->catch_tx_desc->skb); + np->catch_tx_desc->skb = 0; + np->catch_tx_desc->descr.buf = 0; + np->catch_tx_desc = + phys_to_virt((int) + np->catch_tx_desc->descr.next); + flush_dma_descr(&np->catch_tx_desc->descr, 0); + + np->txpackets--; + np->retrans = 0; + netif_wake_queue(dev); + np->stats.tx_dropped++; + } + np->ctxt_out.next = 0; + if (np->txpackets) { + np->retrans++; + np->ctxt_out.saved_data = (void *) + virt_to_phys(&np->catch_tx_desc->descr); + np->ctxt_out.saved_data_buf = + np->catch_tx_desc->descr.buf; + WARN_ON(!np->ctxt_out.saved_data_buf); + flush_dma_descr(&np->catch_tx_desc->descr, 0); + cris_flush_cache_range(&np->ctxt_out, + sizeof np->ctxt_out); + + /* restart the DMA */ + DMA_START_CONTEXT(np->dma_out_inst, + (int) virt_to_phys(&np->ctxt_out)); + np->sender_started = 1; + } + else { + /* Load dummy context but do not load the data + descriptor nor start the burst. This brings the + buggy eth transmitter back in sync with the DMA + avoiding malformed frames. */ + REG_WR(dma, np->dma_out_inst, rw_group_down, + (int) virt_to_phys(&np->ctxt