-rw-r--r--  Documentation/devicetree/bindings/i2c/trivial-devices.txt | 1
-rw-r--r--  arch/arm/Makefile | 8
-rw-r--r--  arch/arm/boot/dts/Makefile | 2
-rw-r--r--  arch/arm/boot/dts/tegra124-apalis-eval.dts | 225
-rw-r--r--  arch/arm/boot/dts/tegra124-apalis-v1.2-eval.dts | 225
-rw-r--r--  arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-displays.dtsi | 12
-rw-r--r--  arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-fixed.dtsi | 229
-rw-r--r--  arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-gpio.dtsi | 66
-rw-r--r--  arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-keys.dtsi | 24
-rw-r--r--  arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-pinmux.dtsi | 1516
-rw-r--r--  arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-pmic.dtsi | 498
-rw-r--r--  arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-v1.2-displays.dtsi | 12
-rw-r--r--  arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-v1.2-fixed.dtsi | 229
-rw-r--r--  arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-v1.2-gpio.dtsi | 66
-rw-r--r--  arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-v1.2-pinmux.dtsi | 1522
-rw-r--r--  arch/arm/configs/apalis-tk1_defconfig | 443
-rw-r--r--  arch/arm/include/asm/ftrace.h | 2
-rw-r--r--  arch/arm/kernel/return_address.c | 5
-rw-r--r--  arch/arm/mach-tegra/Kconfig | 11
-rw-r--r--  arch/arm/mach-tegra/Makefile | 12
-rw-r--r--  arch/arm/mach-tegra/board-apalis-tk1-memory.c | 2780
-rw-r--r--  arch/arm/mach-tegra/board-apalis-tk1-panel.c | 854
-rw-r--r--  arch/arm/mach-tegra/board-apalis-tk1-power.c | 388
-rw-r--r--  arch/arm/mach-tegra/board-apalis-tk1-sdhci.c | 270
-rw-r--r--  arch/arm/mach-tegra/board-apalis-tk1-sensors.c | 2072
-rw-r--r--  arch/arm/mach-tegra/board-apalis-tk1-sysedp.c | 193
-rw-r--r--  arch/arm/mach-tegra/board-apalis-tk1.c | 645
-rw-r--r--  arch/arm/mach-tegra/board-apalis-tk1.h | 162
-rw-r--r--  arch/arm/mach-tegra/board-ardbeg-sensors.c | 249
-rw-r--r--  arch/arm/mach-tegra/board-ardbeg.c | 17
-rw-r--r--  arch/arm/mach-tegra/common.c | 11
-rw-r--r--  arch/arm/mach-tegra/include/mach/dc.h | 7
-rw-r--r--  arch/arm/mach-tegra/panel-a-edp-1080p-14-0.c | 25
-rw-r--r--  arch/arm/mach-tegra/panel-c-lvds-1366-14.c | 34
-rw-r--r--  arch/arm/mach-tegra/powergate-t12x.c | 2
-rw-r--r--  arch/arm/mach-tegra/tegra11_soctherm.c | 3
-rw-r--r--  arch/arm/mach-tegra/tegra12_clocks.c | 2
-rw-r--r--  arch/arm/mach-tegra/tegra_cl_dvfs.c | 4
-rw-r--r--  arch/arm/tools/mach-types | 1
-rw-r--r--  drivers/base/regmap/regmap-irq.c | 26
-rw-r--r--  drivers/base/regmap/regmap.c | 4
-rw-r--r--  drivers/gpio/Kconfig | 6
-rw-r--r--  drivers/gpio/Makefile | 1
-rw-r--r--  drivers/gpio/gpio-apalis-tk1-k20.c | 221
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 1
-rw-r--r--  drivers/gpu/nvgpu/gk20a/regops_gk20a.c | 20
-rw-r--r--  drivers/iio/adc/Kconfig | 6
-rw-r--r--  drivers/iio/adc/Makefile | 1
-rw-r--r--  drivers/iio/adc/apalis-tk1-k20_adc.c | 200
-rw-r--r--  drivers/input/touchscreen/Kconfig | 10
-rw-r--r--  drivers/input/touchscreen/Makefile | 1
-rw-r--r--  drivers/input/touchscreen/apalis-tk1-k20_ts.c | 235
-rw-r--r--  drivers/media/i2c/soc_camera/Kconfig | 18
-rw-r--r--  drivers/media/i2c/soc_camera/Makefile | 3
-rw-r--r--  drivers/media/i2c/soc_camera/adv7280.c | 834
-rw-r--r--  drivers/media/i2c/soc_camera/ap1302.c | 558
-rw-r--r--  drivers/media/i2c/soc_camera/ov5640.c | 1842
-rw-r--r--  drivers/media/i2c/soc_camera/tc358743.c | 1104
-rw-r--r--  drivers/media/platform/soc_camera/tegra_camera/common.c | 38
-rw-r--r--  drivers/media/platform/soc_camera/tegra_camera/common.h | 6
-rw-r--r--  drivers/media/platform/soc_camera/tegra_camera/vi2.c | 80
-rw-r--r--  drivers/media/platform/tegra/ad5816.c | 4
-rw-r--r--  drivers/media/platform/tegra/dw9718.c | 4
-rw-r--r--  drivers/media/v4l2-core/v4l2-ioctl.c | 4
-rw-r--r--  drivers/mfd/Kconfig | 14
-rw-r--r--  drivers/mfd/Makefile | 1
-rw-r--r--  drivers/mfd/apalis-tk1-k20-ezp.h | 47
-rw-r--r--  drivers/mfd/apalis-tk1-k20.c | 1084
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c | 43
-rw-r--r--  drivers/net/can/Kconfig | 6
-rw-r--r--  drivers/net/can/Makefile | 1
-rw-r--r--  drivers/net/can/apalis-tk1-k20-can.c | 836
-rw-r--r--  drivers/net/ethernet/intel/igb/Makefile | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 3072
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.h | 579
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_api.c | 1161
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_api.h | 152
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 1834
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_hw.h | 366
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 767
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.h | 76
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 1727
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.h | 118
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_manage.c | 551
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_manage.h | 86
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.c | 270
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.h | 59
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c | 678
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.h | 47
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_osdep.h | 141
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 2378
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.h | 332
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h | 918
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 680
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_debugfs.c | 26
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 2104
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_hwmon.c | 48
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 7103
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_param.c | 872
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_procfs.c | 356
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 457
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_regtest.h | 256
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_vmdq.c | 433
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_vmdq.h | 43
-rw-r--r--  drivers/net/ethernet/intel/igb/kcompat.c | 2082
-rw-r--r--  drivers/net/ethernet/intel/igb/kcompat.h | 5075
-rw-r--r--  drivers/net/ethernet/intel/igb/kcompat_ethtool.c | 1169
-rw-r--r--  drivers/pci/host/pci-tegra.c | 198
-rw-r--r--  drivers/pci/msi.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-tegra124.c | 124
-rw-r--r--  drivers/rtc/rtc-ds1307.c | 12
-rw-r--r--  drivers/spi/spi-tegra114.c | 4
-rw-r--r--  drivers/usb/gadget/tegra_udc.c | 4
-rw-r--r--  drivers/video/logo/Kconfig | 4
-rw-r--r--  drivers/video/logo/Makefile | 2
-rw-r--r--  drivers/video/logo/logo.c | 4
-rw-r--r--  drivers/video/tegra/dc/bandwidth.c | 3
-rw-r--r--  drivers/video/tegra/dc/dc.c | 20
-rw-r--r--  drivers/video/tegra/dc/dp.c | 11
-rw-r--r--  drivers/video/tegra/dc/dsi.c | 2
-rw-r--r--  drivers/video/tegra/dc/lvds.c | 20
-rw-r--r--  drivers/video/tegra/dc/lvds.h | 1
-rw-r--r--  drivers/video/tegra/dc/mode.c | 218
-rw-r--r--  drivers/video/tegra/dc/sor.c | 43
-rw-r--r--  drivers/video/tegra/dc/sor_regs.h | 1
-rw-r--r--  drivers/video/tegra/fb.c | 198
-rw-r--r--  include/linux/compiler-gcc.h | 114
-rw-r--r--  include/linux/compiler-gcc3.h | 23
-rw-r--r--  include/linux/compiler-gcc4.h | 88
-rw-r--r--  include/linux/linux_logo.h | 1
-rw-r--r--  include/linux/mfd/apalis-tk1-k20.h | 215
-rw-r--r--  sound/pci/hda/hda_intel.c | 6
-rw-r--r--  sound/soc/tegra/Kconfig | 13
-rw-r--r--  sound/soc/tegra/Makefile | 2
-rw-r--r--  sound/soc/tegra/apalis-tk1.c | 423
135 files changed, 48365 insertions, 9497 deletions
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index ad6a73852f08..7a5c7f4dd83f 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -54,6 +54,7 @@ ramtron,24c64 i2c serial eeprom (24cxx)
ricoh,rs5c372a I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
samsung,24ad0xd1 S524AD0XF1 (128K/256K-bit Serial EEPROM for Low Power)
st-micro,24c256 i2c serial eeprom (24cxx)
+st,m41t0 Serial real-time clock (RTC)
stm,m41t00 Serial Access TIMEKEEPER
stm,m41t62 Serial real-time clock (RTC) with alarm
stm,m41t80 M41T80 - SERIAL ACCESS RTC WITH ALARMS
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index a5d9cefc9155..aa3fe30e6d4b 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -59,6 +59,14 @@ endif
comma = ,
+#
+# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
+# later may result in code being generated that handles signed short and signed
+# char struct members incorrectly. So disable it.
+# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
+#
+KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)
+
# This selects which instruction set is used.
# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros which makes
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index ba799ea324ea..9a905903793a 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -200,6 +200,8 @@ dtb-$(CONFIG_ARCH_TEGRA) += tegra20-harmony.dtb \
tegra124-bonaire_sim.dtb \
tegra124-bonaire.dtb \
tegra124-ardbeg.dtb \
+ tegra124-apalis-eval.dtb \
+ tegra124-apalis-v1.2-eval.dtb \
tegra124-ardbeg-a03-00.dtb \
tegra124-ardbeg-e1792-1100-a00-00.dtb \
tegra124-ardbeg-e1792-1100-a00-01.dtb \
diff --git a/arch/arm/boot/dts/tegra124-apalis-eval.dts b/arch/arm/boot/dts/tegra124-apalis-eval.dts
new file mode 100644
index 000000000000..a91852377643
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-apalis-eval.dts
@@ -0,0 +1,225 @@
+/dts-v1/;
+
+#include "tegra124.dtsi"
+#include "tegra124-platforms/tegra124-apalis-displays.dtsi"
+#include "tegra124-platforms/tegra124-apalis-keys.dtsi"
+#include "tegra124-platforms/tegra124-apalis-gpio.dtsi"
+#include "tegra124-platforms/tegra124-apalis-pinmux.dtsi"
+#include "tegra124-platforms/tegra124-apalis-pmic.dtsi"
+#include "tegra124-platforms/tegra124-apalis-fixed.dtsi"
+
+/ {
+ model = "Toradex Apalis TK1 on Apalis Evaluation Board";
+ compatible = "toradex,apalis-tk1-eval", "toradex,apalis-tk1",
+ "nvidia,tegra124";
+ nvidia,dtsfilename = __FILE__;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen {
+ bootargs = "tegraid=40.0.0.00.00 vmalloc=256M video=tegrafb console=ttyS0,115200n8 earlyprintk";
+ };
+
+ psci {
+ status = "disabled";
+ };
+
+ /* TBD */
+ pinmux {
+ pinctrl-names = "default", "drive", "unused", "suspend";
+ pinctrl-3 = <&pinmux_suspend>;
+
+ /* Change the dap_mclk1_req pin to the required configuration */
+ unused_lowpower {
+ dap_mclk1_req_pee2 {
+ nvidia,pins = "dap_mclk1_req_pee2";
+ nvidia,function = "sata";
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ };
+ };
+
+ /* On suspend, configure dap_mclk1_req with a pull-up */
+ pinmux_suspend: pins_on_suspend {
+ dap_mclk1_req_pee2 {
+ nvidia,pins = "dap_mclk1_req_pee2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ };
+ };
+ };
+
+ /* Apalis UART1 */
+ serial@70006000 {
+ compatible = "nvidia,tegra114-uart", "nvidia,tegra20-uart";
+ status = "okay";
+ };
+
+ /* Apalis UART2 */
+ serial@70006040 {
+ compatible = "nvidia,tegra114-hsuart";
+ status = "okay";
+ };
+
+ /* Apalis UART3 */
+ serial@70006200 {
+ compatible = "nvidia,tegra114-hsuart";
+ status = "okay";
+ };
+
+ /* Apalis UART4 */
+ serial@70006300 {
+ compatible = "nvidia,tegra114-hsuart";
+ status = "okay";
+ };
+
+ /*
+ * GEN1_I2C: I2C1_SDA/SCL on MXM3 pin 209/211 (e.g. RTC on carrier
+ * board)
+ */
+ i2c@7000c000 {
+ status = "okay";
+ clock-frequency = <100000>;
+
+ pcie-switch@58 {
+ compatible = "plx,pex8605";
+ reg = <0x58>;
+ };
+
+ /* M41T0M6 real time clock on carrier board */
+ rtc@68 {
+ compatible = "st,m41t0";
+ reg = <0x68>;
+ };
+ };
+
+ /* PWR_I2C: power I2C to audio codec, PMIC and temperature sensor */
+ i2c@7000d000 {
+ nvidia,bit-banging-xfer-after-shutdown;
+ };
+
+ memory@0x80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x0 0x80000000>;
+ };
+
+ /* TBD */
+ camera-pcl {
+ profiles {
+ ov4689@2_0036 {
+ use_of_node = "yes";
+ reset-gpios = <&gpio TEGRA_GPIO(BB, 3) 0>;
+ cam1-gpios = <&gpio TEGRA_GPIO(BB, 5) 0>;
+ };
+ imx185@2_001A {
+ use-of-node = "yes";
+ cam1-gpios = <&gpio TEGRA_GPIO(BB, 5) 0>;
+ };
+ };
+ dpd {
+ default-enable;
+ };
+ };
+
+ /* SPI1: Apalis SPI1 */
+ spi@7000d400 {
+ status = "okay";
+ spi-max-frequency = <25000000>;
+
+ spidev0: spidev@0 {
+ compatible = "spidev";
+ reg = <0>;
+ spi-max-frequency = <25000000>;
+ };
+ };
+
+ /* SPI2: MCU SPI */
+ spi@7000d600 {
+ status = "okay";
+ spi-max-frequency = <6000000>;
+
+ k20mcu: apalis-tk1-k20@1 {
+ compatible = "toradex,apalis-tk1-k20";
+ reg = <1>;
+ spi-max-frequency = <6000000>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(K, 2) IRQ_TYPE_EDGE_FALLING>;
+ rst-gpio = <&gpio TEGRA_GPIO(BB, 6) GPIO_ACTIVE_HIGH>;
+
+ /* GPIO based CS used to enter K20 EzPort mode */
+ ezport-cs-gpio = <&gpio TEGRA_GPIO(W, 2) GPIO_ACTIVE_HIGH>;
+ /* extra INT lines between K20 and TK1 */
+ int2-gpio = <&gpio TEGRA_GPIO(J, 2) GPIO_ACTIVE_HIGH>;
+ int3-gpio = <&gpio TEGRA_GPIO(I, 5) GPIO_ACTIVE_HIGH>;
+ int4-gpio = <&gpio TEGRA_GPIO(J, 0) GPIO_ACTIVE_HIGH>;
+
+ toradex,apalis-tk1-k20-uses-adc;
+ toradex,apalis-tk1-k20-uses-can;
+ toradex,apalis-tk1-k20-uses-gpio;
+ toradex,apalis-tk1-k20-uses-tsc;
+
+ controller-data {
+ nvidia,enable-hw-based-cs;
+ nvidia,cs-setup-clk-count = <12>;
+ nvidia,cs-hold-clk-count = <12>;
+ };
+ };
+
+ /*
+ * spidev for K20 EzPort, can be used with custom firmware for
+ * userspace K20 applications
+ */
+ spidev2: spidev@2 {
+ compatible = "spidev";
+ reg = <2>;
+ spi-max-frequency = <2000000>;
+ };
+ };
+
+ /* SPI4: Apalis SPI2 */
+ spi@7000da00 {
+ status = "okay";
+ spi-max-frequency = <25000000>;
+
+ spidev3: spidev@0 {
+ compatible = "spidev";
+ reg = <0>;
+ spi-max-frequency = <25000000>;
+ };
+ };
+
+ pcie-controller {
+ nvidia,port0_status = <1>;
+ nvidia,port1_status = <1>;
+ hvdd_pex-supply = <&reg_3v3>;
+ avdd_pex_pll-supply = <&as3722_sd4>;
+ status = "okay";
+ };
+
+ sata@0x70020000 {
+ nvidia,enable-sata-port;
+ nvidia,sata-connector-type = <STANDARD_SATA>;
+ vdd_sata-supply = <&as3722_sd4>;
+ avdd_sata_pll-supply = <&as3722_sd4>;
+ avdd_sata-supply = <&as3722_sd4>;
+ hvdd_sata-supply = <&reg_3v3>;
+ vddio_pex_sata-supply = <&reg_3v3>;
+ status = "okay";
+ };
+
+ /* TBD */
+ xusb@70090000 {
+ /* nvidia,uses_external_pmic;
+ nvidia,gpio_controls_muxed_ss_lanes; */
+ nvidia,gpio_ss1_sata = <0>;
+ nvidia,portmap = <0x0703>; /* SSP0, SSP1 USB2P0, USB2P1, USB2P2 */
+ nvidia,ss_portmap = <0x72>; /* SSP0 on USB2P2, TBD: SSP1 on USB2P0 */
+ nvidia,lane_owner = <6>; /* USB3P0 USB3P1 */
+ nvidia,ulpicap = <0>; /* no ULPI support; TBD: can this be removed? */
+ /* nvidia,supply_utmi_vbuses = "usb_vbus0", "usb_vbus1", "usb_vbus2";
+ nvidia,supply_s3p3v = "hvdd_usb";
+ nvidia,supply_s1p8v = "avdd_pll_utmip";
+ nvidia,supply_s1p05v = "avddio_usb"; */
+ status = "okay";
+ };
+};
diff --git a/arch/arm/boot/dts/tegra124-apalis-v1.2-eval.dts b/arch/arm/boot/dts/tegra124-apalis-v1.2-eval.dts
new file mode 100644
index 000000000000..786488a94716
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-apalis-v1.2-eval.dts
@@ -0,0 +1,225 @@
+/dts-v1/;
+
+#include "tegra124.dtsi"
+#include "tegra124-platforms/tegra124-apalis-v1.2-displays.dtsi"
+#include "tegra124-platforms/tegra124-apalis-keys.dtsi"
+#include "tegra124-platforms/tegra124-apalis-v1.2-gpio.dtsi"
+#include "tegra124-platforms/tegra124-apalis-v1.2-pinmux.dtsi"
+#include "tegra124-platforms/tegra124-apalis-pmic.dtsi"
+#include "tegra124-platforms/tegra124-apalis-v1.2-fixed.dtsi"
+
+/ {
+ model = "Toradex Apalis TK1 on Apalis Evaluation Board";
+ compatible = "toradex,apalis-tk1-v1.2-eval", "toradex,apalis-tk1-eval",
+ "toradex,apalis-tk1", "nvidia,tegra124";
+ nvidia,dtsfilename = __FILE__;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen {
+ bootargs = "tegraid=40.0.0.00.00 vmalloc=256M video=tegrafb console=ttyS0,115200n8 earlyprintk";
+ };
+
+ psci {
+ status = "disabled";
+ };
+
+ /* TBD */
+ pinmux {
+ pinctrl-names = "default", "drive", "unused", "suspend";
+ pinctrl-3 = <&pinmux_suspend>;
+
+ /* Change the dap_mclk1_req pin to the required configuration */
+ unused_lowpower {
+ dap_mclk1_req_pee2 {
+ nvidia,pins = "dap_mclk1_req_pee2";
+ nvidia,function = "sata";
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ };
+ };
+
+ /* On suspend, configure dap_mclk1_req with a pull-up */
+ pinmux_suspend: pins_on_suspend {
+ dap_mclk1_req_pee2 {
+ nvidia,pins = "dap_mclk1_req_pee2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ };
+ };
+ };
+
+ /* Apalis UART1 */
+ serial@70006000 {
+ compatible = "nvidia,tegra114-uart", "nvidia,tegra20-uart";
+ status = "okay";
+ };
+
+ /* Apalis UART2 */
+ serial@70006040 {
+ compatible = "nvidia,tegra114-hsuart";
+ status = "okay";
+ };
+
+ /* Apalis UART3 */
+ serial@70006200 {
+ compatible = "nvidia,tegra114-hsuart";
+ status = "okay";
+ };
+
+ /* Apalis UART4 */
+ serial@70006300 {
+ compatible = "nvidia,tegra114-hsuart";
+ status = "okay";
+ };
+
+ /*
+ * GEN1_I2C: I2C1_SDA/SCL on MXM3 pin 209/211 (e.g. RTC on carrier
+ * board)
+ */
+ i2c@7000c000 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ pcie-switch@58 {
+ compatible = "plx,pex8605";
+ reg = <0x58>;
+ };
+
+ /* M41T0M6 real time clock on carrier board */
+ rtc@68 {
+ compatible = "st,m41t0";
+ reg = <0x68>;
+ };
+ };
+
+ /* PWR_I2C: power I2C to audio codec, PMIC and temperature sensor */
+ i2c@7000d000 {
+ nvidia,bit-banging-xfer-after-shutdown;
+ };
+
+ memory@0x80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x0 0x80000000>;
+ };
+
+ /* TBD */
+ camera-pcl {
+ profiles {
+ ov4689@2_0036 {
+ use_of_node = "yes";
+ reset-gpios = <&gpio TEGRA_GPIO(BB, 3) 0>;
+ cam1-gpios = <&gpio TEGRA_GPIO(BB, 5) 0>;
+ };
+ imx185@2_001A {
+ use-of-node = "yes";
+ cam1-gpios = <&gpio TEGRA_GPIO(BB, 5) 0>;
+ };
+ };
+ dpd {
+ default-enable;
+ };
+ };
+
+ /* SPI1: Apalis SPI1 */
+ spi@7000d400 {
+ status = "okay";
+ spi-max-frequency = <25000000>;
+
+ spidev0: spidev@0 {
+ compatible = "spidev";
+ reg = <0>;
+ spi-max-frequency = <25000000>;
+ };
+ };
+
+ /* SPI2: MCU SPI */
+ spi@7000d600 {
+ status = "okay";
+ spi-max-frequency = <6000000>;
+
+ k20mcu: apalis-tk1-k20@1 {
+ compatible = "toradex,apalis-tk1-k20";
+ reg = <1>;
+ spi-max-frequency = <6000000>;
+ interrupt-parent = <&gpio>;
+ interrupts = <TEGRA_GPIO(K, 2) IRQ_TYPE_EDGE_FALLING>;
+ rst-gpio = <&gpio TEGRA_GPIO(BB, 6) GPIO_ACTIVE_HIGH>;
+
+ /* GPIO based CS used to enter K20 EzPort mode */
+ ezport-cs-gpio = <&gpio TEGRA_GPIO(W, 2) GPIO_ACTIVE_HIGH>;
+ /* extra INT lines between K20 and TK1 */
+ int2-gpio = <&gpio TEGRA_GPIO(J, 2) GPIO_ACTIVE_HIGH>;
+ int3-gpio = <&gpio TEGRA_GPIO(I, 5) GPIO_ACTIVE_HIGH>;
+ int4-gpio = <&gpio TEGRA_GPIO(J, 0) GPIO_ACTIVE_HIGH>;
+
+ toradex,apalis-tk1-k20-uses-adc;
+ toradex,apalis-tk1-k20-uses-can;
+ toradex,apalis-tk1-k20-uses-gpio;
+ toradex,apalis-tk1-k20-uses-tsc;
+
+ controller-data {
+ nvidia,enable-hw-based-cs;
+ nvidia,cs-setup-clk-count = <12>;
+ nvidia,cs-hold-clk-count = <12>;
+ };
+ };
+
+ /*
+ * spidev for K20 EzPort, can be used with custom firmware for
+ * userspace K20 applications
+ */
+ spidev2: spidev@2 {
+ compatible = "spidev";
+ reg = <2>;
+ spi-max-frequency = <2000000>;
+ };
+ };
+
+ /* SPI4: Apalis SPI2 */
+ spi@7000da00 {
+ status = "okay";
+ spi-max-frequency = <25000000>;
+
+ spidev3: spidev@0 {
+ compatible = "spidev";
+ reg = <0>;
+ spi-max-frequency = <25000000>;
+ };
+ };
+
+ pcie-controller {
+ nvidia,port0_status = <1>;
+ nvidia,port1_status = <1>;
+ hvdd_pex-supply = <&reg_3v3>;
+ avdd_pex_pll-supply = <&as3722_sd4>;
+ status = "okay";
+ };
+
+ sata@0x70020000 {
+ nvidia,enable-sata-port;
+ nvidia,sata-connector-type = <STANDARD_SATA>;
+ vdd_sata-supply = <&as3722_sd4>;
+ avdd_sata_pll-supply = <&as3722_sd4>;
+ avdd_sata-supply = <&as3722_sd4>;
+ hvdd_sata-supply = <&reg_3v3>;
+ vddio_pex_sata-supply = <&reg_3v3>;
+ status = "okay";
+ };
+
+ /* TBD */
+ xusb@70090000 {
+ /* nvidia,uses_external_pmic;
+ nvidia,gpio_controls_muxed_ss_lanes; */
+ nvidia,gpio_ss1_sata = <0>;
+ nvidia,portmap = <0x0703>; /* SSP0, SSP1 USB2P0, USB2P1, USB2P2 */
+ nvidia,ss_portmap = <0x72>; /* SSP0 on USB2P2, TBD: SSP1 on USB2P0 */
+ nvidia,lane_owner = <6>; /* USB3P0 USB3P1 */
+ nvidia,ulpicap = <0>; /* no ULPI support; TBD: can this be removed? */
+ /* nvidia,supply_utmi_vbuses = "usb_vbus0", "usb_vbus1", "usb_vbus2";
+ nvidia,supply_s3p3v = "hvdd_usb";
+ nvidia,supply_s1p8v = "avdd_pll_utmip";
+ nvidia,supply_s1p05v = "avddio_usb"; */
+ status = "okay";
+ };
+};
diff --git a/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-displays.dtsi b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-displays.dtsi
new file mode 100644
index 000000000000..d9eeadc9b8da
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-displays.dtsi
@@ -0,0 +1,12 @@
+/ {
+ host1x {
+ hdmi {
+ status = "okay";
+ nvidia,ddc-i2c-bus = <&hdmi_ddc>;
+ };
+ };
+
+ hdmi_ddc: i2c@7000c400 {
+ clock-frequency = <10000>;
+ };
+};
diff --git a/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-fixed.dtsi b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-fixed.dtsi
new file mode 100644
index 000000000000..e00f482907b7
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-fixed.dtsi
@@ -0,0 +1,229 @@
+#include <dt-bindings/gpio/tegra-gpio.h>
+
+/ {
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ avdd_hdmi_pll: regulator@0 {
+ compatible = "regulator-fixed-sync";
+ reg = <0>;
+ regulator-name = "+V1.05_AVDD_HDMI_PLL";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ gpio = <&gpio TEGRA_GPIO(H, 7) 0>;
+ vin-supply = <&as3722_sd4>;
+ consumers {
+ c1 {
+ regulator-consumer-supply = "avdd_hdmi_pll";
+ regulator-consumer-device = "tegradc.1";
+ };
+ c2 {
+ regulator-consumer-supply = "avdd_hdmi_pll";
+ regulator-consumer-device = "tegradc.0";
+ };
+ };
+ };
+
+ reg_3v3_mxm: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ regulator-name = "+V3.3_MXM";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "vdd_3v3_emmc";
+ };
+ c2 {
+ regulator-consumer-supply = "vdd_com_3v3";
+ };
+ };
+ };
+
+ reg_3v3: regulator@2 {
+ compatible = "regulator-fixed-sync";
+ reg = <2>;
+ regulator-name = "+V3.3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ /* PWR_EN_+V3.3 */
+ gpio = <&as3722 2 0>;
+ enable-active-high;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "avdd_usb";
+ regulator-consumer-device = "tegra-udc.0";
+ };
+ c2 {
+ regulator-consumer-supply = "avdd_usb";
+ regulator-consumer-device = "tegra-ehci.0";
+ };
+ c3 {
+ regulator-consumer-supply = "avdd_usb";
+ regulator-consumer-device = "tegra-ehci.1";
+ };
+ c4 {
+ regulator-consumer-supply = "avdd_usb";
+ regulator-consumer-device = "tegra-ehci.2";
+ };
+ c5 {
+ regulator-consumer-supply = "hvdd_usb";
+ regulator-consumer-device = "tegra-xhci";
+ };
+ c6 {
+ regulator-consumer-supply = "vddio_hv";
+ regulator-consumer-device = "tegradc.1";
+ };
+ c7 {
+ regulator-consumer-supply = "pwrdet_hv";
+ };
+ c8 {
+ regulator-consumer-supply = "hvdd_sata";
+ };
+ c9 {
+ regulator-consumer-supply = "hvdd_pex";
+ regulator-consumer-device = "tegra-pcie";
+ };
+ c10 {
+ regulator-consumer-supply = "hvdd_pex_pll";
+ regulator-consumer-device = "tegra-pcie";
+ };
+ c11 {
+ regulator-consumer-supply = "vdd_sys_cam_3v3";
+ };
+ c12 {
+ regulator-consumer-supply = "VDDA";
+ regulator-consumer-device = "4-000a";
+ };
+ c13 {
+ regulator-consumer-supply = "vddio_sd_slot";
+ regulator-consumer-device = "sdhci-tegra.0";
+ };
+ c14 {
+ regulator-consumer-supply = "vddio_sd_slot";
+ regulator-consumer-device = "sdhci-tegra.2";
+ };
+ c15 {
+ regulator-consumer-supply = "vdd_3v3_sensor";
+ };
+ c16 {
+ regulator-consumer-supply = "vdd_kp_3v3";
+ };
+ c17 {
+ regulator-consumer-supply = "vdd_tp_3v3";
+ };
+ c18 {
+ regulator-consumer-supply = "vdd_dtv_3v3";
+ };
+ c19 {
+ regulator-consumer-supply = "vdd";
+ regulator-consumer-device = "4-004c";
+ };
+ };
+ };
+
+ reg_5v0: regulator@3 {
+ compatible = "regulator-fixed-sync";
+ reg = <3>;
+ regulator-name = "5V_SW";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "vdd_hdmi_5v0";
+ regulator-consumer-device = "tegradc.1";
+ };
+ c2 {
+ regulator-consumer-supply = "vdd_hdmi_5v0";
+ regulator-consumer-device = "tegradc.0";
+ };
+ c3 {
+ regulator-consumer-supply = "vdd_5v0_sensor";
+ };
+ };
+ };
+
+ /* USBO1_EN */
+ reg_usbo1_vbus: regulator@4 {
+ compatible = "regulator-fixed-sync";
+ reg = <4>;
+ regulator-name = "VCC_USBO1";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio TEGRA_GPIO(N, 4) 0>;
+ enable-active-high;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "usb_vbus";
+ regulator-consumer-device = "tegra-ehci.0";
+ };
+ c2 {
+ regulator-consumer-supply = "usb_vbus";
+ regulator-consumer-device = "tegra-otg";
+ };
+ c3 {
+ regulator-consumer-supply = "usb_vbus0";
+ regulator-consumer-device = "tegra-xhci";
+ };
+ };
+ };
+
+ /* USBH_EN */
+ reg_usbh_vbus: regulator@5 {
+ compatible = "regulator-fixed-sync";
+ reg = <5>;
+ regulator-name = "VCC_USBH(2A|2C|2D|3|4)";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio TEGRA_GPIO(N, 5) 0>;
+ enable-active-high;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "usb_vbus";
+ regulator-consumer-device = "tegra-ehci.2";
+ };
+ c2 {
+ regulator-consumer-supply = "usb_vbus2";
+ regulator-consumer-device = "tegra-xhci";
+ };
+ c3 {
+ regulator-consumer-supply = "usb_vbus";
+ regulator-consumer-device = "tegra-ehci.1";
+ };
+ c4 {
+ regulator-consumer-supply = "usb_vbus1";
+ regulator-consumer-device = "tegra-xhci";
+ };
+ };
+ };
+
+ vdd_lcd_bl_en: regulator@6 {
+ compatible = "regulator-fixed-sync";
+ reg = <6>;
+ regulator-name = "BKL1_ON";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ /* Apalis BKL1_ON */
+ gpio = <&gpio TEGRA_GPIO(BB, 5) 0>;
+ enable-active-high;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "vdd_lcd_bl";
+ };
+ c2 {
+ regulator-consumer-supply = "vdd_lcd_bl_en";
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-gpio.dtsi b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-gpio.dtsi
new file mode 100644
index 000000000000..df4ed7b8bd59
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-gpio.dtsi
@@ -0,0 +1,66 @@
+#include <dt-bindings/gpio/tegra-gpio.h>
+
+/ {
+ gpio: gpio@6000d000 {
+ gpio-init-names = "default";
+ gpio-init-0 = <&gpio_default>;
+
+ gpio_default: default {
+ gpio-input = <
+ TEGRA_GPIO(A, 1) /* 120 UART1_DSR */
+ TEGRA_GPIO(B, 1) /* 124 UART1_DCD */
+ TEGRA_GPIO(I, 5) /* MCU_INT3# */
+ TEGRA_GPIO(I, 6) /* TEMP_ALERT_L */
+ TEGRA_GPIO(J, 0) /* MCU_INT4# */
+ TEGRA_GPIO(J, 2) /* MCU_INT2# */
+ TEGRA_GPIO(K, 2) /* MCU_INT1# */
+ TEGRA_GPIO(K, 7) /* 122 UART1_RI */
+ TEGRA_GPIO(N, 7) /* 232 HDMI1_HPD */
+ TEGRA_GPIO(V, 3) /* 164 MMC1_CD# */
+ TEGRA_GPIO(V, 4) /* 5 Apalis GPIO3 */
+ TEGRA_GPIO(V, 5) /* 7 Apalis GPIO4 */
+ TEGRA_GPIO(W, 3) /* TOUCH_INT */
+ TEGRA_GPIO(W, 5) /* 152 MMC1_D5 */
+ TEGRA_GPIO(BB, 0) /* 96 USBH_OC# */
+ TEGRA_GPIO(BB, 4) /* 262 USBO1_OC# */
+ TEGRA_GPIO(CC, 5) /* 148 MMC1_D4 */
+ TEGRA_GPIO(DD, 1) /* 15 Apalis GPIO7 */
+ TEGRA_GPIO(DD, 2) /* 17 Apalis GPIO8 */
+ TEGRA_GPIO(DD, 3) /* 37 WAKE1_MICO */
+ TEGRA_GPIO(DD, 5) /* 11 Apalis GPIO5 */
+ TEGRA_GPIO(DD, 6) /* 13 Apalis GPIO6 */
+ TEGRA_GPIO(EE, 3) /* 220 HDMI1_CEC */
+ TEGRA_GPIO(EE, 5) /* 156 MMC1_D6 */
+#ifndef APALIS_TK1_EDP /* used as optional eDP hot-plug pin */
+ TEGRA_GPIO(FF, 0) /* 3 Apalis GPIO2 */
+#endif
+ TEGRA_GPIO(FF, 1) /* 158 MMC1_D7 */
+ TEGRA_GPIO(FF, 2) /* 1 Apalis GPIO1 */
+ >;
+ gpio-output-low = <
+ TEGRA_GPIO(C, 0) /* 110 UART1_DTR */
+ TEGRA_GPIO(O, 5) /* LAN_WAKE_N */
+ TEGRA_GPIO(O, 6) /* LAN_DEV_OFF_N */
+ TEGRA_GPIO(Q, 0) /* Shift_CTRL_OE[0] */
+ TEGRA_GPIO(Q, 1) /* Shift_CTRL_OE[1] */
+ TEGRA_GPIO(Q, 2) /* Shift_CTRL_OE[2] */
+ TEGRA_GPIO(Q, 4) /* Shift_CTRL_OE[4] */
+ TEGRA_GPIO(R, 0) /* Shift_CTRL_Dir_In[0] */
+ TEGRA_GPIO(R, 1) /* Shift_CTRL_Dir_In[1] */
+ TEGRA_GPIO(R, 2) /* Shift_CTRL_OE[3] */
+ TEGRA_GPIO(S, 2) /* LAN_RESET_N */
+ TEGRA_GPIO(S, 3) /* Shift_CTRL_Dir_In[2] */
+ TEGRA_GPIO(U, 4) /* RESET_MOCI_CTRL */
+ TEGRA_GPIO(BB, 3) /* 198 DAP1_RESET */
+ TEGRA_GPIO(BB, 6) /* MCU_RESET */
+ >;
+ gpio-output-high = <
+ TEGRA_GPIO(N, 2) /* 35 SATA1_ACT# */
+ TEGRA_GPIO(Q, 5) /* Shift_CTRL_Dir_Out[0] */
+ TEGRA_GPIO(Q, 6) /* Shift_CTRL_Dir_Out[1] */
+ TEGRA_GPIO(Q, 7) /* Shift_CTRL_Dir_Out[2] */
+ TEGRA_GPIO(BB, 5) /* 286 BKL1_ON */
+ >;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-keys.dtsi b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-keys.dtsi
new file mode 100644
index 000000000000..9ba252613062
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-keys.dtsi
@@ -0,0 +1,24 @@
+#include <dt-bindings/gpio/tegra-gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ wakeup {
+ label = "WAKE1_MICO";
+ gpios = <&gpio TEGRA_GPIO(DD, 3) GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_WAKEUP>;
+ debounce-interval = <10>;
+ gpio-key,wakeup;
+ };
+
+ wol_wakeup {
+ label = "WOL_GPIO";
+ gpios = <&gpio TEGRA_GPIO(O, 5) GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_WAKEUP>;
+ debounce-interval = <10>;
+ gpio-key,wakeup;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-pinmux.dtsi b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-pinmux.dtsi
new file mode 100644
index 000000000000..3c45320bf024
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-pinmux.dtsi
@@ -0,0 +1,1516 @@
+#include <dt-bindings/pinctrl/pinctrl-tegra.h>
+
+/ {
+ pinmux: pinmux {
+ status = "okay";
+ pinctrl-names = "default", "drive", "unused";
+ pinctrl-0 = <&pinmux_default>;
+ pinctrl-1 = <&drive_default>;
+ pinctrl-2 = <&pinmux_unused_lowpower>;
+
+ pinmux_default: common {
+ /* Analogue Audio (On-module) */
+ dap3_fs_pp0 {
+ nvidia,pins = "dap3_fs_pp0";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap3_din_pp1 {
+ nvidia,pins = "dap3_din_pp1";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap3_dout_pp2 {
+ nvidia,pins = "dap3_dout_pp2";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap3_sclk_pp3 {
+ nvidia,pins = "dap3_sclk_pp3";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap_mclk1_pw4 {
+ nvidia,pins = "dap_mclk1_pw4";
+ nvidia,function = "extperiph1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis BKL1_ON */
+ pbb5 {
+ nvidia,pins = "pbb5";
+ nvidia,function = "vgp5";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis BKL1_PWM */
+ pu6 {
+ nvidia,pins = "pu6";
+ nvidia,function = "pwm3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis CAM1_MCLK */
+ cam_mclk_pcc0 {
+ nvidia,pins = "cam_mclk_pcc0";
+ nvidia,function = "vi_alt3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis Digital Audio */
+ dap2_fs_pa2 {
+ nvidia,pins = "dap2_fs_pa2";
+ nvidia,function = "hda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap2_sclk_pa3 {
+ nvidia,pins = "dap2_sclk_pa3";
+ nvidia,function = "hda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap2_din_pa4 {
+ nvidia,pins = "dap2_din_pa4";
+ nvidia,function = "hda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap2_dout_pa5 {
+ nvidia,pins = "dap2_dout_pa5";
+ nvidia,function = "hda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pbb3 { /* DAP1_RESET */
+ nvidia,pins = "pbb3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ clk3_out_pee0 {
+ nvidia,pins = "clk3_out_pee0";
+ nvidia,function = "extperiph3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis GPIO */
+ ddc_scl_pv4 {
+ nvidia,pins = "ddc_scl_pv4";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ddc_sda_pv5 {
+ nvidia,pins = "ddc_sda_pv5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l0_rst_n_pdd1 {
+ nvidia,pins = "pex_l0_rst_n_pdd1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l0_clkreq_n_pdd2 {
+ nvidia,pins = "pex_l0_clkreq_n_pdd2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l1_rst_n_pdd5 {
+ nvidia,pins = "pex_l1_rst_n_pdd5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l1_clkreq_n_pdd6 {
+ nvidia,pins = "pex_l1_clkreq_n_pdd6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dp_hpd_pff0 {
+ nvidia,pins = "dp_hpd_pff0";
+ nvidia,function = "dp";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pff2 {
+ nvidia,pins = "pff2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ owr { /* PEX_L1_CLKREQ_N multiplexed GPIO6 */
+ nvidia,pins = "owr";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,rcv-sel = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis HDMI1_CEC */
+ hdmi_cec_pee3 {
+ nvidia,pins = "hdmi_cec_pee3";
+ nvidia,function = "cec";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis HDMI1_HPD */
+ hdmi_int_pn7 {
+ nvidia,pins = "hdmi_int_pn7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,rcv-sel = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis I2C1 */
+ gen1_i2c_scl_pc4 {
+ nvidia,pins = "gen1_i2c_scl_pc4";
+ nvidia,function = "i2c1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ gen1_i2c_sda_pc5 {
+ nvidia,pins = "gen1_i2c_sda_pc5";
+ nvidia,function = "i2c1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis I2C2 (DDC) */
+ gen2_i2c_scl_pt5 {
+ nvidia,pins = "gen2_i2c_scl_pt5";
+ nvidia,function = "i2c2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ gen2_i2c_sda_pt6 {
+ nvidia,pins = "gen2_i2c_sda_pt6";
+ nvidia,function = "i2c2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis I2C3 (CAM) */
+ cam_i2c_scl_pbb1 {
+ nvidia,pins = "cam_i2c_scl_pbb1";
+ nvidia,function = "i2c3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ cam_i2c_sda_pbb2 {
+ nvidia,pins = "cam_i2c_sda_pbb2";
+ nvidia,function = "i2c3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis MMC1 */
+ sdmmc1_cd_n_pv3 { /* CD# GPIO */
+ nvidia,pins = "sdmmc1_wp_n_pv3";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ clk2_out_pw5 { /* D5 GPIO */
+ nvidia,pins = "clk2_out_pw5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat3_py4 {
+ nvidia,pins = "sdmmc1_dat3_py4";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat2_py5 {
+ nvidia,pins = "sdmmc1_dat2_py5";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat1_py6 {
+ nvidia,pins = "sdmmc1_dat1_py6";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat0_py7 {
+ nvidia,pins = "sdmmc1_dat0_py7";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_clk_pz0 {
+ nvidia,pins = "sdmmc1_clk_pz0";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_cmd_pz1 {
+ nvidia,pins = "sdmmc1_cmd_pz1";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ clk2_req_pcc5 { /* D4 GPIO */
+ nvidia,pins = "clk2_req_pcc5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_clk_lb_in_pee5 { /* D6 GPIO */
+ nvidia,pins = "sdmmc3_clk_lb_in_pee5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ usb_vbus_en2_pff1 { /* D7 GPIO */
+ nvidia,pins = "usb_vbus_en2_pff1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis PWM */
+ ph0 {
+ nvidia,pins = "ph0";
+ nvidia,function = "pwm0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph1 {
+ nvidia,pins = "ph1";
+ nvidia,function = "pwm1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph2 {
+ nvidia,pins = "ph2";
+ nvidia,function = "pwm2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ /* PWM3 is also active on pu6, the Apalis BKL1_PWM */
+ ph3 {
+ nvidia,pins = "ph3";
+ nvidia,function = "pwm3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis SATA1_ACT# */
+ dap1_dout_pn2 {
+ nvidia,pins = "dap1_dout_pn2";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis SD1 */
+ sdmmc3_clk_pa6 {
+ nvidia,pins = "sdmmc3_clk_pa6";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_cmd_pa7 {
+ nvidia,pins = "sdmmc3_cmd_pa7";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat3_pb4 {
+ nvidia,pins = "sdmmc3_dat3_pb4";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat2_pb5 {
+ nvidia,pins = "sdmmc3_dat2_pb5";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat1_pb6 {
+ nvidia,pins = "sdmmc3_dat1_pb6";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat0_pb7 {
+ nvidia,pins = "sdmmc3_dat0_pb7";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_cd_n_pv2 { /* CD# GPIO */
+ nvidia,pins = "sdmmc3_cd_n_pv2";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis SPDIF */
+ spdif_out_pk5 {
+ nvidia,pins = "spdif_out_pk5";
+ nvidia,function = "spdif";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ spdif_in_pk6 {
+ nvidia,pins = "spdif_in_pk6";
+ nvidia,function = "spdif";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis SPI1 */
+ ulpi_clk_py0 {
+ nvidia,pins = "ulpi_clk_py0";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_dir_py1 {
+ nvidia,pins = "ulpi_dir_py1";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ulpi_nxt_py2 {
+ nvidia,pins = "ulpi_nxt_py2";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_stp_py3 {
+ nvidia,pins = "ulpi_stp_py3";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis SPI2 */
+ pg5 {
+ nvidia,pins = "pg5";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg6 {
+ nvidia,pins = "pg6";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg7 {
+ nvidia,pins = "pg7";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pi3 {
+ nvidia,pins = "pi3";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis UART1 */
+ pb1 { /* DCD GPIO */
+ nvidia,pins = "pb1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pk7 { /* RI GPIO */
+ nvidia,pins = "pk7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart1_txd_pu0 {
+ nvidia,pins = "pu0";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart1_rxd_pu1 {
+ nvidia,pins = "pu1";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart1_cts_n_pu2 {
+ nvidia,pins = "pu2";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart1_rts_n_pu3 {
+ nvidia,pins = "pu3";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart3_cts_n_pa1 { /* DSR GPIO */
+ nvidia,pins = "uart3_cts_n_pa1";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart3_rts_n_pc0 { /* DTR GPIO */
+ nvidia,pins = "uart3_rts_n_pc0";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis UART2 */
+ uart2_txd_pc2 {
+ nvidia,pins = "uart2_txd_pc2";
+ nvidia,function = "irda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart2_rxd_pc3 {
+ nvidia,pins = "uart2_rxd_pc3";
+ nvidia,function = "irda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart2_cts_n_pj5 {
+ nvidia,pins = "uart2_cts_n_pj5";
+ nvidia,function = "uartb";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart2_rts_n_pj6 {
+ nvidia,pins = "uart2_rts_n_pj6";
+ nvidia,function = "uartb";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis UART3 */
+ uart3_txd_pw6 {
+ nvidia,pins = "uart3_txd_pw6";
+ nvidia,function = "uartc";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart3_rxd_pw7 {
+ nvidia,pins = "uart3_rxd_pw7";
+ nvidia,function = "uartc";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis UART4 */
+ uart4_rxd_pb0 {
+ nvidia,pins = "pb0";
+ nvidia,function = "uartd";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart4_txd_pj7 {
+ nvidia,pins = "pj7";
+ nvidia,function = "uartd";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis USBH_EN */
+ usb_vbus_en1_pn5 {
+ nvidia,pins = "usb_vbus_en1_pn5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis USBH_OC# */
+ pbb0 {
+ nvidia,pins = "pbb0";
+ nvidia,function = "vgp6";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis USBO1_EN */
+ usb_vbus_en0_pn4 {
+ nvidia,pins = "usb_vbus_en0_pn4";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis USBO1_OC# */
+ pbb4 {
+ nvidia,pins = "pbb4";
+ nvidia,function = "vgp4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis WAKE1_MICO */
+ pex_wake_n_pdd3 {
+ nvidia,pins = "pex_wake_n_pdd3";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* CORE_PWR_REQ */
+ core_pwr_req {
+ nvidia,pins = "core_pwr_req";
+ nvidia,function = "pwron";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* CPU_PWR_REQ */
+ cpu_pwr_req {
+ nvidia,pins = "cpu_pwr_req";
+ nvidia,function = "cpu";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* DVFS */
+ dvfs_pwm_px0 {
+ nvidia,pins = "dvfs_pwm_px0";
+ nvidia,function = "cldvfs";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dvfs_clk_px2 {
+ nvidia,pins = "dvfs_clk_px2";
+ nvidia,function = "cldvfs";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* eMMC */
+ sdmmc4_dat0_paa0 {
+ nvidia,pins = "sdmmc4_dat0_paa0";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat1_paa1 {
+ nvidia,pins = "sdmmc4_dat1_paa1";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat2_paa2 {
+ nvidia,pins = "sdmmc4_dat2_paa2";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat3_paa3 {
+ nvidia,pins = "sdmmc4_dat3_paa3";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat4_paa4 {
+ nvidia,pins = "sdmmc4_dat4_paa4";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat5_paa5 {
+ nvidia,pins = "sdmmc4_dat5_paa5";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat6_paa6 {
+ nvidia,pins = "sdmmc4_dat6_paa6";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat7_paa7 {
+ nvidia,pins = "sdmmc4_dat7_paa7";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_clk_pcc4 {
+ nvidia,pins = "sdmmc4_clk_pcc4";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_cmd_pt7 {
+ nvidia,pins = "sdmmc4_cmd_pt7";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* JTAG_RTCK */
+ jtag_rtck {
+ nvidia,pins = "jtag_rtck";
+ nvidia,function = "rtck";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* LAN_DEV_OFF# */
+ ulpi_data5_po6 {
+ nvidia,pins = "ulpi_data5_po6";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* LAN_RESET# */
+ kb_row10_ps2 {
+ nvidia,pins = "kb_row10_ps2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* LAN_WAKE# */
+ ulpi_data4_po5 {
+ nvidia,pins = "ulpi_data4_po5";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* MCU_INT1# */
+ pk2 {
+ nvidia,pins = "pk2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* MCU_INT2# */
+ pj2 {
+ nvidia,pins = "pj2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* MCU_INT3# */
+ pi5 {
+ nvidia,pins = "pi5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* MCU_INT4# */
+ pj0 {
+ nvidia,pins = "pj0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* MCU_RESET */
+ pbb6 {
+ nvidia,pins = "pbb6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* MCU SPI */
+ gpio_x4_aud_px4 {
+ nvidia,pins = "gpio_x4_aud_px4";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x5_aud_px5 {
+ nvidia,pins = "gpio_x5_aud_px5";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x6_aud_px6 { /* MCU_CS */
+ nvidia,pins = "gpio_x6_aud_px6";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x7_aud_px7 {
+ nvidia,pins = "gpio_x7_aud_px7";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gpio_w2_aud_pw2 { /* MCU_CSEZP */
+ nvidia,pins = "gpio_w2_aud_pw2";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* PMIC_CLK_32K */
+ clk_32k_in {
+ nvidia,pins = "clk_32k_in";
+ nvidia,function = "clk";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* PMIC_CPU_OC_INT */
+ clk_32k_out_pa0 {
+ nvidia,pins = "clk_32k_out_pa0";
+ nvidia,function = "soc";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* PWR_I2C */
+ pwr_i2c_scl_pz6 {
+ nvidia,pins = "pwr_i2c_scl_pz6";
+ nvidia,function = "i2cpwr";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ pwr_i2c_sda_pz7 {
+ nvidia,pins = "pwr_i2c_sda_pz7";
+ nvidia,function = "i2cpwr";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* PWR_INT_N */
+ pwr_int_n {
+ nvidia,pins = "pwr_int_n";
+ nvidia,function = "pmi";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* RESET_MOCI_CTRL */
+ pu4 {
+ nvidia,pins = "pu4";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* RESET_OUT_N */
+ reset_out_n {
+ nvidia,pins = "reset_out_n";
+ nvidia,function = "reset_out_n";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* SHIFT_CTRL_DIR_IN */
+ kb_row0_pr0 {
+ nvidia,pins = "kb_row0_pr0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row1_pr1 {
+ nvidia,pins = "kb_row1_pr1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Configure level-shifter as output for HDA */
+ kb_row11_ps3 {
+ nvidia,pins = "kb_row11_ps3";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* SHIFT_CTRL_DIR_OUT */
+ kb_col5_pq5 {
+ nvidia,pins = "kb_col5_pq5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col6_pq6 {
+ nvidia,pins = "kb_col6_pq6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col7_pq7 {
+ nvidia,pins = "kb_col7_pq7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* SHIFT_CTRL_OE */
+ kb_col0_pq0 {
+ nvidia,pins = "kb_col0_pq0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col1_pq1 {
+ nvidia,pins = "kb_col1_pq1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col2_pq2 {
+ nvidia,pins = "kb_col2_pq2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col4_pq4 {
+ nvidia,pins = "kb_col4_pq4";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row2_pr2 {
+ nvidia,pins = "kb_row2_pr2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* GPIO_PI6 aka TMP451 ALERT#/THERM2# */
+ pi6 {
+ nvidia,pins = "pi6";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* TOUCH_INT */
+ gpio_w3_aud_pw3 {
+ nvidia,pins = "gpio_w3_aud_pw3";
+ nvidia,function = "spi6";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ pc7 { /* NC */
+ nvidia,pins = "pc7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg0 { /* NC */
+ nvidia,pins = "pg0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg1 { /* NC */
+ nvidia,pins = "pg1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg2 { /* NC */
+ nvidia,pins = "pg2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg3 { /* NC */
+ nvidia,pins = "pg3";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg4 { /* NC */
+ nvidia,pins = "pg4";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph4 { /* NC */
+ nvidia,pins = "ph4";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph5 { /* NC */
+ nvidia,pins = "ph5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph6 { /* NC */
+ nvidia,pins = "ph6";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph7 { /* NC */
+ nvidia,pins = "ph7";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi0 { /* NC */
+ nvidia,pins = "pi0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi1 { /* NC */
+ nvidia,pins = "pi1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi2 { /* NC */
+ nvidia,pins = "pi2";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi4 { /* NC */
+ nvidia,pins = "pi4";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi7 { /* NC */
+ nvidia,pins = "pi7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk0 { /* NC */
+ nvidia,pins = "pk0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk1 { /* NC */
+ nvidia,pins = "pk1";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk3 { /* NC */
+ nvidia,pins = "pk3";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk4 { /* NC */
+ nvidia,pins = "pk4";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_fs_pn0 { /* NC */
+ nvidia,pins = "dap1_fs_pn0";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_din_pn1 { /* NC */
+ nvidia,pins = "dap1_din_pn1";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_sclk_pn3 { /* NC */
+ nvidia,pins = "dap1_sclk_pn3";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data7_po0 { /* NC */
+ nvidia,pins = "ulpi_data7_po0";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data0_po1 { /* NC */
+ nvidia,pins = "ulpi_data0_po1";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data1_po2 { /* NC */
+ nvidia,pins = "ulpi_data1_po2";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data2_po3 { /* NC */
+ nvidia,pins = "ulpi_data2_po3";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data3_po4 { /* NC */
+ nvidia,pins = "ulpi_data3_po4";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data6_po7 { /* NC */
+ nvidia,pins = "ulpi_data6_po7";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_fs_pp4 { /* NC */
+ nvidia,pins = "dap4_fs_pp4";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_din_pp5 { /* NC */
+ nvidia,pins = "dap4_din_pp5";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_dout_pp6 { /* NC */
+ nvidia,pins = "dap4_dout_pp6";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_sclk_pp7 { /* NC */
+ nvidia,pins = "dap4_sclk_pp7";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col3_pq3 { /* NC */
+ nvidia,pins = "kb_col3_pq3";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row3_pr3 { /* NC */
+ nvidia,pins = "kb_row3_pr3";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row4_pr4 { /* NC */
+ nvidia,pins = "kb_row4_pr4";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row5_pr5 { /* NC */
+ nvidia,pins = "kb_row5_pr5";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row6_pr6 { /* NC */
+ nvidia,pins = "kb_row6_pr6";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row7_pr7 { /* NC */
+ nvidia,pins = "kb_row7_pr7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row8_ps0 { /* NC */
+ nvidia,pins = "kb_row8_ps0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row9_ps1 { /* NC */
+ nvidia,pins = "kb_row9_ps1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row12_ps4 { /* NC */
+ nvidia,pins = "kb_row12_ps4";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row13_ps5 { /* NC */
+ nvidia,pins = "kb_row13_ps5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row14_ps6 { /* NC */
+ nvidia,pins = "kb_row14_ps6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row15_ps7 { /* NC */
+ nvidia,pins = "kb_row15_ps7";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row16_pt0 { /* NC */
+ nvidia,pins = "kb_row16_pt0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row17_pt1 { /* NC */
+ nvidia,pins = "kb_row17_pt1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pu5 { /* NC */
+ nvidia,pins = "pu5";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pv0 { /* NC */
+ nvidia,pins = "pv0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pv1 { /* NC */
+ nvidia,pins = "pv1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x1_aud_px1 { /* NC */
+ nvidia,pins = "gpio_x1_aud_px1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x3_aud_px3 { /* NC */
+ nvidia,pins = "gpio_x3_aud_px3";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pbb7 { /* NC */
+ nvidia,pins = "pbb7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pcc1 { /* NC */
+ nvidia,pins = "pcc1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pcc2 { /* NC */
+ nvidia,pins = "pcc2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ clk3_req_pee1 { /* NC */
+ nvidia,pins = "clk3_req_pee1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap_mclk1_req_pee2 { /* NC */
+ nvidia,pins = "dap_mclk1_req_pee2";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+			/*
+			 * Leave SDMMC3_CLK_LB_OUT muxed as SDMMC3 with its output
+			 * driver enabled (i.e. not tristated) and its input driver
+			 * enabled as well, as it has some magic properties even
+			 * though the external loopback is disabled and the internal
+			 * loopback is used, as per the SDMMC_VENDOR_MISC_CNTRL_0
+			 * register's SDMMC_SPARE1 bits being set to 0xfffd
+			 * according to the TRM.
+			 */
+ sdmmc3_clk_lb_out_pee4 { /* NC */
+ nvidia,pins = "sdmmc3_clk_lb_out_pee4";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ };
+
+/* TBD */
+ pinmux_unused_lowpower: unused_lowpower {
+ dap1_din_pn1 {
+ nvidia,pins = "dap1_din_pn1";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ };
+
+ drive_default: drive {
+ drive_sdio1 {
+ nvidia,pins = "drive_sdio1";
+ nvidia,high-speed-mode = <TEGRA_PIN_ENABLE>;
+ nvidia,schmitt = <TEGRA_PIN_DISABLE>;
+ nvidia,pull-down-strength = <32>;
+ nvidia,pull-up-strength = <42>;
+ nvidia,slew-rate-rising = <TEGRA_PIN_SLEW_RATE_FASTEST>;
+ nvidia,slew-rate-falling = <TEGRA_PIN_SLEW_RATE_FASTEST>;
+ };
+
+ drive_sdio3 {
+ nvidia,pins = "drive_sdio3";
+ nvidia,high-speed-mode = <TEGRA_PIN_ENABLE>;
+ nvidia,schmitt = <TEGRA_PIN_DISABLE>;
+ nvidia,pull-down-strength = <20>;
+ nvidia,pull-up-strength = <36>;
+ nvidia,slew-rate-rising = <TEGRA_PIN_SLEW_RATE_FASTEST>;
+ nvidia,slew-rate-falling = <TEGRA_PIN_SLEW_RATE_FASTEST>;
+ };
+
+ drive_gma {
+ nvidia,pins = "drive_gma";
+ nvidia,high-speed-mode = <TEGRA_PIN_ENABLE>;
+ nvidia,schmitt = <TEGRA_PIN_DISABLE>;
+ nvidia,pull-down-strength = <1>;
+ nvidia,pull-up-strength = <2>;
+ nvidia,slew-rate-rising = <TEGRA_PIN_SLEW_RATE_FASTEST>;
+ nvidia,slew-rate-falling = <TEGRA_PIN_SLEW_RATE_FASTEST>;
+ nvidia,drive-type = <1>;
+ };
+ };
+ };
+};
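
The pin groups above are applied through the pinmux controller's pinctrl states (the v1.2 pinmux file further down shows the corresponding pinctrl-names/pinctrl-0..2 properties). As a minimal, hypothetical sketch of how a carrier-board .dts could repurpose a single pin group without touching the module-level file, the snippet below overrides one group via the controller's node label; the &pinmux label and the board_overrides node name are assumptions for illustration (the label is only visible in the v1.2 pinmux file below), while the pff2 group, the nvidia,* properties and the TEGRA_PIN_* macros follow the same Tegra124 pinctrl binding used throughout this patch.

	/* Hypothetical carrier-board override, not part of this patch:
	 * reconfigure Apalis GPIO1 (pff2, as in the v1.2 pinmux below)
	 * as a pulled-up input.
	 */
	&pinmux {
		pinctrl-0 = <&pinmux_default &board_overrides>;

		board_overrides: board {
			pff2 {
				nvidia,pins = "pff2";
				nvidia,function = "rsvd2";
				nvidia,pull = <TEGRA_PIN_PULL_UP>;
				nvidia,tristate = <TEGRA_PIN_ENABLE>;
				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
			};
		};
	};
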
diff --git a/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-pmic.dtsi b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-pmic.dtsi
new file mode 100644
index 000000000000..342487b285f0
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-pmic.dtsi
@@ -0,0 +1,498 @@
+#include <dt-bindings/gpio/tegra-gpio.h>
+#include <dt-bindings/mfd/as3722.h>
+#include <dt-bindings/regulator/regulator.h>
+
+/ {
+ i2c@7000d000 {
+ as3722: as3722@40 {
+ compatible = "ams,as3722";
+ reg = <0x40>;
+ interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
+
+ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ ams,major-rev = <1>;
+ ams,minor-rev = <2>;
+ ams,system-power-controller;
+ ams,extcon-name = "as3722-extcon";
+ ams,enable-adc1-continuous-mode;
+ ams,enable-low-voltage-range;
+ ams,adc-channel = <12>;
+ ams,hi-threshold = <256>;
+ ams,low-threshold = <128>;
+ ams,enable-clock32k-out;
+ ams,backup-battery-chargable;
+ ams,battery-backup-charge-current = <AS3722_BBCCUR_400UA>;
+ ams,battery-backup-enable-bypass;
+ ams,battery-backup-charge-mode = <AS3722_BBCMODE_ACT_STBY_OFF>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&as3722_default>;
+
+ as3722_default: pinmux@0 {
+ gpio2_7 {
+ pins = "gpio2", /* PWR_EN_+V3.3 */
+ "gpio7"; /* +V1.6_LPO */
+ function = "gpio";
+ bias-pull-up;
+ };
+
+ gpio1_3_4_5_6 {
+ pins = "gpio1", "gpio3", "gpio4",
+ "gpio5", "gpio6";
+ bias-high-impedance;
+ };
+ };
+
+ regulators {
+ compatible = "ams,as3722";
+ ldo0-in-supply = <&as3722_sd2>;
+ ldo2-in-supply = <&as3722_sd5>;
+ ldo5-in-supply = <&as3722_sd5>;
+ ldo7-in-supply = <&as3722_sd5>;
+
+ as3722_sd0: sd0 {
+ regulator-name = "+VDD_CPU_AP";
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-min-microamp = <3500000>;
+ regulator-max-microamp = <3500000>;
+ regulator-always-on;
+ regulator-boot-on;
+ ams,ext-control = <AS3722_EXT_CONTROL_ENABLE2>;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "vdd_cpu";
+ };
+ };
+ };
+
+ as3722_sd1: sd1 {
+ regulator-name = "+VDD_CORE";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-min-microamp = <3500000>;
+ regulator-max-microamp = <3500000>;
+ regulator-init-microvolt = <1150000>;
+ regulator-always-on;
+ regulator-boot-on;
+ ams,ext-control = <AS3722_EXT_CONTROL_ENABLE1>;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "vdd_core";
+ };
+ };
+ };
+
+ as3722_sd2: sd2 {
+ regulator-name = "+V1.35_VDDIO_DDR(sd2)";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "vddio_ddr";
+ };
+ c2 {
+ regulator-consumer-supply = "vddio_ddr_mclk";
+ };
+ c3 {
+ regulator-consumer-supply = "vddio_ddr3";
+ };
+ c4 {
+ regulator-consumer-supply = "vcore1_ddr3";
+ };
+ };
+ };
+
+ as3722_sd4: sd4 {
+ regulator-name = "+V1.05";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "avdd_pex_pll";
+ regulator-consumer-device = "tegra-pcie";
+ };
+ c2 {
+ regulator-consumer-supply = "avddio_pex_pll";
+ regulator-consumer-device = "tegra-pcie";
+ };
+ c3 {
+ regulator-consumer-supply = "dvddio_pex";
+ regulator-consumer-device = "tegra-pcie";
+ };
+ c4 {
+ regulator-consumer-supply = "pwrdet_pex_ctl";
+ regulator-consumer-device = "tegra-pcie";
+ };
+ c5 {
+ regulator-consumer-supply = "avdd_sata";
+ };
+ c6 {
+ regulator-consumer-supply = "vdd_sata";
+ };
+ c7 {
+ regulator-consumer-supply = "avdd_sata_pll";
+ };
+ c8 {
+ regulator-consumer-supply = "avddio_usb";
+ regulator-consumer-device = "tegra-xhci";
+ };
+ c9 {
+ regulator-consumer-supply = "avdd_hdmi";
+ regulator-consumer-device = "tegradc.1";
+ };
+ c10 {
+ regulator-consumer-supply = "avdd_hdmi";
+ regulator-consumer-device = "tegradc.0";
+ };
+ };
+ };
+
+ as3722_sd5: sd5 {
+ regulator-name = "+V1.8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "vddio_sys";
+ };
+ c2 {
+ regulator-consumer-supply = "vddio_sys_2";
+ };
+ c3 {
+ regulator-consumer-supply = "vddio_audio";
+ };
+ c4 {
+ regulator-consumer-supply = "pwrdet_audio";
+ };
+ c5 {
+ regulator-consumer-supply = "vdd_1v8_emmc";
+ };
+ c6 {
+ regulator-consumer-supply = "vddio_sdmmc";
+ regulator-consumer-device = "sdhci-tegra.3";
+ };
+ c7 {
+ regulator-consumer-supply = "pwrdet_sdmmc4";
+ };
+ c8 {
+ regulator-consumer-supply = "vddio_uart";
+ };
+ c9 {
+ regulator-consumer-supply = "pwrdet_uart";
+ };
+ c10 {
+ regulator-consumer-supply = "vddio_bb";
+ };
+ c11 {
+ regulator-consumer-supply = "pwrdet_bb";
+ };
+ c12 {
+ regulator-consumer-supply = "vddio_gmi";
+ };
+ c13 {
+ regulator-consumer-supply = "pwrdet_nand";
+ };
+ c14 {
+ regulator-consumer-supply = "avdd_osc";
+ };
+ c15 {
+ /* LVDS */
+ regulator-consumer-supply = "dvdd_lcd";
+ };
+ c16 {
+ /* LVDS */
+ regulator-consumer-supply = "vdd_ds_1v8";
+ };
+ c17 {
+ regulator-consumer-supply = "VDDD";
+ regulator-consumer-device = "4-000a";
+ };
+ c18 {
+ regulator-consumer-supply = "VDDIO";
+ regulator-consumer-device = "4-000a";
+ };
+ c19 {
+ regulator-consumer-supply = "vdd_1v8_sensor";
+ };
+ c20 {
+ regulator-consumer-supply = "vdd_1v8_sdmmc";
+ };
+ c21 {
+ regulator-consumer-supply = "vdd_kp_1v8";
+ };
+ c22 {
+ regulator-consumer-supply = "vdd_tp_1v8";
+ };
+ c23 {
+ regulator-consumer-supply = "dvdd";
+ regulator-consumer-device = "spi0.0";
+ };
+ c24 {
+ regulator-consumer-supply = "vlogic";
+ regulator-consumer-device = "0-0069";
+ };
+ c25 {
+ regulator-consumer-supply = "avdd_pll_utmip";
+ regulator-consumer-device = "tegra-udc.0";
+ };
+ c26 {
+ regulator-consumer-supply = "avdd_pll_utmip";
+ regulator-consumer-device = "tegra-ehci.0";
+ };
+ c27 {
+ regulator-consumer-supply = "avdd_pll_utmip";
+ regulator-consumer-device = "tegra-ehci.1";
+ };
+ c28 {
+ regulator-consumer-supply = "avdd_pll_utmip";
+ regulator-consumer-device = "tegra-ehci.2";
+ };
+ c29 {
+ regulator-consumer-supply = "avdd_pll_utmip";
+ regulator-consumer-device = "tegra-xhci";
+ };
+ };
+ };
+
+ as3722_sd6: sd6 {
+ regulator-name = "+VDD_GPU_AP";
+ regulator-min-microvolt = <650000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-min-microamp = <3500000>;
+ regulator-max-microamp = <3500000>;
+ regulator-init-microvolt = <1000000>;
+ regulator-boot-on;
+ regulator-always-on;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "vdd_gpu";
+ };
+ c2 {
+ regulator-consumer-supply = "vdd_gpu_simon";
+ };
+ };
+ };
+
+ as3722_ldo0: ldo0 {
+ regulator-name = "+V1.05_AVDD";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-boot-on;
+ regulator-always-on;
+ ams,ext-control = <AS3722_EXT_CONTROL_ENABLE1>;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "avdd_pll_m";
+ };
+ c2 {
+ regulator-consumer-supply = "avdd_pll_ap_c2_c3";
+ };
+ c3 {
+ regulator-consumer-supply = "avdd_pll_cud2dpd";
+ };
+ c4 {
+ regulator-consumer-supply = "avdd_pll_c4";
+ };
+ c5 {
+ regulator-consumer-supply = "avdd_lvds0_io";
+ };
+ c6 {
+ regulator-consumer-supply = "vddio_ddr_hs";
+ };
+ c7 {
+ regulator-consumer-supply = "avdd_pll_erefe";
+ };
+ c8 {
+ regulator-consumer-supply = "avdd_pll_x";
+ };
+ c9 {
+ regulator-consumer-supply = "avdd_pll_cg";
+ };
+ };
+ };
+
+ as3722_ldo1: ldo1 {
+ regulator-name = "VDDIO_SDMMC1";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "vddio_sdmmc";
+ regulator-consumer-device = "sdhci-tegra.0";
+ };
+ c2 {
+ regulator-consumer-supply = "pwrdet_sdmmc1";
+ };
+ };
+ };
+
+ as3722_ldo2: ldo2 {
+ regulator-name = "+V1.2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-boot-on;
+ regulator-always-on;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "vddio_hsic";
+ regulator-consumer-device = "tegra-ehci.1";
+ };
+ c2 {
+ regulator-consumer-supply = "vddio_hsic";
+ regulator-consumer-device = "tegra-ehci.2";
+ };
+ c3 {
+ regulator-consumer-supply = "vddio_hsic";
+ regulator-consumer-device = "tegra-xhci";
+ };
+ c4 {
+ regulator-consumer-supply = "avdd_dsi_csi";
+ regulator-consumer-device = "tegradc.0";
+ };
+ c5 {
+ regulator-consumer-supply = "avdd_dsi_csi";
+ regulator-consumer-device = "tegradc.1";
+ };
+ c6 {
+ regulator-consumer-supply = "avdd_dsi_csi";
+ regulator-consumer-device = "vi.0";
+ };
+ c7 {
+ regulator-consumer-supply = "avdd_dsi_csi";
+ regulator-consumer-device = "vi.1";
+ };
+ c8 {
+ regulator-consumer-supply = "pwrdet_mipi";
+ };
+ c9 {
+ regulator-consumer-supply = "avdd_hsic_com";
+ };
+ c10 {
+ regulator-consumer-supply = "avdd_hsic_mdm";
+ };
+ c11 {
+ regulator-consumer-supply = "vdig_csi";
+ regulator-consumer-device = "2-0036";
+ };
+ c12 {
+ /* panel-a-1080p-14-0.c */
+ regulator-consumer-supply = "vdd_1v2_en";
+ };
+ };
+ };
+
+ as3722_ldo3: ldo3 {
+ regulator-name = "+V1.05_RTC";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ ams,enable-tracking;
+ ams,disable-tracking-suspend;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "vdd_rtc";
+ };
+ };
+ };
+
+ /* 1.8V for LVDS, 3.3V for eDP */
+ as3722_ldo4: ldo4 {
+ regulator-name = "AVDD_LVDS0_PLL";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-init-microvolt = <1800000>;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "avdd_lvds0_pll";
+ };
+ c2 {
+ regulator-consumer-supply = "avdd_3v3_dp";
+ };
+ c3 {
+ regulator-consumer-supply = "avdd_lcd";
+ };
+ };
+ };
+
+ /* LDO5 not used */
+
+ as3722_ldo6: ldo6 {
+ regulator-name = "VDDIO_SDMMC3";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "vddio_sdmmc";
+ regulator-consumer-device = "sdhci-tegra.2";
+ };
+ c2 {
+ regulator-consumer-supply = "pwrdet_sdmmc3";
+ };
+ };
+ };
+
+ /* LDO7 not used */
+
+ as3722_ldo9: ldo9 {
+ regulator-name = "+V3.3_ETH(ldo9)";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "i210_vdd3p3_ldo9";
+ regulator-consumer-device = "pcie-controller.1";
+ };
+ };
+ };
+
+ as3722_ldo10: ldo10 {
+ regulator-name = "+V3.3_ETH(ldo10)";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "i210_vdd3p3_ldo10";
+ regulator-consumer-device = "pcie-controller.1";
+ };
+ };
+ };
+
+ as3722_ldo11: ldo11 {
+ regulator-name = "+V1.8_VPP_FUSE";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+ };
+
+ /* Populate fuse supply */
+ efuse@7000f800 {
+ vpp_fuse-supply = <&as3722_ldo11>;
+ };
+};
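
Beyond the downstream-style consumers sub-nodes above, the AS3722 rails are also referenced through ordinary phandle supply properties, as the ldoN-in-supply entries and the efuse's vpp_fuse-supply show. A minimal sketch of the same phandle pattern from a further consumer node is given below; the node name, unit address and compatible string are hypothetical placeholders, and the supply property name is illustrative only.

	/* Illustrative only: a consumer picking up the VDDIO_SDMMC3 rail
	 * via a standard phandle supply, mirroring the efuse example above.
	 */
	/ {
		example_consumer@0 {				/* hypothetical node */
			compatible = "example,hypothetical";	/* placeholder */
			vddio-supply = <&as3722_ldo6>;		/* VDDIO_SDMMC3 */
		};
	};
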
diff --git a/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-v1.2-displays.dtsi b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-v1.2-displays.dtsi
new file mode 100644
index 000000000000..e7141b33e9a3
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-v1.2-displays.dtsi
@@ -0,0 +1,12 @@
+/ {
+ host1x {
+ hdmi {
+ status = "okay";
+ nvidia,ddc-i2c-bus = <&hdmi_ddc>;
+ };
+ };
+
+ hdmi_ddc: i2c@7000c700 {
+ clock-frequency = <10000>;
+ };
+};
diff --git a/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-v1.2-fixed.dtsi b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-v1.2-fixed.dtsi
new file mode 100644
index 000000000000..5639ba3c7f99
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-v1.2-fixed.dtsi
@@ -0,0 +1,229 @@
+#include <dt-bindings/gpio/tegra-gpio.h>
+
+/ {
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ avdd_hdmi_pll: regulator@0 {
+ compatible = "regulator-fixed-sync";
+ reg = <0>;
+ regulator-name = "+V1.05_AVDD_HDMI_PLL";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ gpio = <&gpio TEGRA_GPIO(H, 7) 0>;
+ vin-supply = <&as3722_sd4>;
+ consumers {
+ c1 {
+ regulator-consumer-supply = "avdd_hdmi_pll";
+ regulator-consumer-device = "tegradc.1";
+ };
+ c2 {
+ regulator-consumer-supply = "avdd_hdmi_pll";
+ regulator-consumer-device = "tegradc.0";
+ };
+ };
+ };
+
+ reg_3v3_mxm: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ regulator-name = "+V3.3_MXM";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "vdd_3v3_emmc";
+ };
+ c2 {
+ regulator-consumer-supply = "vdd_com_3v3";
+ };
+ };
+ };
+
+ reg_3v3: regulator@2 {
+ compatible = "regulator-fixed-sync";
+ reg = <2>;
+ regulator-name = "+V3.3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ /* PWR_EN_+V3.3 */
+ gpio = <&as3722 2 0>;
+ enable-active-high;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "avdd_usb";
+ regulator-consumer-device = "tegra-udc.0";
+ };
+ c2 {
+ regulator-consumer-supply = "avdd_usb";
+ regulator-consumer-device = "tegra-ehci.0";
+ };
+ c3 {
+ regulator-consumer-supply = "avdd_usb";
+ regulator-consumer-device = "tegra-ehci.1";
+ };
+ c4 {
+ regulator-consumer-supply = "avdd_usb";
+ regulator-consumer-device = "tegra-ehci.2";
+ };
+ c5 {
+ regulator-consumer-supply = "hvdd_usb";
+ regulator-consumer-device = "tegra-xhci";
+ };
+ c6 {
+ regulator-consumer-supply = "vddio_hv";
+ regulator-consumer-device = "tegradc.1";
+ };
+ c7 {
+ regulator-consumer-supply = "pwrdet_hv";
+ };
+ c8 {
+ regulator-consumer-supply = "hvdd_sata";
+ };
+ c9 {
+ regulator-consumer-supply = "hvdd_pex";
+ regulator-consumer-device = "tegra-pcie";
+ };
+ c10 {
+ regulator-consumer-supply = "hvdd_pex_pll";
+ regulator-consumer-device = "tegra-pcie";
+ };
+ c11 {
+ regulator-consumer-supply = "vdd_sys_cam_3v3";
+ };
+ c12 {
+ regulator-consumer-supply = "VDDA";
+ regulator-consumer-device = "4-000a";
+ };
+ c13 {
+ regulator-consumer-supply = "vddio_sd_slot";
+ regulator-consumer-device = "sdhci-tegra.0";
+ };
+ c14 {
+ regulator-consumer-supply = "vddio_sd_slot";
+ regulator-consumer-device = "sdhci-tegra.2";
+ };
+ c15 {
+ regulator-consumer-supply = "vdd_3v3_sensor";
+ };
+ c16 {
+ regulator-consumer-supply = "vdd_kp_3v3";
+ };
+ c17 {
+ regulator-consumer-supply = "vdd_tp_3v3";
+ };
+ c18 {
+ regulator-consumer-supply = "vdd_dtv_3v3";
+ };
+ c19 {
+ regulator-consumer-supply = "vdd";
+ regulator-consumer-device = "4-004c";
+ };
+ };
+ };
+
+ reg_5v0: regulator@3 {
+ compatible = "regulator-fixed-sync";
+ reg = <3>;
+ regulator-name = "5V_SW";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "vdd_hdmi_5v0";
+ regulator-consumer-device = "tegradc.1";
+ };
+ c2 {
+ regulator-consumer-supply = "vdd_hdmi_5v0";
+ regulator-consumer-device = "tegradc.0";
+ };
+ c3 {
+ regulator-consumer-supply = "vdd_5v0_sensor";
+ };
+ };
+ };
+
+ /* USBO1_EN */
+ reg_usbo1_vbus: regulator@4 {
+ compatible = "regulator-fixed-sync";
+ reg = <4>;
+ regulator-name = "VCC_USBO1";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio TEGRA_GPIO(T, 5) 0>;
+ enable-active-high;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "usb_vbus";
+ regulator-consumer-device = "tegra-ehci.0";
+ };
+ c2 {
+ regulator-consumer-supply = "usb_vbus";
+ regulator-consumer-device = "tegra-otg";
+ };
+ c3 {
+ regulator-consumer-supply = "usb_vbus0";
+ regulator-consumer-device = "tegra-xhci";
+ };
+ };
+ };
+
+ /* USBH_EN */
+ reg_usbh_vbus: regulator@5 {
+ compatible = "regulator-fixed-sync";
+ reg = <5>;
+ regulator-name = "VCC_USBH(2A|2C|2D|3|4)";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio TEGRA_GPIO(T, 6) 0>;
+ enable-active-high;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "usb_vbus";
+ regulator-consumer-device = "tegra-ehci.2";
+ };
+ c2 {
+ regulator-consumer-supply = "usb_vbus2";
+ regulator-consumer-device = "tegra-xhci";
+ };
+ c3 {
+ regulator-consumer-supply = "usb_vbus";
+ regulator-consumer-device = "tegra-ehci.1";
+ };
+ c4 {
+ regulator-consumer-supply = "usb_vbus1";
+ regulator-consumer-device = "tegra-xhci";
+ };
+ };
+ };
+
+ vdd_lcd_bl_en: regulator@6 {
+ compatible = "regulator-fixed-sync";
+ reg = <6>;
+ regulator-name = "BKL1_ON";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ /* Apalis BKL1_ON */
+ gpio = <&gpio TEGRA_GPIO(BB, 5) 0>;
+ enable-active-high;
+
+ consumers {
+ c1 {
+ regulator-consumer-supply = "vdd_lcd_bl";
+ };
+ c2 {
+ regulator-consumer-supply = "vdd_lcd_bl_en";
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-v1.2-gpio.dtsi b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-v1.2-gpio.dtsi
new file mode 100644
index 000000000000..e67f79a3c5dd
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-v1.2-gpio.dtsi
@@ -0,0 +1,66 @@
+#include <dt-bindings/gpio/tegra-gpio.h>
+
+/ {
+ gpio: gpio@6000d000 {
+ gpio-init-names = "default";
+ gpio-init-0 = <&gpio_default>;
+
+ gpio_default: default {
+ gpio-input = <
+ TEGRA_GPIO(A, 1) /* 120 UART1_DSR */
+ TEGRA_GPIO(B, 1) /* 124 UART1_DCD */
+ TEGRA_GPIO(I, 5) /* MCU_INT3# */
+ TEGRA_GPIO(I, 6) /* TEMP_ALERT_L */
+ TEGRA_GPIO(J, 0) /* MCU_INT4# */
+ TEGRA_GPIO(J, 2) /* MCU_INT2# */
+ TEGRA_GPIO(K, 2) /* MCU_INT1# */
+ TEGRA_GPIO(K, 7) /* 122 UART1_RI */
+ TEGRA_GPIO(N, 4) /* 5 Apalis GPIO3 */
+ TEGRA_GPIO(N, 5) /* 7 Apalis GPIO4 */
+ TEGRA_GPIO(N, 7) /* 232 HDMI1_HPD */
+ TEGRA_GPIO(V, 3) /* 164 MMC1_CD# */
+ TEGRA_GPIO(W, 3) /* TOUCH_INT */
+ TEGRA_GPIO(W, 5) /* 152 MMC1_D5 */
+ TEGRA_GPIO(BB, 0) /* 96 USBH_OC# */
+ TEGRA_GPIO(BB, 4) /* 262 USBO1_OC# */
+ TEGRA_GPIO(CC, 5) /* 148 MMC1_D4 */
+ TEGRA_GPIO(DD, 1) /* 15 Apalis GPIO7 */
+ TEGRA_GPIO(DD, 2) /* 17 Apalis GPIO8 */
+ TEGRA_GPIO(DD, 3) /* 37 WAKE1_MICO */
+ TEGRA_GPIO(DD, 5) /* 11 Apalis GPIO5 */
+ TEGRA_GPIO(DD, 6) /* 13 Apalis GPIO6 */
+ TEGRA_GPIO(EE, 3) /* 220 HDMI1_CEC */
+ TEGRA_GPIO(EE, 5) /* 156 MMC1_D6 */
+#ifndef APALIS_TK1_EDP /* used as optional eDP hot-plug pin */
+ TEGRA_GPIO(FF, 0) /* 3 Apalis GPIO2 */
+#endif
+ TEGRA_GPIO(FF, 1) /* 158 MMC1_D7 */
+ TEGRA_GPIO(FF, 2) /* 1 Apalis GPIO1 */
+ >;
+ gpio-output-low = <
+ TEGRA_GPIO(C, 0) /* 110 UART1_DTR */
+ TEGRA_GPIO(O, 5) /* LAN_WAKE_N */
+ TEGRA_GPIO(O, 6) /* LAN_DEV_OFF_N */
+ TEGRA_GPIO(Q, 0) /* Shift_CTRL_OE[0] */
+ TEGRA_GPIO(Q, 1) /* Shift_CTRL_OE[1] */
+ TEGRA_GPIO(Q, 2) /* Shift_CTRL_OE[2] */
+ TEGRA_GPIO(Q, 4) /* Shift_CTRL_OE[4] */
+ TEGRA_GPIO(R, 0) /* Shift_CTRL_Dir_In[0] */
+ TEGRA_GPIO(R, 1) /* Shift_CTRL_Dir_In[1] */
+ TEGRA_GPIO(R, 2) /* Shift_CTRL_OE[3] */
+ TEGRA_GPIO(S, 2) /* LAN_RESET_N */
+ TEGRA_GPIO(S, 3) /* Shift_CTRL_Dir_In[2] */
+ TEGRA_GPIO(U, 4) /* RESET_MOCI_CTRL */
+ TEGRA_GPIO(BB, 3) /* 198 DAP1_RESET */
+ TEGRA_GPIO(BB, 6) /* MCU_RESET */
+ >;
+ gpio-output-high = <
+ TEGRA_GPIO(N, 2) /* 35 SATA1_ACT# */
+ TEGRA_GPIO(Q, 5) /* Shift_CTRL_Dir_Out[0] */
+ TEGRA_GPIO(Q, 6) /* Shift_CTRL_Dir_Out[1] */
+ TEGRA_GPIO(Q, 7) /* Shift_CTRL_Dir_Out[2] */
+ TEGRA_GPIO(BB, 5) /* 286 BKL1_ON */
+ >;
+ };
+ };
+};
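
The gpio-input/gpio-output-low/gpio-output-high lists above pre-configure the pins through the downstream gpio-init mechanism; individual pins are then picked up elsewhere with <&gpio TEGRA_GPIO(bank, pin) flags> phandles, as the fixed regulators in this patch do. Purely as a hypothetical sketch (this consumer node is not part of the patch), the same form could expose WAKE1_MICO as a key:

	/* Hypothetical gpio-keys consumer for WAKE1_MICO (PDD3); the key
	 * node itself is an assumption, only the gpio phandle form matches
	 * the rest of this patch.
	 */
	/ {
		gpio-keys {
			compatible = "gpio-keys";

			wake {
				label = "WAKE1_MICO";
				gpios = <&gpio TEGRA_GPIO(DD, 3) 1>; /* 1 = active low */
				linux,code = <143>; /* KEY_WAKEUP */
			};
		};
	};
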
diff --git a/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-v1.2-pinmux.dtsi b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-v1.2-pinmux.dtsi
new file mode 100644
index 000000000000..d7f5a6c46dc1
--- /dev/null
+++ b/arch/arm/boot/dts/tegra124-platforms/tegra124-apalis-v1.2-pinmux.dtsi
@@ -0,0 +1,1522 @@
+#include <dt-bindings/pinctrl/pinctrl-tegra.h>
+
+/ {
+ pinmux: pinmux {
+ status = "okay";
+ pinctrl-names = "default", "drive", "unused";
+ pinctrl-0 = <&pinmux_default>;
+ pinctrl-1 = <&drive_default>;
+ pinctrl-2 = <&pinmux_unused_lowpower>;
+
+ pinmux_default: common {
+ /* Analogue Audio (On-module) */
+ dap3_fs_pp0 {
+ nvidia,pins = "dap3_fs_pp0";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap3_din_pp1 {
+ nvidia,pins = "dap3_din_pp1";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap3_dout_pp2 {
+ nvidia,pins = "dap3_dout_pp2";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap3_sclk_pp3 {
+ nvidia,pins = "dap3_sclk_pp3";
+ nvidia,function = "i2s2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap_mclk1_pw4 {
+ nvidia,pins = "dap_mclk1_pw4";
+ nvidia,function = "extperiph1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis BKL1_ON */
+ pbb5 {
+ nvidia,pins = "pbb5";
+ nvidia,function = "vgp5";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis BKL1_PWM */
+ pu6 {
+ nvidia,pins = "pu6";
+ nvidia,function = "pwm3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis CAM1_MCLK */
+ cam_mclk_pcc0 {
+ nvidia,pins = "cam_mclk_pcc0";
+ nvidia,function = "vi_alt3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis Digital Audio */
+ dap2_fs_pa2 {
+ nvidia,pins = "dap2_fs_pa2";
+ nvidia,function = "hda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap2_sclk_pa3 {
+ nvidia,pins = "dap2_sclk_pa3";
+ nvidia,function = "hda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap2_din_pa4 {
+ nvidia,pins = "dap2_din_pa4";
+ nvidia,function = "hda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dap2_dout_pa5 {
+ nvidia,pins = "dap2_dout_pa5";
+ nvidia,function = "hda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pbb3 { /* DAP1_RESET */
+ nvidia,pins = "pbb3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ clk3_out_pee0 {
+ nvidia,pins = "clk3_out_pee0";
+ nvidia,function = "extperiph3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis GPIO */
+ usb_vbus_en0_pn4 {
+ nvidia,pins = "usb_vbus_en0_pn4";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ usb_vbus_en1_pn5 {
+ nvidia,pins = "usb_vbus_en1_pn5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+ pex_l0_rst_n_pdd1 {
+ nvidia,pins = "pex_l0_rst_n_pdd1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l0_clkreq_n_pdd2 {
+ nvidia,pins = "pex_l0_clkreq_n_pdd2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l1_rst_n_pdd5 {
+ nvidia,pins = "pex_l1_rst_n_pdd5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pex_l1_clkreq_n_pdd6 {
+ nvidia,pins = "pex_l1_clkreq_n_pdd6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ dp_hpd_pff0 {
+ nvidia,pins = "dp_hpd_pff0";
+ nvidia,function = "dp";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pff2 {
+ nvidia,pins = "pff2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ owr { /* PEX_L1_CLKREQ_N multiplexed GPIO6 */
+ nvidia,pins = "owr";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,rcv-sel = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis HDMI1_CEC */
+ hdmi_cec_pee3 {
+ nvidia,pins = "hdmi_cec_pee3";
+ nvidia,function = "cec";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis HDMI1_HPD */
+ hdmi_int_pn7 {
+ nvidia,pins = "hdmi_int_pn7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,rcv-sel = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis I2C1 */
+ gen1_i2c_scl_pc4 {
+ nvidia,pins = "gen1_i2c_scl_pc4";
+ nvidia,function = "i2c1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ gen1_i2c_sda_pc5 {
+ nvidia,pins = "gen1_i2c_sda_pc5";
+ nvidia,function = "i2c1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis I2C3 (CAM) */
+ cam_i2c_scl_pbb1 {
+ nvidia,pins = "cam_i2c_scl_pbb1";
+ nvidia,function = "i2c3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ cam_i2c_sda_pbb2 {
+ nvidia,pins = "cam_i2c_sda_pbb2";
+ nvidia,function = "i2c3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis I2C4 (DDC) */
+ ddc_scl_pv4 {
+ nvidia,pins = "ddc_scl_pv4";
+ nvidia,function = "i2c4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,rcv-sel = <TEGRA_PIN_ENABLE>;
+ };
+ ddc_sda_pv5 {
+ nvidia,pins = "ddc_sda_pv5";
+ nvidia,function = "i2c4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,rcv-sel = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis MMC1 */
+ sdmmc1_cd_n_pv3 { /* CD# GPIO */
+ nvidia,pins = "sdmmc1_wp_n_pv3";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ clk2_out_pw5 { /* D5 GPIO */
+ nvidia,pins = "clk2_out_pw5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat3_py4 {
+ nvidia,pins = "sdmmc1_dat3_py4";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat2_py5 {
+ nvidia,pins = "sdmmc1_dat2_py5";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat1_py6 {
+ nvidia,pins = "sdmmc1_dat1_py6";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_dat0_py7 {
+ nvidia,pins = "sdmmc1_dat0_py7";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_clk_pz0 {
+ nvidia,pins = "sdmmc1_clk_pz0";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc1_cmd_pz1 {
+ nvidia,pins = "sdmmc1_cmd_pz1";
+ nvidia,function = "sdmmc1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ clk2_req_pcc5 { /* D4 GPIO */
+ nvidia,pins = "clk2_req_pcc5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_clk_lb_in_pee5 { /* D6 GPIO */
+ nvidia,pins = "sdmmc3_clk_lb_in_pee5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ usb_vbus_en2_pff1 { /* D7 GPIO */
+ nvidia,pins = "usb_vbus_en2_pff1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis PWM */
+ ph0 {
+ nvidia,pins = "ph0";
+ nvidia,function = "pwm0";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph1 {
+ nvidia,pins = "ph1";
+ nvidia,function = "pwm1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph2 {
+ nvidia,pins = "ph2";
+ nvidia,function = "pwm2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+			/* PWM3 is also active on pu6 aka Apalis BKL1_PWM */
+ ph3 {
+ nvidia,pins = "ph3";
+ nvidia,function = "pwm3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis SATA1_ACT# */
+ dap1_dout_pn2 {
+ nvidia,pins = "dap1_dout_pn2";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis SD1 */
+ sdmmc3_clk_pa6 {
+ nvidia,pins = "sdmmc3_clk_pa6";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_cmd_pa7 {
+ nvidia,pins = "sdmmc3_cmd_pa7";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat3_pb4 {
+ nvidia,pins = "sdmmc3_dat3_pb4";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat2_pb5 {
+ nvidia,pins = "sdmmc3_dat2_pb5";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat1_pb6 {
+ nvidia,pins = "sdmmc3_dat1_pb6";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_dat0_pb7 {
+ nvidia,pins = "sdmmc3_dat0_pb7";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc3_cd_n_pv2 { /* CD# GPIO */
+ nvidia,pins = "sdmmc3_cd_n_pv2";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis SPDIF */
+ spdif_out_pk5 {
+ nvidia,pins = "spdif_out_pk5";
+ nvidia,function = "spdif";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ spdif_in_pk6 {
+ nvidia,pins = "spdif_in_pk6";
+ nvidia,function = "spdif";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis SPI1 */
+ ulpi_clk_py0 {
+ nvidia,pins = "ulpi_clk_py0";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_dir_py1 {
+ nvidia,pins = "ulpi_dir_py1";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ ulpi_nxt_py2 {
+ nvidia,pins = "ulpi_nxt_py2";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_stp_py3 {
+ nvidia,pins = "ulpi_stp_py3";
+ nvidia,function = "spi1";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis SPI2 */
+ pg5 {
+ nvidia,pins = "pg5";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg6 {
+ nvidia,pins = "pg6";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg7 {
+ nvidia,pins = "pg7";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pi3 {
+ nvidia,pins = "pi3";
+ nvidia,function = "spi4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis UART1 */
+ pb1 { /* DCD GPIO */
+ nvidia,pins = "pb1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ pk7 { /* RI GPIO */
+ nvidia,pins = "pk7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart1_txd_pu0 {
+ nvidia,pins = "pu0";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart1_rxd_pu1 {
+ nvidia,pins = "pu1";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart1_cts_n_pu2 {
+ nvidia,pins = "pu2";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart1_rts_n_pu3 {
+ nvidia,pins = "pu3";
+ nvidia,function = "uarta";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart3_cts_n_pa1 { /* DSR GPIO */
+ nvidia,pins = "uart3_cts_n_pa1";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart3_rts_n_pc0 { /* DTR GPIO */
+ nvidia,pins = "uart3_rts_n_pc0";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis UART2 */
+ uart2_txd_pc2 {
+ nvidia,pins = "uart2_txd_pc2";
+ nvidia,function = "irda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart2_rxd_pc3 {
+ nvidia,pins = "uart2_rxd_pc3";
+ nvidia,function = "irda";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart2_cts_n_pj5 {
+ nvidia,pins = "uart2_cts_n_pj5";
+ nvidia,function = "uartb";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart2_rts_n_pj6 {
+ nvidia,pins = "uart2_rts_n_pj6";
+ nvidia,function = "uartb";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis UART3 */
+ uart3_txd_pw6 {
+ nvidia,pins = "uart3_txd_pw6";
+ nvidia,function = "uartc";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ uart3_rxd_pw7 {
+ nvidia,pins = "uart3_rxd_pw7";
+ nvidia,function = "uartc";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis UART4 */
+ uart4_rxd_pb0 {
+ nvidia,pins = "pb0";
+ nvidia,function = "uartd";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ uart4_txd_pj7 {
+ nvidia,pins = "pj7";
+ nvidia,function = "uartd";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis USBH_EN */
+ gen2_i2c_sda_pt6 {
+ nvidia,pins = "gen2_i2c_sda_pt6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis USBH_OC# */
+ pbb0 {
+ nvidia,pins = "pbb0";
+ nvidia,function = "vgp6";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis USBO1_EN */
+ gen2_i2c_scl_pt5 {
+ nvidia,pins = "gen2_i2c_scl_pt5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ nvidia,open-drain = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Apalis USBO1_OC# */
+ pbb4 {
+ nvidia,pins = "pbb4";
+ nvidia,function = "vgp4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* Apalis WAKE1_MICO */
+ pex_wake_n_pdd3 {
+ nvidia,pins = "pex_wake_n_pdd3";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* CORE_PWR_REQ */
+ core_pwr_req {
+ nvidia,pins = "core_pwr_req";
+ nvidia,function = "pwron";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* CPU_PWR_REQ */
+ cpu_pwr_req {
+ nvidia,pins = "cpu_pwr_req";
+ nvidia,function = "cpu";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* DVFS */
+ dvfs_pwm_px0 {
+ nvidia,pins = "dvfs_pwm_px0";
+ nvidia,function = "cldvfs";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dvfs_clk_px2 {
+ nvidia,pins = "dvfs_clk_px2";
+ nvidia,function = "cldvfs";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* eMMC */
+ sdmmc4_dat0_paa0 {
+ nvidia,pins = "sdmmc4_dat0_paa0";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat1_paa1 {
+ nvidia,pins = "sdmmc4_dat1_paa1";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat2_paa2 {
+ nvidia,pins = "sdmmc4_dat2_paa2";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat3_paa3 {
+ nvidia,pins = "sdmmc4_dat3_paa3";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat4_paa4 {
+ nvidia,pins = "sdmmc4_dat4_paa4";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat5_paa5 {
+ nvidia,pins = "sdmmc4_dat5_paa5";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat6_paa6 {
+ nvidia,pins = "sdmmc4_dat6_paa6";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_dat7_paa7 {
+ nvidia,pins = "sdmmc4_dat7_paa7";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_clk_pcc4 {
+ nvidia,pins = "sdmmc4_clk_pcc4";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ sdmmc4_cmd_pt7 {
+ nvidia,pins = "sdmmc4_cmd_pt7";
+ nvidia,function = "sdmmc4";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* JTAG_RTCK */
+ jtag_rtck {
+ nvidia,pins = "jtag_rtck";
+ nvidia,function = "rtck";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* LAN_DEV_OFF# */
+ ulpi_data5_po6 {
+ nvidia,pins = "ulpi_data5_po6";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* LAN_RESET# */
+ kb_row10_ps2 {
+ nvidia,pins = "kb_row10_ps2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* LAN_WAKE# */
+ ulpi_data4_po5 {
+ nvidia,pins = "ulpi_data4_po5";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* MCU_INT1# */
+ pk2 {
+ nvidia,pins = "pk2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* MCU_INT2# */
+ pj2 {
+ nvidia,pins = "pj2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* MCU_INT3# */
+ pi5 {
+ nvidia,pins = "pi5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* MCU_INT4# */
+ pj0 {
+ nvidia,pins = "pj0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* MCU_RESET */
+ pbb6 {
+ nvidia,pins = "pbb6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* MCU SPI */
+ gpio_x4_aud_px4 {
+ nvidia,pins = "gpio_x4_aud_px4";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x5_aud_px5 {
+ nvidia,pins = "gpio_x5_aud_px5";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x6_aud_px6 { /* MCU_CS */
+ nvidia,pins = "gpio_x6_aud_px6";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x7_aud_px7 {
+ nvidia,pins = "gpio_x7_aud_px7";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ gpio_w2_aud_pw2 { /* MCU_CSEZP */
+ nvidia,pins = "gpio_w2_aud_pw2";
+ nvidia,function = "spi2";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* PMIC_CLK_32K */
+ clk_32k_in {
+ nvidia,pins = "clk_32k_in";
+ nvidia,function = "clk";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* PMIC_CPU_OC_INT */
+ clk_32k_out_pa0 {
+ nvidia,pins = "clk_32k_out_pa0";
+ nvidia,function = "soc";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* PWR_I2C */
+ pwr_i2c_scl_pz6 {
+ nvidia,pins = "pwr_i2c_scl_pz6";
+ nvidia,function = "i2cpwr";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+ pwr_i2c_sda_pz7 {
+ nvidia,pins = "pwr_i2c_sda_pz7";
+ nvidia,function = "i2cpwr";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ nvidia,open-drain = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* PWR_INT_N */
+ pwr_int_n {
+ nvidia,pins = "pwr_int_n";
+ nvidia,function = "pmi";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* RESET_MOCI_CTRL */
+ pu4 {
+ nvidia,pins = "pu4";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* RESET_OUT_N */
+ reset_out_n {
+ nvidia,pins = "reset_out_n";
+ nvidia,function = "reset_out_n";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* SHIFT_CTRL_DIR_IN */
+ kb_row0_pr0 {
+ nvidia,pins = "kb_row0_pr0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row1_pr1 {
+ nvidia,pins = "kb_row1_pr1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* Configure level-shifter as output for HDA */
+ kb_row11_ps3 {
+ nvidia,pins = "kb_row11_ps3";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* SHIFT_CTRL_DIR_OUT */
+ kb_col5_pq5 {
+ nvidia,pins = "kb_col5_pq5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col6_pq6 {
+ nvidia,pins = "kb_col6_pq6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col7_pq7 {
+ nvidia,pins = "kb_col7_pq7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* SHIFT_CTRL_OE */
+ kb_col0_pq0 {
+ nvidia,pins = "kb_col0_pq0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col1_pq1 {
+ nvidia,pins = "kb_col1_pq1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col2_pq2 {
+ nvidia,pins = "kb_col2_pq2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col4_pq4 {
+ nvidia,pins = "kb_col4_pq4";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row2_pr2 {
+ nvidia,pins = "kb_row2_pr2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+
+ /* GPIO_PI6 aka TMP451 ALERT#/THERM2# */
+ pi6 {
+ nvidia,pins = "pi6";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_UP>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ /* TOUCH_INT */
+ gpio_w3_aud_pw3 {
+ nvidia,pins = "gpio_w3_aud_pw3";
+ nvidia,function = "spi6";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+
+ pc7 { /* NC */
+ nvidia,pins = "pc7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg0 { /* NC */
+ nvidia,pins = "pg0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg1 { /* NC */
+ nvidia,pins = "pg1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg2 { /* NC */
+ nvidia,pins = "pg2";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg3 { /* NC */
+ nvidia,pins = "pg3";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pg4 { /* NC */
+ nvidia,pins = "pg4";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph4 { /* NC */
+ nvidia,pins = "ph4";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph5 { /* NC */
+ nvidia,pins = "ph5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph6 { /* NC */
+ nvidia,pins = "ph6";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ph7 { /* NC */
+ nvidia,pins = "ph7";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi0 { /* NC */
+ nvidia,pins = "pi0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi1 { /* NC */
+ nvidia,pins = "pi1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi2 { /* NC */
+ nvidia,pins = "pi2";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi4 { /* NC */
+ nvidia,pins = "pi4";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pi7 { /* NC */
+ nvidia,pins = "pi7";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk0 { /* NC */
+ nvidia,pins = "pk0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk1 { /* NC */
+ nvidia,pins = "pk1";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk3 { /* NC */
+ nvidia,pins = "pk3";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pk4 { /* NC */
+ nvidia,pins = "pk4";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_fs_pn0 { /* NC */
+ nvidia,pins = "dap1_fs_pn0";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_din_pn1 { /* NC */
+ nvidia,pins = "dap1_din_pn1";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap1_sclk_pn3 { /* NC */
+ nvidia,pins = "dap1_sclk_pn3";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data7_po0 { /* NC */
+ nvidia,pins = "ulpi_data7_po0";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data0_po1 { /* NC */
+ nvidia,pins = "ulpi_data0_po1";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data1_po2 { /* NC */
+ nvidia,pins = "ulpi_data1_po2";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data2_po3 { /* NC */
+ nvidia,pins = "ulpi_data2_po3";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data3_po4 { /* NC */
+ nvidia,pins = "ulpi_data3_po4";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ ulpi_data6_po7 { /* NC */
+ nvidia,pins = "ulpi_data6_po7";
+ nvidia,function = "ulpi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_fs_pp4 { /* NC */
+ nvidia,pins = "dap4_fs_pp4";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_din_pp5 { /* NC */
+ nvidia,pins = "dap4_din_pp5";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_dout_pp6 { /* NC */
+ nvidia,pins = "dap4_dout_pp6";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap4_sclk_pp7 { /* NC */
+ nvidia,pins = "dap4_sclk_pp7";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_col3_pq3 { /* NC */
+ nvidia,pins = "kb_col3_pq3";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row3_pr3 { /* NC */
+ nvidia,pins = "kb_row3_pr3";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row4_pr4 { /* NC */
+ nvidia,pins = "kb_row4_pr4";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row5_pr5 { /* NC */
+ nvidia,pins = "kb_row5_pr5";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row6_pr6 { /* NC */
+ nvidia,pins = "kb_row6_pr6";
+ nvidia,function = "kbc";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row7_pr7 { /* NC */
+ nvidia,pins = "kb_row7_pr7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row8_ps0 { /* NC */
+ nvidia,pins = "kb_row8_ps0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row9_ps1 { /* NC */
+ nvidia,pins = "kb_row9_ps1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row12_ps4 { /* NC */
+ nvidia,pins = "kb_row12_ps4";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row13_ps5 { /* NC */
+ nvidia,pins = "kb_row13_ps5";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row14_ps6 { /* NC */
+ nvidia,pins = "kb_row14_ps6";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row15_ps7 { /* NC */
+ nvidia,pins = "kb_row15_ps7";
+ nvidia,function = "rsvd3";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row16_pt0 { /* NC */
+ nvidia,pins = "kb_row16_pt0";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ kb_row17_pt1 { /* NC */
+ nvidia,pins = "kb_row17_pt1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pu5 { /* NC */
+ nvidia,pins = "pu5";
+ nvidia,function = "gmi";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ /*
+ * PCB Version Indication: V1.2 and later have GPIO_PV0
+ * wired to GND; it was NC before.
+ */
+ pv0 {
+ nvidia,pins = "pv0";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pv1 { /* NC */
+ nvidia,pins = "pv1";
+ nvidia,function = "rsvd1";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x1_aud_px1 { /* NC */
+ nvidia,pins = "gpio_x1_aud_px1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ gpio_x3_aud_px3 { /* NC */
+ nvidia,pins = "gpio_x3_aud_px3";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pbb7 { /* NC */
+ nvidia,pins = "pbb7";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pcc1 { /* NC */
+ nvidia,pins = "pcc1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ pcc2 { /* NC */
+ nvidia,pins = "pcc2";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ clk3_req_pee1 { /* NC */
+ nvidia,pins = "clk3_req_pee1";
+ nvidia,function = "rsvd2";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ dap_mclk1_req_pee2 { /* NC */
+ nvidia,pins = "dap_mclk1_req_pee2";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ /*
+ * Leave SDMMC3_CLK_LB_OUT muxed as SDMMC3 with the output
+ * driver enabled (i.e. not tristated) and the input driver
+ * enabled as well, since the pin still has an effect even
+ * though the external loopback is disabled and the internal
+ * loopback is used, the SDMMC_SPARE1 bits of the
+ * SDMMC_VENDOR_MISC_CNTRL_0 register being set to 0xfffd as
+ * per the TRM.
+ */
+ sdmmc3_clk_lb_out_pee4 { /* NC */
+ nvidia,pins = "sdmmc3_clk_lb_out_pee4";
+ nvidia,function = "sdmmc3";
+ nvidia,pull = <TEGRA_PIN_PULL_NONE>;
+ nvidia,tristate = <TEGRA_PIN_DISABLE>;
+ nvidia,enable-input = <TEGRA_PIN_ENABLE>;
+ };
+ };
+
+/* TBD */
+ pinmux_unused_lowpower: unused_lowpower {
+ dap1_din_pn1 {
+ nvidia,pins = "dap1_din_pn1";
+ nvidia,function = "rsvd4";
+ nvidia,pull = <TEGRA_PIN_PULL_DOWN>;
+ nvidia,tristate = <TEGRA_PIN_ENABLE>;
+ nvidia,enable-input = <TEGRA_PIN_DISABLE>;
+ };
+ };
+
+ drive_default: drive {
+ drive_sdio1 {
+ nvidia,pins = "drive_sdio1";
+ nvidia,high-speed-mode = <TEGRA_PIN_ENABLE>;
+ nvidia,schmitt = <TEGRA_PIN_DISABLE>;
+ nvidia,pull-down-strength = <32>;
+ nvidia,pull-up-strength = <42>;
+ nvidia,slew-rate-rising = <TEGRA_PIN_SLEW_RATE_FASTEST>;
+ nvidia,slew-rate-falling = <TEGRA_PIN_SLEW_RATE_FASTEST>;
+ };
+
+ drive_sdio3 {
+ nvidia,pins = "drive_sdio3";
+ nvidia,high-speed-mode = <TEGRA_PIN_ENABLE>;
+ nvidia,schmitt = <TEGRA_PIN_DISABLE>;
+ nvidia,pull-down-strength = <20>;
+ nvidia,pull-up-strength = <36>;
+ nvidia,slew-rate-rising = <TEGRA_PIN_SLEW_RATE_FASTEST>;
+ nvidia,slew-rate-falling = <TEGRA_PIN_SLEW_RATE_FASTEST>;
+ };
+
+ drive_gma {
+ nvidia,pins = "drive_gma";
+ nvidia,high-speed-mode = <TEGRA_PIN_ENABLE>;
+ nvidia,schmitt = <TEGRA_PIN_DISABLE>;
+ nvidia,pull-down-strength = <1>;
+ nvidia,pull-up-strength = <2>;
+ nvidia,slew-rate-rising = <TEGRA_PIN_SLEW_RATE_FASTEST>;
+ nvidia,slew-rate-falling = <TEGRA_PIN_SLEW_RATE_FASTEST>;
+ nvidia,drive-type = <1>;
+ };
+ };
+ };
+};
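For orientation, here is a minimal sketch of how a pinmux state such as the one added above is typically consumed, assuming the standard Tegra pinctrl binding (pinctrl-names/pinctrl-0) and the TEGRA_PIN_* macros from <dt-bindings/pinctrl/pinctrl-tegra.h>; the "state_default" label and the single example pin group are illustrative only and not taken from this patch:

	&pinmux {
		pinctrl-names = "default";
		pinctrl-0 = <&state_default>;

		state_default: pinmux {
			/* one node per pin group, e.g. the eMMC clock pad */
			sdmmc4_clk_pcc4 {
				nvidia,pins = "sdmmc4_clk_pcc4";
				nvidia,function = "sdmmc4";
				nvidia,pull = <TEGRA_PIN_PULL_NONE>;
				nvidia,tristate = <TEGRA_PIN_DISABLE>;
				nvidia,enable-input = <TEGRA_PIN_ENABLE>;
			};
		};
	};

The pinctrl core applies the "default" state when the pin controller probes, so every group listed in the dtsi above takes effect at boot without any per-driver code.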
diff --git a/arch/arm/configs/apalis-tk1_defconfig b/arch/arm/configs/apalis-tk1_defconfig
new file mode 100644
index 000000000000..db3f5d042f97
--- /dev/null
+++ b/arch/arm/configs/apalis-tk1_defconfig
@@ -0,0 +1,443 @@
+CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_NAMESPACES=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_ELF_CORE is not set
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_TEGRA_12x_SOC=y
+CONFIG_MACH_APALIS_TK1=y
+CONFIG_TEGRA_EMC_SCALING_ENABLE=y
+CONFIG_TEGRA_CLOCK_DEBUG_WRITE=y
+CONFIG_TEGRA_EDP_LIMITS=y
+CONFIG_TEGRA_GPU_EDP=y
+CONFIG_TEGRA_GADGET_BOOST_CPU_FREQ=1400
+CONFIG_TEGRA_DYNAMIC_PWRDET=y
+CONFIG_TEGRA_PREINIT_CLOCKS=y
+CONFIG_TEGRA_PLLM_SCALED=y
+CONFIG_ARM_LPAE=y
+# CONFIG_CACHE_L2X0 is not set
+#TBD: ARM errata
+CONFIG_PCI_TEGRA=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_SMP=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_HIGHMEM=y
+CONFIG_SECCOMP=y
+CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_CPUQUIET_FRAMEWORK=y
+CONFIG_CPUQUIET_DEFAULT_GOV_RUNNABLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_PM_RUNTIME=y
+CONFIG_SUSPEND_TIME=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_TUNNEL=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_NF_NAT_IPV4=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_VLAN_8021Q=y
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_MHI=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_CAN=y
+CONFIG_CAN_MCP251X=y
+CONFIG_CAN_APALIS_TK1_K20=m
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_BNEP=m
+CONFIG_BT_HIDP=m
+CONFIG_BT_HCIBTUSB=m
+CONFIG_CFG80211=m
+CONFIG_NL80211_TESTMODE=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=m
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_RFKILL_GPIO=y
+CONFIG_CAIF=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_CMA=y
+CONFIG_PLATFORM_ENABLE_IOMMU=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_SENSORS_NCT1008=y
+CONFIG_TEGRA_CRYPTO_DEV=y
+CONFIG_THERM_EST=y
+CONFIG_FAN_THERM_EST=y
+CONFIG_TEGRA_PROFILER=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=m
+CONFIG_SATA_AHCI_TEGRA=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_E1000E=m
+CONFIG_IGB=y
+#TBD: IGB_PTP
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOLAC=m
+CONFIG_PPPOPNS=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_CDC_NCM is not set
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_BELKIN is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_INPUT_POLLDEV=y
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_JOYDEV=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_INPUT_CFBOOST=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=m
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_APALIS_TK1_K20=m
+CONFIG_TOUCHSCREEN_FUSION_F0710A=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_PCI=m
+CONFIG_SERIAL_8250_NR_UARTS=9
+CONFIG_SERIAL_8250_RUNTIME_UARTS=9
+CONFIG_SERIAL_TEGRA=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_TEGRA=y
+CONFIG_SPI=y
+CONFIG_SPI_TEGRA114=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_PINCTRL_AS3722=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_APALIS_TK1_K20=m
+CONFIG_POWER_RESET_AS3722=y
+CONFIG_THERMAL_GOV_PID=y
+CONFIG_GENERIC_ADC_THERMAL=y
+CONFIG_PWM_FAN=y
+CONFIG_SENSORS_TMP006=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_TEGRA_WATCHDOG=y
+CONFIG_TEGRA_WATCHDOG_ENABLE_ON_PROBE=y
+CONFIG_SSB=m
+CONFIG_SSB_DRIVER_PCICORE=y
+CONFIG_MFD_APALIS_TK1_K20=m
+CONFIG_APALIS_TK1_K20_EZP=y
+CONFIG_MFD_AS3722=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
+CONFIG_REGULATOR_USERSPACE_CONSUMER=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_AS3722=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_MEDIA_SUPPORT=y
+#TBD
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+# CONFIG_USB_GSPCA is not set
+CONFIG_V4L_PLATFORM_DRIVERS=y
+# CONFIG_TEGRA_RPC is not set
+CONFIG_TEGRA_NVAVP=y
+CONFIG_TEGRA_NVAVP_AUDIO=y
+CONFIG_SOC_CAMERA=y
+CONFIG_SOC_CAMERA_PLATFORM=m
+CONFIG_VIDEO_TEGRA=m
+CONFIG_SOC_CAMERA_AR0261=m
+CONFIG_SOC_CAMERA_AR0330=m
+CONFIG_SOC_CAMERA_AP1302=m
+CONFIG_SOC_CAMERA_IMX135=m
+CONFIG_SOC_CAMERA_OV5640=m
+CONFIG_SOC_CAMERA_TC358743=m
+CONFIG_SOC_CAMERA_ADV7280=m
+# CONFIG_VGA_ARB is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_TEGRA_GRHOST=y
+CONFIG_TEGRA_GRHOST_VI=m
+CONFIG_TEGRA_DC=y
+CONFIG_TEGRA_DP=y
+CONFIG_TEGRA_LVDS=y
+# CONFIG_TEGRA_CAMERA is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+CONFIG_BACKLIGHT_TEGRA_PWM=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_HDA_INTEL=y
+CONFIG_SND_HDA_PLATFORM_DRIVER=y
+CONFIG_SND_HDA_PLATFORM_NVIDIA_TEGRA=y
+# CONFIG_SND_HDA_CODEC_ANALOG is not set
+# CONFIG_SND_HDA_CODEC_SIGMATEL is not set
+# CONFIG_SND_HDA_CODEC_VIA is not set
+# CONFIG_SND_HDA_CODEC_CIRRUS is not set
+# CONFIG_SND_HDA_CODEC_CONEXANT is not set
+# CONFIG_SND_HDA_CODEC_CA0110 is not set
+# CONFIG_SND_HDA_CODEC_CA0132 is not set
+# CONFIG_SND_HDA_CODEC_CMEDIA is not set
+# CONFIG_SND_HDA_CODEC_SI3054 is not set
+CONFIG_SND_HDA_POWER_SAVE_DEFAULT=10
+# CONFIG_SND_ARM is not set
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_TEGRA=y
+CONFIG_SND_SOC_TEGRA_APALIS_TK1=y
+CONFIG_HIDRAW=y
+CONFIG_HID_MULTITOUCH=m
+CONFIG_USB_HIDDEV=y
+CONFIG_I2C_HID=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+CONFIG_USB_XHCI_HCD=y
+CONFIG_TEGRA_XUSB_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_ACM=m
+CONFIG_USB_WDM=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CONSOLE=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_PL2303=y
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_BASEBAND=m
+CONFIG_USB_TEGRA_OTG=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_TEGRA=y
+CONFIG_USB_G_ANDROID=y
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_BLOCK_MINORS=16
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_TEGRA=y
+#TBD
+CONFIG_MMC_SDHCI_TEGRA_HS200_DISABLE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_AS3722=y
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_RTC_DRV_TEGRA=y
+CONFIG_DMADEVICES=y
+CONFIG_TEGRA20_APB_DMA=y
+CONFIG_STAGING=y
+#TBD
+CONFIG_AS3722_ADC_EXTCON=y
+CONFIG_SW_SYNC=y
+CONFIG_SW_SYNC_USER=y
+CONFIG_TEGRA_MC_DOMAINS=y
+CONFIG_CLK_AS3722=y
+CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_PM_DEVFREQ=y
+CONFIG_EXTCON=y
+CONFIG_IIO=y
+CONFIG_APALIS_TK1_K20_ADC=m
+CONFIG_PWM=y
+CONFIG_PWM_TEGRA=y
+CONFIG_GK20A_PMU=y
+CONFIG_GK20A_DEVFREQ=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_ZLIB=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_FTRACE=y
+CONFIG_PSTORE_RAM=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V4=y
+CONFIG_CIFS=y
+CONFIG_NLS_DEFAULT="cp437"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_VM=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAILSLAB=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_FUNCTION_TRACER=y
+# CONFIG_FUNCTION_GRAPH_TRACER is not set
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_CRYPTO_CCM=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_CMAC=y
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_TEGRA_SE=y
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index f89515adac60..2bb8cac28b9e 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -45,7 +45,7 @@ void *return_address(unsigned int);
#else
-extern inline void *return_address(unsigned int level)
+static inline void *return_address(unsigned int level)
{
return NULL;
}
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index fafedd86885d..f6aa84d5b93c 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -63,11 +63,6 @@ void *return_address(unsigned int level)
#warning "TODO: return_address should use unwind tables"
#endif
-void *return_address(unsigned int level)
-{
- return NULL;
-}
-
#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) / else */
EXPORT_SYMBOL_GPL(return_address);
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index d79a30000f93..8385f1f30aca 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -107,6 +107,8 @@ config ARCH_TEGRA_12x_SOC
select TEGRA_ISOMGR
select TEGRA_ISOMGR_SYSFS
select PROC_DEVICETREE
+ select ZONE_DMA if ARM_LPAE
+ select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
help
Support for NVIDIA Tegra 12x family of SoCs, based upon the
ARM Cortex-A15MP CPU
@@ -163,6 +165,15 @@ config MACH_ARDBEG
help
Support for NVIDIA ARDBEG Development platform
+config MACH_APALIS_TK1
+ bool "Toradex Apalis TK1 Module"
+ depends on ARCH_TEGRA_12x_SOC
+ select MACH_HAS_SND_SOC_TEGRA_SGTL5000 if SND_SOC
+ select SYSEDP_FRAMEWORK
+ help
+ Support for the Toradex Apalis TK1 module on the Apalis evaluation
+ carrier board
+
config MACH_LOKI
bool "Loki board"
depends on ARCH_TEGRA_12x_SOC
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index 101192d8ba04..d3092be73044 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -144,6 +144,18 @@ obj-y += panel-p-wuxga-10-1.o
obj-y += panel-lgd-wxga-7-0.o
obj-y += panel-s-wqxga-10-1.o
+obj-${CONFIG_MACH_APALIS_TK1} += board-apalis-tk1.o
+obj-${CONFIG_MACH_APALIS_TK1} += board-apalis-tk1-sdhci.o
+obj-${CONFIG_MACH_APALIS_TK1} += board-apalis-tk1-sensors.o
+obj-${CONFIG_MACH_APALIS_TK1} += board-apalis-tk1-panel.o
+obj-${CONFIG_MACH_APALIS_TK1} += board-apalis-tk1-memory.o
+obj-${CONFIG_MACH_APALIS_TK1} += board-apalis-tk1-power.o
+obj-${CONFIG_MACH_APALIS_TK1} += panel-a-edp-1080p-14-0.o
+obj-${CONFIG_MACH_APALIS_TK1} += panel-c-lvds-1366-14.o
+ifeq ($(CONFIG_MACH_APALIS_TK1),y)
+obj-${CONFIG_SYSEDP_FRAMEWORK} += board-apalis-tk1-sysedp.o
+endif
+
obj-${CONFIG_MACH_ARDBEG} += board-ardbeg.o
obj-${CONFIG_MACH_ARDBEG} += board-ardbeg-sdhci.o
obj-${CONFIG_MACH_ARDBEG} += board-ardbeg-sensors.o
diff --git a/arch/arm/mach-tegra/board-apalis-tk1-memory.c b/arch/arm/mach-tegra/board-apalis-tk1-memory.c
new file mode 100644
index 000000000000..88c2d48a26fd
--- /dev/null
+++ b/arch/arm/mach-tegra/board-apalis-tk1-memory.c
@@ -0,0 +1,2780 @@
+/*
+ * Copyright (c) 2016, Toradex AG. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tegra-fuse.h>
+#include <linux/platform_data/tegra_emc_pdata.h>
+
+#include "board.h"
+#include "board-apalis-tk1.h"
+#include "tegra-board-id.h"
+#include "tegra12_emc.h"
+#include "devices.h"
+
+static struct tegra12_emc_table apalis_tk1_ddr3_emc_table[] = {
+ {
+ 0x19, /* V5.0.18 */
+ "001_12750_01_V5.0.18_V1.1", /* DVFS table version */
+ 12750, /* SDRAM frequency */
+ 800, /* min voltage */
+ 800, /* gpu min voltage */
+ "pllp_out0", /* clock source id */
+ 0x4000003e, /* CLK_SOURCE_EMC */
+ 165, /* number of burst_regs */
+ 31, /* number of up_down_regs */
+ {
+ 0x00000000, /* EMC_RC */
+ 0x00000003, /* EMC_RFC */
+ 0x00000000, /* EMC_RFC_SLR */
+ 0x00000000, /* EMC_RAS */
+ 0x00000000, /* EMC_RP */
+ 0x00000004, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000005, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000000, /* EMC_RD_RCD */
+ 0x00000000, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000003, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000006, /* EMC_WDV */
+ 0x00000006, /* EMC_WDV_MASK */
+ 0x00000006, /* EMC_QUSE */
+ 0x00000002, /* EMC_QUSE_WIDTH */
+ 0x00000000, /* EMC_IBDLY */
+ 0x00000005, /* EMC_EINPUT */
+ 0x00000005, /* EMC_EINPUT_DURATION */
+ 0x00010000, /* EMC_PUTERM_EXTRA */
+ 0x00000003, /* EMC_PUTERM_WIDTH */
+ 0x00000000, /* EMC_PUTERM_ADJ */
+ 0x00000000, /* EMC_CDB_CNTL_1 */
+ 0x00000000, /* EMC_CDB_CNTL_2 */
+ 0x00000000, /* EMC_CDB_CNTL_3 */
+ 0x00000004, /* EMC_QRST */
+ 0x0000000c, /* EMC_QSAFE */
+ 0x0000000d, /* EMC_RDV */
+ 0x0000000f, /* EMC_RDV_MASK */
+ 0x00000060, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000018, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x00000005, /* EMC_TXSR */
+ 0x00000005, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000005, /* EMC_TCKESR */
+ 0x00000004, /* EMC_TPD */
+ 0x00000000, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000005, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x00000064, /* EMC_TREFBW */
+ 0x00000000, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x106aa298, /* EMC_FBIO_CFG5 */
+ 0x002c00a0, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00064000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS8 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS9 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS10 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS11 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS12 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS13 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS14 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS15 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR0 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR1 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR2 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR3 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR4 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ4 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ5 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ6 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ7 */
+ 0x10000280, /* EMC_XM2CMDPADCTRL */
+ 0x00000000, /* EMC_XM2CMDPADCTRL4 */
+ 0x00111111, /* EMC_XM2CMDPADCTRL5 */
+ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL3 */
+ 0x77ffc081, /* EMC_XM2CLKPADCTRL */
+ 0x00000404, /* EMC_XM2CLKPADCTRL2 */
+ 0x81f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x07070004, /* EMC_XM2VTTGENPADCTRL */
+ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */
+ 0x51451400, /* EMC_XM2DQSPADCTRL3 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL4 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL5 */
+ 0x51451400, /* EMC_XM2DQSPADCTRL6 */
+ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */
+ 0x00000007, /* EMC_TXDSRVTTGEN */
+ 0x00000000, /* EMC_FBIO_SPARE */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000042, /* EMC_ZCAL_WAIT_CNT */
+ 0x000e000e, /* EMC_MRS_WAIT_CNT */
+ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */
+ 0x00000000, /* EMC_CTT */
+ 0x00000003, /* EMC_CTT_DURATION */
+ 0x0000f2f3, /* EMC_CFG_PIPE */
+ 0x800001c5, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x0000000a, /* EMC_QPOP */
+ 0x40040001, /* MC_EMEM_ARB_CFG */
+ 0x8000000a, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000a0502, /* MC_EMEM_ARB_DA_COVERS */
+ 0x77e30303, /* MC_EMEM_ARB_MISC0 */
+ 0x70000f03, /* MC_EMEM_ARB_MISC1 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ },
+ {
+ 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */
+ 0x00000007, /* MC_PTSA_GRANT_DECREMENT */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */
+ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */
+ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */
+ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */
+ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */
+ },
+ 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x73240000, /* EMC_CFG */
+ 0x000008c5, /* EMC_CFG_2 */
+ 0x00040000, /* EMC_SEL_DPD_CTRL */
+ 0x002c0068, /* EMC_CFG_DIG_DLL */
+ 0x00000008, /* EMC_BGBIAS_CTL0 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */
+ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */
+ 0x80001221, /* Mode Register 0 */
+ 0x80100003, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000000, /* Mode Register 4 */
+ 57820, /* expected dvfs latency (ns) */
+ },
+ {
+ 0x19, /* V5.0.18 */
+ "001_20400_01_V5.0.18_V1.1", /* DVFS table version */
+ 20400, /* SDRAM frequency */
+ 800, /* min voltage */
+ 800, /* gpu min voltage */
+ "pllp_out0", /* clock source id */
+ 0x40000026, /* CLK_SOURCE_EMC */
+ 165, /* number of burst_regs */
+ 31, /* number of up_down_regs */
+ {
+ 0x00000000, /* EMC_RC */
+ 0x00000005, /* EMC_RFC */
+ 0x00000000, /* EMC_RFC_SLR */
+ 0x00000000, /* EMC_RAS */
+ 0x00000000, /* EMC_RP */
+ 0x00000004, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000005, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000000, /* EMC_RD_RCD */
+ 0x00000000, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000003, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000006, /* EMC_WDV */
+ 0x00000006, /* EMC_WDV_MASK */
+ 0x00000006, /* EMC_QUSE */
+ 0x00000002, /* EMC_QUSE_WIDTH */
+ 0x00000000, /* EMC_IBDLY */
+ 0x00000005, /* EMC_EINPUT */
+ 0x00000005, /* EMC_EINPUT_DURATION */
+ 0x00010000, /* EMC_PUTERM_EXTRA */
+ 0x00000003, /* EMC_PUTERM_WIDTH */
+ 0x00000000, /* EMC_PUTERM_ADJ */
+ 0x00000000, /* EMC_CDB_CNTL_1 */
+ 0x00000000, /* EMC_CDB_CNTL_2 */
+ 0x00000000, /* EMC_CDB_CNTL_3 */
+ 0x00000004, /* EMC_QRST */
+ 0x0000000c, /* EMC_QSAFE */
+ 0x0000000d, /* EMC_RDV */
+ 0x0000000f, /* EMC_RDV_MASK */
+ 0x0000009a, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000026, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000007, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x00000006, /* EMC_TXSR */
+ 0x00000006, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000005, /* EMC_TCKESR */
+ 0x00000004, /* EMC_TPD */
+ 0x00000000, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000005, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x000000a0, /* EMC_TREFBW */
+ 0x00000000, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x106aa298, /* EMC_FBIO_CFG5 */
+ 0x002c00a0, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00064000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS8 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS9 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS10 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS11 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS12 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS13 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS14 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS15 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR0 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR1 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR2 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR3 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR4 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ4 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ5 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ6 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ7 */
+ 0x10000280, /* EMC_XM2CMDPADCTRL */
+ 0x00000000, /* EMC_XM2CMDPADCTRL4 */
+ 0x00111111, /* EMC_XM2CMDPADCTRL5 */
+ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL3 */
+ 0x77ffc081, /* EMC_XM2CLKPADCTRL */
+ 0x00000404, /* EMC_XM2CLKPADCTRL2 */
+ 0x81f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x07070004, /* EMC_XM2VTTGENPADCTRL */
+ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */
+ 0x51451400, /* EMC_XM2DQSPADCTRL3 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL4 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL5 */
+ 0x51451400, /* EMC_XM2DQSPADCTRL6 */
+ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */
+ 0x0000000b, /* EMC_TXDSRVTTGEN */
+ 0x00000000, /* EMC_FBIO_SPARE */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000042, /* EMC_ZCAL_WAIT_CNT */
+ 0x000e000e, /* EMC_MRS_WAIT_CNT */
+ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */
+ 0x00000000, /* EMC_CTT */
+ 0x00000003, /* EMC_CTT_DURATION */
+ 0x0000f2f3, /* EMC_CFG_PIPE */
+ 0x8000023a, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x0000000a, /* EMC_QPOP */
+ 0x40020001, /* MC_EMEM_ARB_CFG */
+ 0x80000012, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000a0502, /* MC_EMEM_ARB_DA_COVERS */
+ 0x76230303, /* MC_EMEM_ARB_MISC0 */
+ 0x70000f03, /* MC_EMEM_ARB_MISC1 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ },
+ {
+ 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */
+ 0x0000000a, /* MC_PTSA_GRANT_DECREMENT */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */
+ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */
+ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */
+ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */
+ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */
+ },
+ 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x73240000, /* EMC_CFG */
+ 0x000008c5, /* EMC_CFG_2 */
+ 0x00040000, /* EMC_SEL_DPD_CTRL */
+ 0x002c0068, /* EMC_CFG_DIG_DLL */
+ 0x00000008, /* EMC_BGBIAS_CTL0 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */
+ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */
+ 0x80001221, /* Mode Register 0 */
+ 0x80100003, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000000, /* Mode Register 4 */
+ 35610, /* expected dvfs latency (ns) */
+ },
+ {
+ 0x19, /* V5.0.18 */
+ "001_40800_01_V5.0.18_V1.1", /* DVFS table version */
+ 40800, /* SDRAM frequency */
+ 800, /* min voltage */
+ 800, /* gpu min voltage */
+ "pllp_out0", /* clock source id */
+ 0x40000012, /* CLK_SOURCE_EMC */
+ 165, /* number of burst_regs */
+ 31, /* number of up_down_regs */
+ {
+ 0x00000001, /* EMC_RC */
+ 0x0000000a, /* EMC_RFC */
+ 0x00000000, /* EMC_RFC_SLR */
+ 0x00000001, /* EMC_RAS */
+ 0x00000000, /* EMC_RP */
+ 0x00000004, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000005, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000000, /* EMC_RD_RCD */
+ 0x00000000, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000003, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000006, /* EMC_WDV */
+ 0x00000006, /* EMC_WDV_MASK */
+ 0x00000006, /* EMC_QUSE */
+ 0x00000002, /* EMC_QUSE_WIDTH */
+ 0x00000000, /* EMC_IBDLY */
+ 0x00000005, /* EMC_EINPUT */
+ 0x00000005, /* EMC_EINPUT_DURATION */
+ 0x00010000, /* EMC_PUTERM_EXTRA */
+ 0x00000003, /* EMC_PUTERM_WIDTH */
+ 0x00000000, /* EMC_PUTERM_ADJ */
+ 0x00000000, /* EMC_CDB_CNTL_1 */
+ 0x00000000, /* EMC_CDB_CNTL_2 */
+ 0x00000000, /* EMC_CDB_CNTL_3 */
+ 0x00000004, /* EMC_QRST */
+ 0x0000000c, /* EMC_QSAFE */
+ 0x0000000d, /* EMC_RDV */
+ 0x0000000f, /* EMC_RDV_MASK */
+ 0x00000134, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x0000004d, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000008, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x0000000c, /* EMC_TXSR */
+ 0x0000000c, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000005, /* EMC_TCKESR */
+ 0x00000004, /* EMC_TPD */
+ 0x00000000, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000005, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x0000013f, /* EMC_TREFBW */
+ 0x00000000, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x106aa298, /* EMC_FBIO_CFG5 */
+ 0x002c00a0, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00064000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS8 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS9 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS10 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS11 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS12 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS13 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS14 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS15 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR0 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR1 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR2 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR3 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR4 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ4 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ5 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ6 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ7 */
+ 0x10000280, /* EMC_XM2CMDPADCTRL */
+ 0x00000000, /* EMC_XM2CMDPADCTRL4 */
+ 0x00111111, /* EMC_XM2CMDPADCTRL5 */
+ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL3 */
+ 0x77ffc081, /* EMC_XM2CLKPADCTRL */
+ 0x00000404, /* EMC_XM2CLKPADCTRL2 */
+ 0x81f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x07070004, /* EMC_XM2VTTGENPADCTRL */
+ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */
+ 0x51451400, /* EMC_XM2DQSPADCTRL3 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL4 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL5 */
+ 0x51451400, /* EMC_XM2DQSPADCTRL6 */
+ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */
+ 0x00000015, /* EMC_TXDSRVTTGEN */
+ 0x00000000, /* EMC_FBIO_SPARE */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000042, /* EMC_ZCAL_WAIT_CNT */
+ 0x000e000e, /* EMC_MRS_WAIT_CNT */
+ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */
+ 0x00000000, /* EMC_CTT */
+ 0x00000003, /* EMC_CTT_DURATION */
+ 0x0000f2f3, /* EMC_CFG_PIPE */
+ 0x80000370, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x0000000a, /* EMC_QPOP */
+ 0xa0000001, /* MC_EMEM_ARB_CFG */
+ 0x80000017, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000a0502, /* MC_EMEM_ARB_DA_COVERS */
+ 0x74a30303, /* MC_EMEM_ARB_MISC0 */
+ 0x70000f03, /* MC_EMEM_ARB_MISC1 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ },
+ {
+ 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */
+ 0x00000014, /* MC_PTSA_GRANT_DECREMENT */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_TSEC_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */
+ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */
+ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */
+ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_MSENC_0 */
+ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_1 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */
+ },
+ 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x73240000, /* EMC_CFG */
+ 0x000008c5, /* EMC_CFG_2 */
+ 0x00040000, /* EMC_SEL_DPD_CTRL */
+ 0x002c0068, /* EMC_CFG_DIG_DLL */
+ 0x00000008, /* EMC_BGBIAS_CTL0 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */
+ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */
+ 0x80001221, /* Mode Register 0 */
+ 0x80100003, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000000, /* Mode Register 4 */
+ 20850, /* expected dvfs latency (ns) */
+ },
+ {
+ 0x19, /* V5.0.18 */
+ "001_68000_01_V5.0.18_V1.1", /* DVFS table version */
+ 68000, /* SDRAM frequency */
+ 800, /* min voltage */
+ 800, /* gpu min voltage */
+ "pllp_out0", /* clock source id */
+ 0x4000000a, /* CLK_SOURCE_EMC */
+ 165, /* number of burst_regs */
+ 31, /* number of up_down_regs */
+ {
+ 0x00000003, /* EMC_RC */
+ 0x00000011, /* EMC_RFC */
+ 0x00000000, /* EMC_RFC_SLR */
+ 0x00000002, /* EMC_RAS */
+ 0x00000000, /* EMC_RP */
+ 0x00000004, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000005, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000000, /* EMC_RD_RCD */
+ 0x00000000, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000003, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000006, /* EMC_WDV */
+ 0x00000006, /* EMC_WDV_MASK */
+ 0x00000006, /* EMC_QUSE */
+ 0x00000002, /* EMC_QUSE_WIDTH */
+ 0x00000000, /* EMC_IBDLY */
+ 0x00000005, /* EMC_EINPUT */
+ 0x00000005, /* EMC_EINPUT_DURATION */
+ 0x00010000, /* EMC_PUTERM_EXTRA */
+ 0x00000003, /* EMC_PUTERM_WIDTH */
+ 0x00000000, /* EMC_PUTERM_ADJ */
+ 0x00000000, /* EMC_CDB_CNTL_1 */
+ 0x00000000, /* EMC_CDB_CNTL_2 */
+ 0x00000000, /* EMC_CDB_CNTL_3 */
+ 0x00000004, /* EMC_QRST */
+ 0x0000000c, /* EMC_QSAFE */
+ 0x0000000d, /* EMC_RDV */
+ 0x0000000f, /* EMC_RDV_MASK */
+ 0x00000202, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000080, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x0000000f, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x00000013, /* EMC_TXSR */
+ 0x00000013, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000005, /* EMC_TCKESR */
+ 0x00000004, /* EMC_TPD */
+ 0x00000001, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000005, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x00000213, /* EMC_TREFBW */
+ 0x00000000, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x106aa298, /* EMC_FBIO_CFG5 */
+ 0x002c00a0, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00064000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS8 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS9 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS10 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS11 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS12 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS13 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS14 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS15 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR0 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR1 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR2 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR3 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR4 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ4 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ5 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ6 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ7 */
+ 0x10000280, /* EMC_XM2CMDPADCTRL */
+ 0x00000000, /* EMC_XM2CMDPADCTRL4 */
+ 0x00111111, /* EMC_XM2CMDPADCTRL5 */
+ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL3 */
+ 0x77ffc081, /* EMC_XM2CLKPADCTRL */
+ 0x00000404, /* EMC_XM2CLKPADCTRL2 */
+ 0x81f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x07070004, /* EMC_XM2VTTGENPADCTRL */
+ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */
+ 0x51451400, /* EMC_XM2DQSPADCTRL3 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL4 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL5 */
+ 0x51451400, /* EMC_XM2DQSPADCTRL6 */
+ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */
+ 0x00000022, /* EMC_TXDSRVTTGEN */
+ 0x00000000, /* EMC_FBIO_SPARE */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000042, /* EMC_ZCAL_WAIT_CNT */
+ 0x000e000e, /* EMC_MRS_WAIT_CNT */
+ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */
+ 0x00000000, /* EMC_CTT */
+ 0x00000003, /* EMC_CTT_DURATION */
+ 0x0000f2f3, /* EMC_CFG_PIPE */
+ 0x8000050e, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x0000000a, /* EMC_QPOP */
+ 0x00000001, /* MC_EMEM_ARB_CFG */
+ 0x8000001e, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000a0502, /* MC_EMEM_ARB_DA_COVERS */
+ 0x74230403, /* MC_EMEM_ARB_MISC0 */
+ 0x70000f03, /* MC_EMEM_ARB_MISC1 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ },
+ {
+ 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */
+ 0x00000021, /* MC_PTSA_GRANT_DECREMENT */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_XUSB_1 */
+ 0x00ff00b0, /* MC_LATENCY_ALLOWANCE_TSEC_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */
+ 0x00ff00ec, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */
+ 0x00ff00ec, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */
+ 0x00e90049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */
+ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */
+ 0x000800ff, /* MC_LATENCY_ALLOWANCE_HC_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_GPU_0 */
+ 0x00ff00a3, /* MC_LATENCY_ALLOWANCE_MSENC_0 */
+ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VIC_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */
+ 0x000000ef, /* MC_LATENCY_ALLOWANCE_ISP2_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */
+ 0x000000ef, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */
+ 0x00ee00ef, /* MC_LATENCY_ALLOWANCE_VDE_1 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_AFI_0 */
+ },
+ 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x73240000, /* EMC_CFG */
+ 0x000008c5, /* EMC_CFG_2 */
+ 0x00040000, /* EMC_SEL_DPD_CTRL */
+ 0x002c0068, /* EMC_CFG_DIG_DLL */
+ 0x00000008, /* EMC_BGBIAS_CTL0 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */
+ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */
+ 0x80001221, /* Mode Register 0 */
+ 0x80100003, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000000, /* Mode Register 4 */
+ 10720, /* expected dvfs latency (ns) */
+ },
+ {
+ 0x19, /* V5.0.18 */
+ "001_102000_01_V5.0.18_V1.1", /* DVFS table version */
+ 102000, /* SDRAM frequency */
+ 800, /* min voltage */
+ 800, /* gpu min voltage */
+ "pllp_out0", /* clock source id */
+ 0x40000006, /* CLK_SOURCE_EMC */
+ 165, /* number of burst_regs */
+ 31, /* number of up_down_regs */
+ {
+ 0x00000004, /* EMC_RC */
+ 0x0000001a, /* EMC_RFC */
+ 0x00000000, /* EMC_RFC_SLR */
+ 0x00000003, /* EMC_RAS */
+ 0x00000001, /* EMC_RP */
+ 0x00000004, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000005, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000001, /* EMC_RD_RCD */
+ 0x00000001, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000003, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000006, /* EMC_WDV */
+ 0x00000006, /* EMC_WDV_MASK */
+ 0x00000006, /* EMC_QUSE */
+ 0x00000002, /* EMC_QUSE_WIDTH */
+ 0x00000000, /* EMC_IBDLY */
+ 0x00000005, /* EMC_EINPUT */
+ 0x00000005, /* EMC_EINPUT_DURATION */
+ 0x00010000, /* EMC_PUTERM_EXTRA */
+ 0x00000003, /* EMC_PUTERM_WIDTH */
+ 0x00000000, /* EMC_PUTERM_ADJ */
+ 0x00000000, /* EMC_CDB_CNTL_1 */
+ 0x00000000, /* EMC_CDB_CNTL_2 */
+ 0x00000000, /* EMC_CDB_CNTL_3 */
+ 0x00000004, /* EMC_QRST */
+ 0x0000000c, /* EMC_QSAFE */
+ 0x0000000d, /* EMC_RDV */
+ 0x0000000f, /* EMC_RDV_MASK */
+ 0x00000304, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000000c1, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000018, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x0000001c, /* EMC_TXSR */
+ 0x0000001c, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000005, /* EMC_TCKESR */
+ 0x00000004, /* EMC_TPD */
+ 0x00000003, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000005, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x0000031c, /* EMC_TREFBW */
+ 0x00000000, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x106aa298, /* EMC_FBIO_CFG5 */
+ 0x002c00a0, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00064000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS8 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS9 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS10 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS11 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS12 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS13 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS14 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS15 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR0 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR1 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR2 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR3 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR4 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ4 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ5 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ6 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ7 */
+ 0x10000280, /* EMC_XM2CMDPADCTRL */
+ 0x00000000, /* EMC_XM2CMDPADCTRL4 */
+ 0x00111111, /* EMC_XM2CMDPADCTRL5 */
+ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL3 */
+ 0x77ffc081, /* EMC_XM2CLKPADCTRL */
+ 0x00000404, /* EMC_XM2CLKPADCTRL2 */
+ 0x81f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x07070004, /* EMC_XM2VTTGENPADCTRL */
+ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */
+ 0x51451400, /* EMC_XM2DQSPADCTRL3 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL4 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL5 */
+ 0x51451400, /* EMC_XM2DQSPADCTRL6 */
+ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */
+ 0x00000033, /* EMC_TXDSRVTTGEN */
+ 0x00000000, /* EMC_FBIO_SPARE */
+ 0x00000000, /* EMC_ZCAL_INTERVAL */
+ 0x00000042, /* EMC_ZCAL_WAIT_CNT */
+ 0x000e000e, /* EMC_MRS_WAIT_CNT */
+ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */
+ 0x00000000, /* EMC_CTT */
+ 0x00000003, /* EMC_CTT_DURATION */
+ 0x0000f2f3, /* EMC_CFG_PIPE */
+ 0x80000713, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x0000000a, /* EMC_QPOP */
+ 0x08000001, /* MC_EMEM_ARB_CFG */
+ 0x80000026, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000000, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06030203, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000a0503, /* MC_EMEM_ARB_DA_COVERS */
+ 0x73c30504, /* MC_EMEM_ARB_MISC0 */
+ 0x70000f03, /* MC_EMEM_ARB_MISC1 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ },
+ {
+ 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */
+ 0x00000031, /* MC_PTSA_GRANT_DECREMENT */
+ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_0 */
+ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_XUSB_1 */
+ 0x00ff0075, /* MC_LATENCY_ALLOWANCE_TSEC_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */
+ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */
+ 0x00ff009d, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */
+ 0x009b0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */
+ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */
+ 0x000800ad, /* MC_LATENCY_ALLOWANCE_HC_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */
+ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_GPU_0 */
+ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_MSENC_0 */
+ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */
+ 0x00ff00d6, /* MC_LATENCY_ALLOWANCE_VIC_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */
+ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */
+ 0x0000009f, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */
+ 0x009f00a0, /* MC_LATENCY_ALLOWANCE_VDE_1 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_SATA_0 */
+ 0x00ff00da, /* MC_LATENCY_ALLOWANCE_AFI_0 */
+ },
+ 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x73240000, /* EMC_CFG */
+ 0x000008c5, /* EMC_CFG_2 */
+ 0x00040000, /* EMC_SEL_DPD_CTRL */
+ 0x002c0068, /* EMC_CFG_DIG_DLL */
+ 0x00000008, /* EMC_BGBIAS_CTL0 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */
+ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */
+ 0x80001221, /* Mode Register 0 */
+ 0x80100003, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000000, /* Mode Register 4 */
+ 6890, /* expected dvfs latency (ns) */
+ },
+ {
+ 0x19, /* V5.0.18 */
+ "001_204000_01_V5.0.18_V1.1", /* DVFS table version */
+ 204000, /* SDRAM frequency */
+ 800, /* min voltage */
+ 800, /* gpu min voltage */
+ "pllp_out0", /* clock source id */
+ 0x40000002, /* CLK_SOURCE_EMC */
+ 165, /* number of burst_regs */
+ 31, /* number of up_down_regs */
+ {
+ 0x00000009, /* EMC_RC */
+ 0x00000035, /* EMC_RFC */
+ 0x00000000, /* EMC_RFC_SLR */
+ 0x00000006, /* EMC_RAS */
+ 0x00000002, /* EMC_RP */
+ 0x00000005, /* EMC_R2W */
+ 0x0000000a, /* EMC_W2R */
+ 0x00000005, /* EMC_R2P */
+ 0x0000000b, /* EMC_W2P */
+ 0x00000002, /* EMC_RD_RCD */
+ 0x00000002, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000003, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000005, /* EMC_WDV_MASK */
+ 0x00000006, /* EMC_QUSE */
+ 0x00000002, /* EMC_QUSE_WIDTH */
+ 0x00000000, /* EMC_IBDLY */
+ 0x00000004, /* EMC_EINPUT */
+ 0x00000006, /* EMC_EINPUT_DURATION */
+ 0x00010000, /* EMC_PUTERM_EXTRA */
+ 0x00000003, /* EMC_PUTERM_WIDTH */
+ 0x00000000, /* EMC_PUTERM_ADJ */
+ 0x00000000, /* EMC_CDB_CNTL_1 */
+ 0x00000000, /* EMC_CDB_CNTL_2 */
+ 0x00000000, /* EMC_CDB_CNTL_3 */
+ 0x00000003, /* EMC_QRST */
+ 0x0000000d, /* EMC_QSAFE */
+ 0x0000000f, /* EMC_RDV */
+ 0x00000011, /* EMC_RDV_MASK */
+ 0x00000607, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000181, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x00000002, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000032, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x00000038, /* EMC_TXSR */
+ 0x00000038, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000005, /* EMC_TCKESR */
+ 0x00000004, /* EMC_TPD */
+ 0x00000007, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000005, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x00000638, /* EMC_TREFBW */
+ 0x00000000, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x106aa298, /* EMC_FBIO_CFG5 */
+ 0x002c00a0, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00064000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS8 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS9 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS10 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS11 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS12 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS13 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS14 */
+ 0x00064000, /* EMC_DLL_XFORM_DQS15 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR0 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR1 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR2 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR3 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR4 */
+ 0x00010000, /* EMC_DLL_XFORM_ADDR5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ4 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ5 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ6 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ7 */
+ 0x10000280, /* EMC_XM2CMDPADCTRL */
+ 0x00000000, /* EMC_XM2CMDPADCTRL4 */
+ 0x00111111, /* EMC_XM2CMDPADCTRL5 */
+ 0x0130b118, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL3 */
+ 0x77ffc081, /* EMC_XM2CLKPADCTRL */
+ 0x00000404, /* EMC_XM2CLKPADCTRL2 */
+ 0x81f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x07070004, /* EMC_XM2VTTGENPADCTRL */
+ 0x0000003f, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */
+ 0x51451400, /* EMC_XM2DQSPADCTRL3 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL4 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL5 */
+ 0x51451400, /* EMC_XM2DQSPADCTRL6 */
+ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */
+ 0x00000066, /* EMC_TXDSRVTTGEN */
+ 0x00000000, /* EMC_FBIO_SPARE */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x000e000e, /* EMC_MRS_WAIT_CNT */
+ 0x000e000e, /* EMC_MRS_WAIT_CNT2 */
+ 0x00000000, /* EMC_CTT */
+ 0x00000003, /* EMC_CTT_DURATION */
+ 0x0000d2b3, /* EMC_CFG_PIPE */
+ 0x80000d22, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x0000000a, /* EMC_QPOP */
+ 0x01000003, /* MC_EMEM_ARB_CFG */
+ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06040203, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000a0504, /* MC_EMEM_ARB_DA_COVERS */
+ 0x73840a05, /* MC_EMEM_ARB_MISC0 */
+ 0x70000f03, /* MC_EMEM_ARB_MISC1 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ },
+ {
+ 0x00000001, /* MC_MLL_MPCORER_PTSA_RATE */
+ 0x00000062, /* MC_PTSA_GRANT_DECREMENT */
+ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_0 */
+ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_XUSB_1 */
+ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */
+ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */
+ 0x00ff004f, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */
+ 0x00ff00af, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */
+ 0x00ff004f, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */
+ 0x004e0049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */
+ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */
+ 0x00080057, /* MC_LATENCY_ALLOWANCE_HC_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */
+ 0x00ff0063, /* MC_LATENCY_ALLOWANCE_GPU_0 */
+ 0x00ff0036, /* MC_LATENCY_ALLOWANCE_MSENC_0 */
+ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */
+ 0x00ff006b, /* MC_LATENCY_ALLOWANCE_VIC_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */
+ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */
+ 0x00000050, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */
+ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */
+ 0x00510050, /* MC_LATENCY_ALLOWANCE_VDE_1 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */
+ 0x00ff00c6, /* MC_LATENCY_ALLOWANCE_SATA_0 */
+ 0x00ff006d, /* MC_LATENCY_ALLOWANCE_AFI_0 */
+ },
+ 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x73240000, /* EMC_CFG */
+ 0x0000088d, /* EMC_CFG_2 */
+ 0x00040000, /* EMC_SEL_DPD_CTRL */
+ 0x002c0068, /* EMC_CFG_DIG_DLL */
+ 0x00000008, /* EMC_BGBIAS_CTL0 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */
+ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */
+ 0x80001221, /* Mode Register 0 */
+ 0x80100003, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000000, /* Mode Register 4 */
+ 3420, /* expected dvfs latency (ns) */
+ },
+ {
+ 0x19, /* V5.0.18 */
+ "001_300000_01_V5.0.18_V1.1", /* DVFS table version */
+ 300000, /* SDRAM frequency */
+ 820, /* min voltage */
+ 820, /* gpu min voltage */
+ "pllc_out0", /* clock source id */
+ 0x20000002, /* CLK_SOURCE_EMC */
+ 165, /* number of burst_regs */
+ 31, /* number of up_down_regs */
+ {
+ 0x0000000d, /* EMC_RC */
+ 0x0000004d, /* EMC_RFC */
+ 0x00000000, /* EMC_RFC_SLR */
+ 0x00000009, /* EMC_RAS */
+ 0x00000003, /* EMC_RP */
+ 0x00000004, /* EMC_R2W */
+ 0x00000008, /* EMC_W2R */
+ 0x00000002, /* EMC_R2P */
+ 0x00000009, /* EMC_W2P */
+ 0x00000003, /* EMC_RD_RCD */
+ 0x00000003, /* EMC_WR_RCD */
+ 0x00000002, /* EMC_RRD */
+ 0x00000002, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000003, /* EMC_WDV */
+ 0x00000003, /* EMC_WDV_MASK */
+ 0x00000005, /* EMC_QUSE */
+ 0x00000002, /* EMC_QUSE_WIDTH */
+ 0x00000000, /* EMC_IBDLY */
+ 0x00000002, /* EMC_EINPUT */
+ 0x00000007, /* EMC_EINPUT_DURATION */
+ 0x00020000, /* EMC_PUTERM_EXTRA */
+ 0x00000003, /* EMC_PUTERM_WIDTH */
+ 0x00000000, /* EMC_PUTERM_ADJ */
+ 0x00000000, /* EMC_CDB_CNTL_1 */
+ 0x00000000, /* EMC_CDB_CNTL_2 */
+ 0x00000000, /* EMC_CDB_CNTL_3 */
+ 0x00000001, /* EMC_QRST */
+ 0x0000000e, /* EMC_QSAFE */
+ 0x00000010, /* EMC_RDV */
+ 0x00000012, /* EMC_RDV_MASK */
+ 0x000008e4, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000239, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000008, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x0000004b, /* EMC_AR2PDEN */
+ 0x0000000e, /* EMC_RW2PDEN */
+ 0x00000052, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000005, /* EMC_TCKESR */
+ 0x00000004, /* EMC_TPD */
+ 0x00000009, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000005, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x00000924, /* EMC_TREFBW */
+ 0x00000000, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x104ab098, /* EMC_FBIO_CFG5 */
+ 0x002c00a0, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00030000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS8 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS9 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS10 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS11 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS12 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS13 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS14 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS15 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00098000, /* EMC_DLL_XFORM_ADDR0 */
+ 0x00098000, /* EMC_DLL_XFORM_ADDR1 */
+ 0x00098000, /* EMC_DLL_XFORM_ADDR2 */
+ 0x00098000, /* EMC_DLL_XFORM_ADDR3 */
+ 0x00098000, /* EMC_DLL_XFORM_ADDR4 */
+ 0x00098000, /* EMC_DLL_XFORM_ADDR5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ4 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ5 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ6 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ7 */
+ 0x10000280, /* EMC_XM2CMDPADCTRL */
+ 0x00000000, /* EMC_XM2CMDPADCTRL4 */
+ 0x00111111, /* EMC_XM2CMDPADCTRL5 */
+ 0x01231339, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL3 */
+ 0x77ffc081, /* EMC_XM2CLKPADCTRL */
+ 0x00000404, /* EMC_XM2CLKPADCTRL2 */
+ 0x81f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x07070004, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */
+ 0x51451420, /* EMC_XM2DQSPADCTRL3 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL4 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL5 */
+ 0x51451400, /* EMC_XM2DQSPADCTRL6 */
+ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */
+ 0x00000096, /* EMC_TXDSRVTTGEN */
+ 0x00000000, /* EMC_FBIO_SPARE */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x0173000e, /* EMC_MRS_WAIT_CNT */
+ 0x0173000e, /* EMC_MRS_WAIT_CNT2 */
+ 0x00000000, /* EMC_CTT */
+ 0x00000003, /* EMC_CTT_DURATION */
+ 0x000052a3, /* EMC_CFG_PIPE */
+ 0x800012d7, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000009, /* EMC_QPOP */
+ 0x08000004, /* MC_EMEM_ARB_CFG */
+ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000007, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000007, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06040202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000b0607, /* MC_EMEM_ARB_DA_COVERS */
+ 0x77450e08, /* MC_EMEM_ARB_MISC0 */
+ 0x70000f03, /* MC_EMEM_ARB_MISC1 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ },
+ {
+ 0x00000004, /* MC_MLL_MPCORER_PTSA_RATE */
+ 0x00000090, /* MC_PTSA_GRANT_DECREMENT */
+ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_XUSB_0 */
+ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_XUSB_1 */
+ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */
+ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */
+ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */
+ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */
+ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */
+ 0x00350049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */
+ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */
+ 0x0008003b, /* MC_LATENCY_ALLOWANCE_HC_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */
+ 0x00ff0043, /* MC_LATENCY_ALLOWANCE_GPU_0 */
+ 0x00ff002d, /* MC_LATENCY_ALLOWANCE_MSENC_0 */
+ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */
+ 0x00ff0049, /* MC_LATENCY_ALLOWANCE_VIC_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */
+ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */
+ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */
+ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */
+ 0x00510036, /* MC_LATENCY_ALLOWANCE_VDE_1 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */
+ 0x00ff0087, /* MC_LATENCY_ALLOWANCE_SATA_0 */
+ 0x00ff004a, /* MC_LATENCY_ALLOWANCE_AFI_0 */
+ },
+ 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x73340000, /* EMC_CFG */
+ 0x000008d5, /* EMC_CFG_2 */
+ 0x00040000, /* EMC_SEL_DPD_CTRL */
+ 0x002c0068, /* EMC_CFG_DIG_DLL */
+ 0x00000000, /* EMC_BGBIAS_CTL0 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */
+ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */
+ 0x80000321, /* Mode Register 0 */
+ 0x80100002, /* Mode Register 1 */
+ 0x80200000, /* Mode Register 2 */
+ 0x00000000, /* Mode Register 4 */
+ 2680, /* expected dvfs latency (ns) */
+ },
+ {
+ 0x19, /* V5.0.18 */
+ "001_396000_01_V5.0.18_V1.1", /* DVFS table version */
+ 396000, /* SDRAM frequency */
+ 850, /* min voltage */
+ 850, /* gpu min voltage */
+ "pllm_out0", /* clock source id */
+ 0x00000002, /* CLK_SOURCE_EMC */
+ 165, /* number of burst_regs */
+ 31, /* number of up_down_regs */
+ {
+ 0x00000011, /* EMC_RC */
+ 0x00000066, /* EMC_RFC */
+ 0x00000000, /* EMC_RFC_SLR */
+ 0x0000000c, /* EMC_RAS */
+ 0x00000004, /* EMC_RP */
+ 0x00000005, /* EMC_R2W */
+ 0x00000008, /* EMC_W2R */
+ 0x00000002, /* EMC_R2P */
+ 0x0000000a, /* EMC_W2P */
+ 0x00000004, /* EMC_RD_RCD */
+ 0x00000004, /* EMC_WR_RCD */
+ 0x00000002, /* EMC_RRD */
+ 0x00000002, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000003, /* EMC_WDV */
+ 0x00000003, /* EMC_WDV_MASK */
+ 0x00000005, /* EMC_QUSE */
+ 0x00000002, /* EMC_QUSE_WIDTH */
+ 0x00000000, /* EMC_IBDLY */
+ 0x00000001, /* EMC_EINPUT */
+ 0x00000008, /* EMC_EINPUT_DURATION */
+ 0x00020000, /* EMC_PUTERM_EXTRA */
+ 0x00000003, /* EMC_PUTERM_WIDTH */
+ 0x00000000, /* EMC_PUTERM_ADJ */
+ 0x00000000, /* EMC_CDB_CNTL_1 */
+ 0x00000000, /* EMC_CDB_CNTL_2 */
+ 0x00000000, /* EMC_CDB_CNTL_3 */
+ 0x00000000, /* EMC_QRST */
+ 0x0000000f, /* EMC_QSAFE */
+ 0x00000010, /* EMC_RDV */
+ 0x00000012, /* EMC_RDV_MASK */
+ 0x00000bd1, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000002f4, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000001, /* EMC_PDEX2WR */
+ 0x00000008, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000063, /* EMC_AR2PDEN */
+ 0x0000000f, /* EMC_RW2PDEN */
+ 0x0000006c, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000005, /* EMC_TCKESR */
+ 0x00000004, /* EMC_TPD */
+ 0x0000000d, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000005, /* EMC_TCLKSTABLE */
+ 0x00000005, /* EMC_TCLKSTOP */
+ 0x00000c11, /* EMC_TREFBW */
+ 0x00000000, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x104ab098, /* EMC_FBIO_CFG5 */
+ 0x002c00a0, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x00030000, /* EMC_DLL_XFORM_DQS0 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS1 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS2 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS3 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS4 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS5 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS6 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS7 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS8 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS9 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS10 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS11 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS12 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS13 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS14 */
+ 0x00030000, /* EMC_DLL_XFORM_DQS15 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00070000, /* EMC_DLL_XFORM_ADDR0 */
+ 0x00070000, /* EMC_DLL_XFORM_ADDR1 */
+ 0x00070000, /* EMC_DLL_XFORM_ADDR2 */
+ 0x00070000, /* EMC_DLL_XFORM_ADDR3 */
+ 0x00070000, /* EMC_DLL_XFORM_ADDR4 */
+ 0x00070000, /* EMC_DLL_XFORM_ADDR5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ0 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ1 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ2 */
+ 0x00048000, /* EMC_DLL_XFORM_DQ3 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ4 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ5 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ6 */
+ 0x00004800, /* EMC_DLL_XFORM_DQ7 */
+ 0x10000280, /* EMC_XM2CMDPADCTRL */
+ 0x00000000, /* EMC_XM2CMDPADCTRL4 */
+ 0x00111111, /* EMC_XM2CMDPADCTRL5 */
+ 0x01231339, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL3 */
+ 0x77ffc081, /* EMC_XM2CLKPADCTRL */
+ 0x00000404, /* EMC_XM2CLKPADCTRL2 */
+ 0x81f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x07070004, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */
+ 0x51451420, /* EMC_XM2DQSPADCTRL3 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL4 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL5 */
+ 0x51451400, /* EMC_XM2DQSPADCTRL6 */
+ 0x0000003f, /* EMC_DSR_VTTGEN_DRV */
+ 0x000000c6, /* EMC_TXDSRVTTGEN */
+ 0x00000000, /* EMC_FBIO_SPARE */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x015b000e, /* EMC_MRS_WAIT_CNT */
+ 0x015b000e, /* EMC_MRS_WAIT_CNT2 */
+ 0x00000000, /* EMC_CTT */
+ 0x00000003, /* EMC_CTT_DURATION */
+ 0x000052a3, /* EMC_CFG_PIPE */
+ 0x8000188b, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000009, /* EMC_QPOP */
+ 0x0f000005, /* MC_EMEM_ARB_CFG */
+ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000009, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000007, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06040202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x000d0709, /* MC_EMEM_ARB_DA_COVERS */
+ 0x7586120a, /* MC_EMEM_ARB_MISC0 */
+ 0x70000f03, /* MC_EMEM_ARB_MISC1 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ },
+ {
+ 0x0000000a, /* MC_MLL_MPCORER_PTSA_RATE */
+ 0x000000be, /* MC_PTSA_GRANT_DECREMENT */
+ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */
+ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */
+ 0x00ff003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */
+ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */
+ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */
+ 0x00ff0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */
+ 0x00ff0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */
+ 0x00280049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */
+ 0x00ff0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */
+ 0x0008002d, /* MC_LATENCY_ALLOWANCE_HC_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_HC_1 */
+ 0x00ff0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */
+ 0x00ff0033, /* MC_LATENCY_ALLOWANCE_GPU_0 */
+ 0x00ff0022, /* MC_LATENCY_ALLOWANCE_MSENC_0 */
+ 0x00ff0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */
+ 0x00ff0037, /* MC_LATENCY_ALLOWANCE_VIC_0 */
+ 0x000000ff, /* MC_LATENCY_ALLOWANCE_VI2_0 */
+ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2_1 */
+ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */
+ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */
+ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_2 */
+ 0x00ff00ff, /* MC_LATENCY_ALLOWANCE_VDE_3 */
+ 0x00ff0066, /* MC_LATENCY_ALLOWANCE_SATA_0 */
+ 0x00ff0038, /* MC_LATENCY_ALLOWANCE_AFI_0 */
+ },
+ 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x73340000, /* EMC_CFG */
+ 0x00000895, /* EMC_CFG_2 */
+ 0x00040000, /* EMC_SEL_DPD_CTRL */
+ 0x002c0068, /* EMC_CFG_DIG_DLL */
+ 0x00000000, /* EMC_BGBIAS_CTL0 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */
+ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */
+ 0x80000521, /* Mode Register 0 */
+ 0x80100002, /* Mode Register 1 */
+ 0x80200000, /* Mode Register 2 */
+ 0x00000000, /* Mode Register 4 */
+ 2180, /* expected dvfs latency (ns) */
+ },
+ {
+ 0x19, /* V5.0.18 */
+ "001_528000_01_V5.0.18_V1.1", /* DVFS table version */
+ 528000, /* SDRAM frequency */
+ 880, /* min voltage */
+ 870, /* gpu min voltage */
+ "pllm_ud", /* clock source id */
+ 0x80000000, /* CLK_SOURCE_EMC */
+ 165, /* number of burst_regs */
+ 31, /* number of up_down_regs */
+ {
+ 0x00000018, /* EMC_RC */
+ 0x00000088, /* EMC_RFC */
+ 0x00000000, /* EMC_RFC_SLR */
+ 0x00000010, /* EMC_RAS */
+ 0x00000006, /* EMC_RP */
+ 0x00000006, /* EMC_R2W */
+ 0x00000009, /* EMC_W2R */
+ 0x00000002, /* EMC_R2P */
+ 0x0000000d, /* EMC_W2P */
+ 0x00000006, /* EMC_RD_RCD */
+ 0x00000006, /* EMC_WR_RCD */
+ 0x00000002, /* EMC_RRD */
+ 0x00000002, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000003, /* EMC_WDV */
+ 0x00000003, /* EMC_WDV_MASK */
+ 0x00000007, /* EMC_QUSE */
+ 0x00000002, /* EMC_QUSE_WIDTH */
+ 0x00000000, /* EMC_IBDLY */
+ 0x00000002, /* EMC_EINPUT */
+ 0x00000009, /* EMC_EINPUT_DURATION */
+ 0x00040000, /* EMC_PUTERM_EXTRA */
+ 0x00000003, /* EMC_PUTERM_WIDTH */
+ 0x00000000, /* EMC_PUTERM_ADJ */
+ 0x00000000, /* EMC_CDB_CNTL_1 */
+ 0x00000000, /* EMC_CDB_CNTL_2 */
+ 0x00000000, /* EMC_CDB_CNTL_3 */
+ 0x00000001, /* EMC_QRST */
+ 0x00000010, /* EMC_QSAFE */
+ 0x00000013, /* EMC_RDV */
+ 0x00000015, /* EMC_RDV_MASK */
+ 0x00000fd6, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000003f5, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x0000000b, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000085, /* EMC_AR2PDEN */
+ 0x00000012, /* EMC_RW2PDEN */
+ 0x00000090, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000005, /* EMC_TCKESR */
+ 0x00000004, /* EMC_TPD */
+ 0x00000013, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000006, /* EMC_TCLKSTABLE */
+ 0x00000006, /* EMC_TCLKSTOP */
+ 0x00001017, /* EMC_TREFBW */
+ 0x00000000, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x104ab098, /* EMC_FBIO_CFG5 */
+ 0xe01200b1, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS4 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS5 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS6 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS7 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS8 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS9 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS10 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS11 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS12 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS13 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS14 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS15 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00050000, /* EMC_DLL_XFORM_ADDR0 */
+ 0x00050000, /* EMC_DLL_XFORM_ADDR1 */
+ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */
+ 0x00050000, /* EMC_DLL_XFORM_ADDR3 */
+ 0x00050000, /* EMC_DLL_XFORM_ADDR4 */
+ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */
+ 0x0000000e, /* EMC_DLL_XFORM_DQ0 */
+ 0x0000000e, /* EMC_DLL_XFORM_DQ1 */
+ 0x0000000e, /* EMC_DLL_XFORM_DQ2 */
+ 0x0000000e, /* EMC_DLL_XFORM_DQ3 */
+ 0x0000000e, /* EMC_DLL_XFORM_DQ4 */
+ 0x0000000e, /* EMC_DLL_XFORM_DQ5 */
+ 0x0000000e, /* EMC_DLL_XFORM_DQ6 */
+ 0x0000000e, /* EMC_DLL_XFORM_DQ7 */
+ 0x100002a0, /* EMC_XM2CMDPADCTRL */
+ 0x00000000, /* EMC_XM2CMDPADCTRL4 */
+ 0x00111111, /* EMC_XM2CMDPADCTRL5 */
+ 0x0123133d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL3 */
+ 0x77ffc085, /* EMC_XM2CLKPADCTRL */
+ 0x00000404, /* EMC_XM2CLKPADCTRL2 */
+ 0x81f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x07070004, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */
+ 0x51451420, /* EMC_XM2DQSPADCTRL3 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL4 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL5 */
+ 0x51451400, /* EMC_XM2DQSPADCTRL6 */
+ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */
+ 0x00000000, /* EMC_TXDSRVTTGEN */
+ 0x00000000, /* EMC_FBIO_SPARE */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x0139000e, /* EMC_MRS_WAIT_CNT */
+ 0x0139000e, /* EMC_MRS_WAIT_CNT2 */
+ 0x00000000, /* EMC_CTT */
+ 0x00000003, /* EMC_CTT_DURATION */
+ 0x000042a0, /* EMC_CFG_PIPE */
+ 0x80002062, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x0000000b, /* EMC_QPOP */
+ 0x0f000007, /* MC_EMEM_ARB_CFG */
+ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RP */
+ 0x0000000c, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000007, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x0000000a, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x00000009, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x06050202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x0010090c, /* MC_EMEM_ARB_DA_COVERS */
+ 0x7428180d, /* MC_EMEM_ARB_MISC0 */
+ 0x70000f03, /* MC_EMEM_ARB_MISC1 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ },
+ {
+ 0x0000000d, /* MC_MLL_MPCORER_PTSA_RATE */
+ 0x000000fd, /* MC_PTSA_GRANT_DECREMENT */
+ 0x00c10038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */
+ 0x00c10038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */
+ 0x00c1003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */
+ 0x00c10090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */
+ 0x00c10041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */
+ 0x00c10090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */
+ 0x00c10041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */
+ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */
+ 0x00c10080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */
+ 0x00c10004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */
+ 0x00c10004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */
+ 0x00080021, /* MC_LATENCY_ALLOWANCE_HC_0 */
+ 0x000000c1, /* MC_LATENCY_ALLOWANCE_HC_1 */
+ 0x00c10004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */
+ 0x00c10026, /* MC_LATENCY_ALLOWANCE_GPU_0 */
+ 0x00c1001a, /* MC_LATENCY_ALLOWANCE_MSENC_0 */
+ 0x00c10024, /* MC_LATENCY_ALLOWANCE_HDA_0 */
+ 0x00c10029, /* MC_LATENCY_ALLOWANCE_VIC_0 */
+ 0x000000c1, /* MC_LATENCY_ALLOWANCE_VI2_0 */
+ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */
+ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_ISP2_1 */
+ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */
+ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */
+ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */
+ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */
+ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_VDE_2 */
+ 0x00c100c1, /* MC_LATENCY_ALLOWANCE_VDE_3 */
+ 0x00c10065, /* MC_LATENCY_ALLOWANCE_SATA_0 */
+ 0x00c1002a, /* MC_LATENCY_ALLOWANCE_AFI_0 */
+ },
+ 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x73300000, /* EMC_CFG */
+ 0x0000089d, /* EMC_CFG_2 */
+ 0x00040000, /* EMC_SEL_DPD_CTRL */
+ 0xe0120069, /* EMC_CFG_DIG_DLL */
+ 0x00000000, /* EMC_BGBIAS_CTL0 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */
+ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */
+ 0x80000941, /* Mode Register 0 */
+ 0x80100002, /* Mode Register 1 */
+ 0x80200008, /* Mode Register 2 */
+ 0x00000000, /* Mode Register 4 */
+ 1440, /* expected dvfs latency (ns) */
+ },
+ {
+ 0x19, /* V5.0.18 */
+ "001_600000_01_V5.0.18_V1.1", /* DVFS table version */
+ 600000, /* SDRAM frequency */
+ 910, /* min voltage */
+ 910, /* gpu min voltage */
+ "pllc_ud", /* clock source id */
+ 0xe0000000, /* CLK_SOURCE_EMC */
+ 165, /* number of burst_regs */
+ 31, /* number of up_down_regs */
+ {
+ 0x0000001b, /* EMC_RC */
+ 0x0000009b, /* EMC_RFC */
+ 0x00000000, /* EMC_RFC_SLR */
+ 0x00000013, /* EMC_RAS */
+ 0x00000007, /* EMC_RP */
+ 0x00000007, /* EMC_R2W */
+ 0x0000000b, /* EMC_W2R */
+ 0x00000003, /* EMC_R2P */
+ 0x00000010, /* EMC_W2P */
+ 0x00000007, /* EMC_RD_RCD */
+ 0x00000007, /* EMC_WR_RCD */
+ 0x00000002, /* EMC_RRD */
+ 0x00000002, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000005, /* EMC_WDV */
+ 0x00000005, /* EMC_WDV_MASK */
+ 0x0000000a, /* EMC_QUSE */
+ 0x00000002, /* EMC_QUSE_WIDTH */
+ 0x00000000, /* EMC_IBDLY */
+ 0x00000003, /* EMC_EINPUT */
+ 0x0000000b, /* EMC_EINPUT_DURATION */
+ 0x00070000, /* EMC_PUTERM_EXTRA */
+ 0x00000003, /* EMC_PUTERM_WIDTH */
+ 0x00000000, /* EMC_PUTERM_ADJ */
+ 0x00000000, /* EMC_CDB_CNTL_1 */
+ 0x00000000, /* EMC_CDB_CNTL_2 */
+ 0x00000000, /* EMC_CDB_CNTL_3 */
+ 0x00000002, /* EMC_QRST */
+ 0x00000012, /* EMC_QSAFE */
+ 0x00000016, /* EMC_RDV */
+ 0x00000018, /* EMC_RDV_MASK */
+ 0x00001208, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x00000482, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000002, /* EMC_PDEX2WR */
+ 0x0000000d, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x00000097, /* EMC_AR2PDEN */
+ 0x00000015, /* EMC_RW2PDEN */
+ 0x000000a3, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x00000004, /* EMC_TCKE */
+ 0x00000005, /* EMC_TCKESR */
+ 0x00000004, /* EMC_TPD */
+ 0x00000015, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000006, /* EMC_TCLKSTABLE */
+ 0x00000006, /* EMC_TCLKSTOP */
+ 0x00001248, /* EMC_TREFBW */
+ 0x00000000, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x104ab098, /* EMC_FBIO_CFG5 */
+ 0xe00e00b1, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x0000000c, /* EMC_DLL_XFORM_DQS0 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQS1 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQS2 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQS3 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQS4 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQS5 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQS6 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQS7 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS8 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS9 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS10 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS11 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS12 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS13 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS14 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS15 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00048000, /* EMC_DLL_XFORM_ADDR0 */
+ 0x00048000, /* EMC_DLL_XFORM_ADDR1 */
+ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */
+ 0x00048000, /* EMC_DLL_XFORM_ADDR3 */
+ 0x00048000, /* EMC_DLL_XFORM_ADDR4 */
+ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS8 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS9 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS10 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS11 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS12 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS13 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS14 */
+ 0x00000000, /* EMC_DLI_TRIM_TXDQS15 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ0 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ1 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ2 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ3 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ4 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ5 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ6 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ7 */
+ 0x100002a0, /* EMC_XM2CMDPADCTRL */
+ 0x00000000, /* EMC_XM2CMDPADCTRL4 */
+ 0x00111111, /* EMC_XM2CMDPADCTRL5 */
+ 0x0121113d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL3 */
+ 0x77ffc085, /* EMC_XM2CLKPADCTRL */
+ 0x00000404, /* EMC_XM2CLKPADCTRL2 */
+ 0x81f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x07070004, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */
+ 0x51451420, /* EMC_XM2DQSPADCTRL3 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL4 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL5 */
+ 0x51451400, /* EMC_XM2DQSPADCTRL6 */
+ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */
+ 0x00000000, /* EMC_TXDSRVTTGEN */
+ 0x00000000, /* EMC_FBIO_SPARE */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x0127000e, /* EMC_MRS_WAIT_CNT */
+ 0x0127000e, /* EMC_MRS_WAIT_CNT2 */
+ 0x00000000, /* EMC_CTT */
+ 0x00000003, /* EMC_CTT_DURATION */
+ 0x000040a0, /* EMC_CFG_PIPE */
+ 0x800024aa, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x0000000e, /* EMC_QPOP */
+ 0x00000009, /* MC_EMEM_ARB_CFG */
+ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RP */
+ 0x0000000e, /* MC_EMEM_ARB_TIMING_RC */
+ 0x00000009, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x0000000b, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000001, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x0000000b, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000007, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x07050202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00130b0e, /* MC_EMEM_ARB_DA_COVERS */
+ 0x73a91b0f, /* MC_EMEM_ARB_MISC0 */
+ 0x70000f03, /* MC_EMEM_ARB_MISC1 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ },
+ {
+ 0x0000000f, /* MC_MLL_MPCORER_PTSA_RATE */
+ 0x00000120, /* MC_PTSA_GRANT_DECREMENT */
+ 0x00aa0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */
+ 0x00aa0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */
+ 0x00aa003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */
+ 0x00aa0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */
+ 0x00aa0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */
+ 0x00aa0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */
+ 0x00aa0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */
+ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */
+ 0x00aa0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */
+ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */
+ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */
+ 0x0008001d, /* MC_LATENCY_ALLOWANCE_HC_0 */
+ 0x000000aa, /* MC_LATENCY_ALLOWANCE_HC_1 */
+ 0x00aa0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */
+ 0x00aa0022, /* MC_LATENCY_ALLOWANCE_GPU_0 */
+ 0x00aa0018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */
+ 0x00aa0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */
+ 0x00aa0024, /* MC_LATENCY_ALLOWANCE_VIC_0 */
+ 0x000000aa, /* MC_LATENCY_ALLOWANCE_VI2_0 */
+ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */
+ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_ISP2_1 */
+ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */
+ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */
+ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */
+ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */
+ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_VDE_2 */
+ 0x00aa00aa, /* MC_LATENCY_ALLOWANCE_VDE_3 */
+ 0x00aa0065, /* MC_LATENCY_ALLOWANCE_SATA_0 */
+ 0x00aa0025, /* MC_LATENCY_ALLOWANCE_AFI_0 */
+ },
+ 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x73300000, /* EMC_CFG */
+ 0x0000089d, /* EMC_CFG_2 */
+ 0x00040000, /* EMC_SEL_DPD_CTRL */
+ 0xe00e0069, /* EMC_CFG_DIG_DLL */
+ 0x00000000, /* EMC_BGBIAS_CTL0 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */
+ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */
+ 0x80000b61, /* Mode Register 0 */
+ 0x80100002, /* Mode Register 1 */
+ 0x80200010, /* Mode Register 2 */
+ 0x00000000, /* Mode Register 4 */
+ 1440, /* expected dvfs latency (ns) */
+ },
+ {
+ 0x19, /* V5.0.18 */
+ "001_792000_01_V5.0.18_V1.1", /* DVFS table version */
+ 792000, /* SDRAM frequency */
+ 980, /* min voltage */
+ 980, /* gpu min voltage */
+ "pllm_ud", /* clock source id */
+ 0x80000000, /* CLK_SOURCE_EMC */
+ 165, /* number of burst_regs */
+ 31, /* number of up_down_regs */
+ {
+ 0x00000024, /* EMC_RC */
+ 0x000000cd, /* EMC_RFC */
+ 0x00000000, /* EMC_RFC_SLR */
+ 0x00000019, /* EMC_RAS */
+ 0x0000000a, /* EMC_RP */
+ 0x00000008, /* EMC_R2W */
+ 0x0000000d, /* EMC_W2R */
+ 0x00000004, /* EMC_R2P */
+ 0x00000013, /* EMC_W2P */
+ 0x0000000a, /* EMC_RD_RCD */
+ 0x0000000a, /* EMC_WR_RCD */
+ 0x00000003, /* EMC_RRD */
+ 0x00000002, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000006, /* EMC_WDV */
+ 0x00000006, /* EMC_WDV_MASK */
+ 0x0000000b, /* EMC_QUSE */
+ 0x00000002, /* EMC_QUSE_WIDTH */
+ 0x00000000, /* EMC_IBDLY */
+ 0x00000002, /* EMC_EINPUT */
+ 0x0000000d, /* EMC_EINPUT_DURATION */
+ 0x00080000, /* EMC_PUTERM_EXTRA */
+ 0x00000004, /* EMC_PUTERM_WIDTH */
+ 0x00000000, /* EMC_PUTERM_ADJ */
+ 0x00000000, /* EMC_CDB_CNTL_1 */
+ 0x00000000, /* EMC_CDB_CNTL_2 */
+ 0x00000000, /* EMC_CDB_CNTL_3 */
+ 0x00000001, /* EMC_QRST */
+ 0x00000014, /* EMC_QSAFE */
+ 0x00000018, /* EMC_RDV */
+ 0x0000001a, /* EMC_RDV_MASK */
+ 0x000017e2, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000005f8, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000003, /* EMC_PDEX2WR */
+ 0x00000011, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x000000c7, /* EMC_AR2PDEN */
+ 0x00000018, /* EMC_RW2PDEN */
+ 0x000000d7, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x00000005, /* EMC_TCKE */
+ 0x00000006, /* EMC_TCKESR */
+ 0x00000005, /* EMC_TPD */
+ 0x0000001d, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x00000008, /* EMC_TCLKSTABLE */
+ 0x00000008, /* EMC_TCLKSTOP */
+ 0x00001822, /* EMC_TREFBW */
+ 0x00000000, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x104ab098, /* EMC_FBIO_CFG5 */
+ 0xe00700b1, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS4 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS5 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS6 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS7 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS8 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS9 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS10 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS11 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS12 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS13 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS14 */
+ 0x00000008, /* EMC_DLL_XFORM_DQS15 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x00034000, /* EMC_DLL_XFORM_ADDR0 */
+ 0x00034000, /* EMC_DLL_XFORM_ADDR1 */
+ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */
+ 0x00034000, /* EMC_DLL_XFORM_ADDR3 */
+ 0x00034000, /* EMC_DLL_XFORM_ADDR4 */
+ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */
+ 0x0000000b, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x0000000b, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x0000000b, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x0000000b, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x0000000b, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x0000000b, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x0000000b, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x0000000b, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x0000000b, /* EMC_DLI_TRIM_TXDQS8 */
+ 0x0000000b, /* EMC_DLI_TRIM_TXDQS9 */
+ 0x0000000b, /* EMC_DLI_TRIM_TXDQS10 */
+ 0x0000000b, /* EMC_DLI_TRIM_TXDQS11 */
+ 0x0000000b, /* EMC_DLI_TRIM_TXDQS12 */
+ 0x0000000b, /* EMC_DLI_TRIM_TXDQS13 */
+ 0x0000000b, /* EMC_DLI_TRIM_TXDQS14 */
+ 0x0000000b, /* EMC_DLI_TRIM_TXDQS15 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ0 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ1 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ2 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ3 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ4 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ5 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ6 */
+ 0x0000000c, /* EMC_DLL_XFORM_DQ7 */
+ 0x100002a0, /* EMC_XM2CMDPADCTRL */
+ 0x00000000, /* EMC_XM2CMDPADCTRL4 */
+ 0x00111111, /* EMC_XM2CMDPADCTRL5 */
+ 0x0120113d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL3 */
+ 0x77ffc085, /* EMC_XM2CLKPADCTRL */
+ 0x00000404, /* EMC_XM2CLKPADCTRL2 */
+ 0x81f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x07070004, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */
+ 0x59659620, /* EMC_XM2DQSPADCTRL3 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL4 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL5 */
+ 0x59659600, /* EMC_XM2DQSPADCTRL6 */
+ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */
+ 0x00000000, /* EMC_TXDSRVTTGEN */
+ 0x00000000, /* EMC_FBIO_SPARE */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000100, /* EMC_ZCAL_WAIT_CNT */
+ 0x00f7000e, /* EMC_MRS_WAIT_CNT */
+ 0x00f7000e, /* EMC_MRS_WAIT_CNT2 */
+ 0x00000000, /* EMC_CTT */
+ 0x00000004, /* EMC_CTT_DURATION */
+ 0x00004080, /* EMC_CFG_PIPE */
+ 0x80003012, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x0000000f, /* EMC_QPOP */
+ 0x0e00000b, /* MC_EMEM_ARB_CFG */
+ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000013, /* MC_EMEM_ARB_TIMING_RC */
+ 0x0000000c, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x0000000f, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000003, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x0000000c, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000008, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x08060202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x00170e13, /* MC_EMEM_ARB_DA_COVERS */
+ 0x736c2414, /* MC_EMEM_ARB_MISC0 */
+ 0x70000f02, /* MC_EMEM_ARB_MISC1 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ },
+ {
+ 0x00000013, /* MC_MLL_MPCORER_PTSA_RATE */
+ 0x0000017c, /* MC_PTSA_GRANT_DECREMENT */
+ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */
+ 0x00810038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */
+ 0x0081003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */
+ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */
+ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */
+ 0x00810090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */
+ 0x00810041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */
+ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */
+ 0x00810080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */
+ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */
+ 0x00810004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */
+ 0x00080016, /* MC_LATENCY_ALLOWANCE_HC_0 */
+ 0x00000081, /* MC_LATENCY_ALLOWANCE_HC_1 */
+ 0x00810004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */
+ 0x00810019, /* MC_LATENCY_ALLOWANCE_GPU_0 */
+ 0x00810018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */
+ 0x00810024, /* MC_LATENCY_ALLOWANCE_HDA_0 */
+ 0x0081001c, /* MC_LATENCY_ALLOWANCE_VIC_0 */
+ 0x00000081, /* MC_LATENCY_ALLOWANCE_VI2_0 */
+ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */
+ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2_1 */
+ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */
+ 0x00810081, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */
+ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */
+ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */
+ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_2 */
+ 0x00810081, /* MC_LATENCY_ALLOWANCE_VDE_3 */
+ 0x00810065, /* MC_LATENCY_ALLOWANCE_SATA_0 */
+ 0x0081001c, /* MC_LATENCY_ALLOWANCE_AFI_0 */
+ },
+ 0x00000042, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x73300000, /* EMC_CFG */
+ 0x0000089d, /* EMC_CFG_2 */
+ 0x00040000, /* EMC_SEL_DPD_CTRL */
+ 0xe0070069, /* EMC_CFG_DIG_DLL */
+ 0x00000000, /* EMC_BGBIAS_CTL0 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */
+ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */
+ 0x80000d71, /* Mode Register 0 */
+ 0x80100002, /* Mode Register 1 */
+ 0x80200018, /* Mode Register 2 */
+ 0x00000000, /* Mode Register 4 */
+ 1200, /* expected dvfs latency (ns) */
+ },
+ {
+ 0x19, /* V5.0.18 */
+ "001_924000_01_V5.0.18_V1.1", /* DVFS table version */
+ 924000, /* SDRAM frequency */
+ 1010, /* min voltage */
+ 1010, /* gpu min voltage */
+ "pllm_ud", /* clock source id */
+ 0x80000000, /* CLK_SOURCE_EMC */
+ 165, /* number of burst_regs */
+ 31, /* number of up_down_regs */
+ {
+ 0x0000002b, /* EMC_RC */
+ 0x000000f0, /* EMC_RFC */
+ 0x00000000, /* EMC_RFC_SLR */
+ 0x0000001e, /* EMC_RAS */
+ 0x0000000b, /* EMC_RP */
+ 0x0000000a, /* EMC_R2W */
+ 0x0000000f, /* EMC_W2R */
+ 0x00000005, /* EMC_R2P */
+ 0x00000016, /* EMC_W2P */
+ 0x0000000b, /* EMC_RD_RCD */
+ 0x0000000b, /* EMC_WR_RCD */
+ 0x00000004, /* EMC_RRD */
+ 0x00000002, /* EMC_REXT */
+ 0x00000000, /* EMC_WEXT */
+ 0x00000007, /* EMC_WDV */
+ 0x00000007, /* EMC_WDV_MASK */
+ 0x0000000d, /* EMC_QUSE */
+ 0x00000002, /* EMC_QUSE_WIDTH */
+ 0x00000000, /* EMC_IBDLY */
+ 0x00000002, /* EMC_EINPUT */
+ 0x0000000f, /* EMC_EINPUT_DURATION */
+ 0x000a0000, /* EMC_PUTERM_EXTRA */
+ 0x00000004, /* EMC_PUTERM_WIDTH */
+ 0x00000000, /* EMC_PUTERM_ADJ */
+ 0x00000000, /* EMC_CDB_CNTL_1 */
+ 0x00000000, /* EMC_CDB_CNTL_2 */
+ 0x00000000, /* EMC_CDB_CNTL_3 */
+ 0x00000001, /* EMC_QRST */
+ 0x00000016, /* EMC_QSAFE */
+ 0x0000001a, /* EMC_RDV */
+ 0x0000001c, /* EMC_RDV_MASK */
+ 0x00001be7, /* EMC_REFRESH */
+ 0x00000000, /* EMC_BURST_REFRESH_NUM */
+ 0x000006f9, /* EMC_PRE_REFRESH_REQ_CNT */
+ 0x00000004, /* EMC_PDEX2WR */
+ 0x00000015, /* EMC_PDEX2RD */
+ 0x00000001, /* EMC_PCHG2PDEN */
+ 0x00000000, /* EMC_ACT2PDEN */
+ 0x000000e7, /* EMC_AR2PDEN */
+ 0x0000001b, /* EMC_RW2PDEN */
+ 0x000000fb, /* EMC_TXSR */
+ 0x00000200, /* EMC_TXSRDLL */
+ 0x00000006, /* EMC_TCKE */
+ 0x00000007, /* EMC_TCKESR */
+ 0x00000006, /* EMC_TPD */
+ 0x00000022, /* EMC_TFAW */
+ 0x00000000, /* EMC_TRPAB */
+ 0x0000000a, /* EMC_TCLKSTABLE */
+ 0x0000000a, /* EMC_TCLKSTOP */
+ 0x00001c28, /* EMC_TREFBW */
+ 0x00000000, /* EMC_FBIO_CFG6 */
+ 0x00000000, /* EMC_ODT_WRITE */
+ 0x00000000, /* EMC_ODT_READ */
+ 0x104ab898, /* EMC_FBIO_CFG5 */
+ 0xe00400b1, /* EMC_CFG_DIG_DLL */
+ 0x00008000, /* EMC_CFG_DIG_DLL_PERIOD */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS0 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS1 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS2 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS3 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS4 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS5 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS6 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS7 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS8 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS9 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS10 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS11 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS12 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS13 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS14 */
+ 0x0000000a, /* EMC_DLL_XFORM_DQS15 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE0 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE1 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE2 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE3 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE4 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE6 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE7 */
+ 0x0002c000, /* EMC_DLL_XFORM_ADDR0 */
+ 0x0002c000, /* EMC_DLL_XFORM_ADDR1 */
+ 0x00000000, /* EMC_DLL_XFORM_ADDR2 */
+ 0x0002c000, /* EMC_DLL_XFORM_ADDR3 */
+ 0x0002c000, /* EMC_DLL_XFORM_ADDR4 */
+ 0x00000000, /* EMC_DLL_XFORM_ADDR5 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE8 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE9 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE10 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE11 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE12 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE13 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE14 */
+ 0x00000000, /* EMC_DLL_XFORM_QUSE15 */
+ 0x00000006, /* EMC_DLI_TRIM_TXDQS0 */
+ 0x00000006, /* EMC_DLI_TRIM_TXDQS1 */
+ 0x00000006, /* EMC_DLI_TRIM_TXDQS2 */
+ 0x00000006, /* EMC_DLI_TRIM_TXDQS3 */
+ 0x00000006, /* EMC_DLI_TRIM_TXDQS4 */
+ 0x00000006, /* EMC_DLI_TRIM_TXDQS5 */
+ 0x00000006, /* EMC_DLI_TRIM_TXDQS6 */
+ 0x00000006, /* EMC_DLI_TRIM_TXDQS7 */
+ 0x00000006, /* EMC_DLI_TRIM_TXDQS8 */
+ 0x00000006, /* EMC_DLI_TRIM_TXDQS9 */
+ 0x00000006, /* EMC_DLI_TRIM_TXDQS10 */
+ 0x00000006, /* EMC_DLI_TRIM_TXDQS11 */
+ 0x00000006, /* EMC_DLI_TRIM_TXDQS12 */
+ 0x00000006, /* EMC_DLI_TRIM_TXDQS13 */
+ 0x00000006, /* EMC_DLI_TRIM_TXDQS14 */
+ 0x00000006, /* EMC_DLI_TRIM_TXDQS15 */
+ 0x0000000d, /* EMC_DLL_XFORM_DQ0 */
+ 0x0000000d, /* EMC_DLL_XFORM_DQ1 */
+ 0x0000000d, /* EMC_DLL_XFORM_DQ2 */
+ 0x0000000d, /* EMC_DLL_XFORM_DQ3 */
+ 0x0000000d, /* EMC_DLL_XFORM_DQ4 */
+ 0x0000000d, /* EMC_DLL_XFORM_DQ5 */
+ 0x0000000d, /* EMC_DLL_XFORM_DQ6 */
+ 0x0000000d, /* EMC_DLL_XFORM_DQ7 */
+ 0x100002a0, /* EMC_XM2CMDPADCTRL */
+ 0x00000000, /* EMC_XM2CMDPADCTRL4 */
+ 0x00111111, /* EMC_XM2CMDPADCTRL5 */
+ 0x0120113d, /* EMC_XM2DQSPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL2 */
+ 0x00000000, /* EMC_XM2DQPADCTRL3 */
+ 0x77ffc085, /* EMC_XM2CLKPADCTRL */
+ 0x00000404, /* EMC_XM2CLKPADCTRL2 */
+ 0x81f1f108, /* EMC_XM2COMPPADCTRL */
+ 0x07070004, /* EMC_XM2VTTGENPADCTRL */
+ 0x00000000, /* EMC_XM2VTTGENPADCTRL2 */
+ 0x016eeeee, /* EMC_XM2VTTGENPADCTRL3 */
+ 0x5d75d720, /* EMC_XM2DQSPADCTRL3 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL4 */
+ 0x00514514, /* EMC_XM2DQSPADCTRL5 */
+ 0x5d75d700, /* EMC_XM2DQSPADCTRL6 */
+ 0x0606003f, /* EMC_DSR_VTTGEN_DRV */
+ 0x00000000, /* EMC_TXDSRVTTGEN */
+ 0x00000000, /* EMC_FBIO_SPARE */
+ 0x00020000, /* EMC_ZCAL_INTERVAL */
+ 0x00000128, /* EMC_ZCAL_WAIT_CNT */
+ 0x00cd000e, /* EMC_MRS_WAIT_CNT */
+ 0x00cd000e, /* EMC_MRS_WAIT_CNT2 */
+ 0x00000000, /* EMC_CTT */
+ 0x00000004, /* EMC_CTT_DURATION */
+ 0x00004080, /* EMC_CFG_PIPE */
+ 0x800037ea, /* EMC_DYN_SELF_REF_CONTROL */
+ 0x00000011, /* EMC_QPOP */
+ 0x0e00000d, /* MC_EMEM_ARB_CFG */
+ 0x80000040, /* MC_EMEM_ARB_OUTSTANDING_REQ */
+ 0x00000005, /* MC_EMEM_ARB_TIMING_RCD */
+ 0x00000006, /* MC_EMEM_ARB_TIMING_RP */
+ 0x00000016, /* MC_EMEM_ARB_TIMING_RC */
+ 0x0000000e, /* MC_EMEM_ARB_TIMING_RAS */
+ 0x00000011, /* MC_EMEM_ARB_TIMING_FAW */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_RRD */
+ 0x00000004, /* MC_EMEM_ARB_TIMING_RAP2PRE */
+ 0x0000000e, /* MC_EMEM_ARB_TIMING_WAP2PRE */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_R2R */
+ 0x00000002, /* MC_EMEM_ARB_TIMING_W2W */
+ 0x00000007, /* MC_EMEM_ARB_TIMING_R2W */
+ 0x00000009, /* MC_EMEM_ARB_TIMING_W2R */
+ 0x09070202, /* MC_EMEM_ARB_DA_TURNS */
+ 0x001a1016, /* MC_EMEM_ARB_DA_COVERS */
+ 0x734e2a17, /* MC_EMEM_ARB_MISC0 */
+ 0x70000f02, /* MC_EMEM_ARB_MISC1 */
+ 0x001f0000, /* MC_EMEM_ARB_RING1_THROTTLE */
+ },
+ {
+ 0x00000017, /* MC_MLL_MPCORER_PTSA_RATE */
+ 0x000001bb, /* MC_PTSA_GRANT_DECREMENT */
+ 0x006e0038, /* MC_LATENCY_ALLOWANCE_XUSB_0 */
+ 0x006e0038, /* MC_LATENCY_ALLOWANCE_XUSB_1 */
+ 0x006e003c, /* MC_LATENCY_ALLOWANCE_TSEC_0 */
+ 0x006e0090, /* MC_LATENCY_ALLOWANCE_SDMMCA_0 */
+ 0x006e0041, /* MC_LATENCY_ALLOWANCE_SDMMCAA_0 */
+ 0x006e0090, /* MC_LATENCY_ALLOWANCE_SDMMC_0 */
+ 0x006e0041, /* MC_LATENCY_ALLOWANCE_SDMMCAB_0 */
+ 0x00270049, /* MC_LATENCY_ALLOWANCE_PPCS_0 */
+ 0x006e0080, /* MC_LATENCY_ALLOWANCE_PPCS_1 */
+ 0x006e0004, /* MC_LATENCY_ALLOWANCE_MPCORE_0 */
+ 0x006e0004, /* MC_LATENCY_ALLOWANCE_MPCORELP_0 */
+ 0x00080016, /* MC_LATENCY_ALLOWANCE_HC_0 */
+ 0x0000006e, /* MC_LATENCY_ALLOWANCE_HC_1 */
+ 0x006e0004, /* MC_LATENCY_ALLOWANCE_AVPC_0 */
+ 0x006e0019, /* MC_LATENCY_ALLOWANCE_GPU_0 */
+ 0x006e0018, /* MC_LATENCY_ALLOWANCE_MSENC_0 */
+ 0x006e0024, /* MC_LATENCY_ALLOWANCE_HDA_0 */
+ 0x006e001b, /* MC_LATENCY_ALLOWANCE_VIC_0 */
+ 0x0000006e, /* MC_LATENCY_ALLOWANCE_VI2_0 */
+ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2_0 */
+ 0x006e006e, /* MC_LATENCY_ALLOWANCE_ISP2_1 */
+ 0x00000036, /* MC_LATENCY_ALLOWANCE_ISP2B_0 */
+ 0x006e006e, /* MC_LATENCY_ALLOWANCE_ISP2B_1 */
+ 0x00d400ff, /* MC_LATENCY_ALLOWANCE_VDE_0 */
+ 0x00510029, /* MC_LATENCY_ALLOWANCE_VDE_1 */
+ 0x006e006e, /* MC_LATENCY_ALLOWANCE_VDE_2 */
+ 0x006e006e, /* MC_LATENCY_ALLOWANCE_VDE_3 */
+ 0x006e0065, /* MC_LATENCY_ALLOWANCE_SATA_0 */
+ 0x006e001c, /* MC_LATENCY_ALLOWANCE_AFI_0 */
+ },
+ 0x0000004c, /* EMC_ZCAL_WAIT_CNT after clock change */
+ 0x001fffff, /* EMC_AUTO_CAL_INTERVAL */
+ 0x00000802, /* EMC_CTT_TERM_CTRL */
+ 0x73300000, /* EMC_CFG */
+ 0x0000089d, /* EMC_CFG_2 */
+ 0x00040000, /* EMC_SEL_DPD_CTRL */
+ 0xe0040069, /* EMC_CFG_DIG_DLL */
+ 0x00000000, /* EMC_BGBIAS_CTL0 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG2 */
+ 0x00000000, /* EMC_AUTO_CAL_CONFIG3 */
+ 0xa1430000, /* EMC_AUTO_CAL_CONFIG */
+ 0x80000f15, /* Mode Register 0 */
+ 0x80100002, /* Mode Register 1 */
+ 0x80200020, /* Mode Register 2 */
+ 0x00000000, /* Mode Register 4 */
+ 1180, /* expected dvfs latency (ns) */
+ },
+};
+
+static struct tegra12_emc_pdata apalis_tk1_2GB_emc_pdata = {
+ .description = "apalis-tk1_emc_tables",
+ .tables = apalis_tk1_ddr3_emc_table,
+ .num_tables = ARRAY_SIZE(apalis_tk1_ddr3_emc_table),
+};
+
+/*
+ * Register the board EMC tables unless the device tree already provides
+ * them, then initialize the Tegra12 EMC driver.
+ */
+int __init apalis_tk1_emc_init(void)
+{
+ /* If the device tree provides EMC tables, use those; otherwise register the board tables */
+ if (of_find_compatible_node(NULL, NULL, "nvidia,tegra12-emc")) {
+ pr_info("Loading EMC tables from DeviceTree.\n");
+ } else {
+ pr_info("Loading Apalis TK1 EMC tables.\n");
+ tegra_emc_device.dev.platform_data = &apalis_tk1_2GB_emc_pdata;
+
+ platform_device_register(&tegra_emc_device);
+ }
+
+ tegra12_emc_init();
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-apalis-tk1-panel.c b/arch/arm/mach-tegra/board-apalis-tk1-panel.c
new file mode 100644
index 000000000000..e4995841b8e4
--- /dev/null
+++ b/arch/arm/mach-tegra/board-apalis-tk1-panel.c
@@ -0,0 +1,854 @@
+/*
+ * arch/arm/mach-tegra/board-apalis-tk1-panel.c
+ *
+ * Copyright (c) 2016, Toradex AG. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/ioport.h>
+#include <linux/fb.h>
+#include <linux/nvmap.h>
+#include <linux/nvhost.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/dma-contiguous.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+
+#include <mach/irqs.h>
+#include <mach/dc.h>
+#include <mach/io_dpd.h>
+
+#include "board.h"
+#include "tegra-board-id.h"
+#include "devices.h"
+#include "gpio-names.h"
+#include "board-apalis-tk1.h"
+#include "board-panel.h"
+#include "common.h"
+#include "iomap.h"
+#include "tegra12_host1x_devices.h"
+#include "dvfs.h"
+
+struct platform_device *__init apalis_tk1_host1x_init(void)
+{
+ struct platform_device *pdev = NULL;
+
+#ifdef CONFIG_TEGRA_GRHOST
+ if (!of_have_populated_dt())
+ pdev = tegra12_register_host1x_devices();
+ else
+ pdev =
+ to_platform_device(bus_find_device_by_name
+ (&platform_bus_type, NULL, "host1x"));
+
+ if (!pdev) {
+ pr_err("host1x devices registration failed\n");
+ return NULL;
+ }
+#endif
+ return pdev;
+}
+
+/* hdmi related regulators */
+static struct regulator *apalis_tk1_hdmi_reg;
+static struct regulator *apalis_tk1_hdmi_pll;
+static struct regulator *apalis_tk1_hdmi_vddio;
+
+#ifndef CONFIG_TEGRA_HDMI_PRIMARY
+static struct resource apalis_tk1_disp1_resources[] = {
+ {
+ .name = "irq",
+ .start = INT_DISPLAY_GENERAL,
+ .end = INT_DISPLAY_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "regs",
+ .start = TEGRA_DISPLAY_BASE,
+ .end = TEGRA_DISPLAY_BASE + TEGRA_DISPLAY_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fbmem",
+ .start = 0, /* Filled in by apalis_tk1_panel_init() */
+ .end = 0, /* Filled in by apalis_tk1_panel_init() */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ganged_dsia_regs",
+ .start = 0, /* Filled in the panel file by init_resources() */
+ .end = 0, /* Filled in the panel file by init_resources() */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "ganged_dsib_regs",
+ .start = 0, /* Filled in the panel file by init_resources() */
+ .end = 0, /* Filled in the panel file by init_resources() */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "dsi_regs",
+ .start = 0, /* Filled in the panel file by init_resources() */
+ .end = 0, /* Filled in the panel file by init_resources() */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "mipi_cal",
+ .start = TEGRA_MIPI_CAL_BASE,
+ .end = TEGRA_MIPI_CAL_BASE + TEGRA_MIPI_CAL_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource apalis_tk1_disp1_edp_resources[] = {
+ {
+ .name = "irq",
+ .start = INT_DISPLAY_GENERAL,
+ .end = INT_DISPLAY_GENERAL,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "regs",
+ .start = TEGRA_DISPLAY_BASE,
+ .end = TEGRA_DISPLAY_BASE + TEGRA_DISPLAY_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fbmem",
+ .start = 0, /* Filled in by apalis_tk1_panel_init() */
+ .end = 0, /* Filled in by apalis_tk1_panel_init() */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "mipi_cal",
+ .start = TEGRA_MIPI_CAL_BASE,
+ .end = TEGRA_MIPI_CAL_BASE + TEGRA_MIPI_CAL_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "sor",
+ .start = TEGRA_SOR_BASE,
+ .end = TEGRA_SOR_BASE + TEGRA_SOR_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "dpaux",
+ .start = TEGRA_DPAUX_BASE,
+ .end = TEGRA_DPAUX_BASE + TEGRA_DPAUX_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "irq_dp",
+ .start = INT_DPAUX,
+ .end = INT_DPAUX,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+#endif
+
+static struct resource apalis_tk1_disp2_resources[] = {
+ {
+ .name = "irq",
+#ifndef CONFIG_TEGRA_HDMI_PRIMARY
+ .start = INT_DISPLAY_B_GENERAL,
+ .end = INT_DISPLAY_B_GENERAL,
+#else
+ .start = INT_DISPLAY_GENERAL,
+ .end = INT_DISPLAY_GENERAL,
+#endif
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "regs",
+#ifndef CONFIG_TEGRA_HDMI_PRIMARY
+ .start = TEGRA_DISPLAY2_BASE,
+ .end = TEGRA_DISPLAY2_BASE + TEGRA_DISPLAY2_SIZE - 1,
+#else
+ .start = TEGRA_DISPLAY_BASE,
+ .end = TEGRA_DISPLAY_BASE + TEGRA_DISPLAY_SIZE - 1,
+#endif
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "fbmem",
+ .start = 0, /* Filled in by apalis_tk1_panel_init() */
+ .end = 0, /* Filled in by apalis_tk1_panel_init() */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "hdmi_regs",
+ .start = TEGRA_HDMI_BASE,
+ .end = TEGRA_HDMI_BASE + TEGRA_HDMI_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+#ifndef CONFIG_TEGRA_HDMI_PRIMARY
+static struct tegra_dc_sd_settings sd_settings;
+
+#ifndef APALIS_TK1_EDP
+static struct tegra_dc_out_pin lvds_out_pins[] = {
+ {
+ .name = TEGRA_DC_OUT_PIN_H_SYNC,
+ .pol = TEGRA_DC_OUT_PIN_POL_LOW,
+ },
+ {
+ .name = TEGRA_DC_OUT_PIN_V_SYNC,
+ .pol = TEGRA_DC_OUT_PIN_POL_LOW,
+ },
+ {
+ .name = TEGRA_DC_OUT_PIN_PIXEL_CLOCK,
+ .pol = TEGRA_DC_OUT_PIN_POL_HIGH,
+ },
+ {
+ .name = TEGRA_DC_OUT_PIN_DATA_ENABLE,
+ .pol = TEGRA_DC_OUT_PIN_POL_HIGH,
+ },
+};
+#endif /* !APALIS_TK1_EDP */
+
+static struct tegra_dc_out apalis_tk1_disp1_out = {
+#ifdef APALIS_TK1_EDP
+ .type = TEGRA_DC_OUT_DSI,
+ .sd_settings = &sd_settings,
+#else /* APALIS_TK1_EDP */
+ .align = TEGRA_DC_ALIGN_MSB,
+ .order = TEGRA_DC_ORDER_RED_BLUE,
+ .flags = TEGRA_DC_OUT_CONTINUOUS_MODE,
+ .type = TEGRA_DC_OUT_LVDS,
+ .default_mode = "640x480-16@60",
+ .out_pins = lvds_out_pins,
+ .n_out_pins = ARRAY_SIZE(lvds_out_pins),
+#endif /* APALIS_TK1_EDP */
+};
+#endif
+
+static int apalis_tk1_hdmi_enable(struct device *dev)
+{
+ int ret;
+ if (!apalis_tk1_hdmi_reg) {
+ apalis_tk1_hdmi_reg = regulator_get(dev, "avdd_hdmi");
+ if (IS_ERR(apalis_tk1_hdmi_reg)) {
+ ret = PTR_ERR(apalis_tk1_hdmi_reg);
+ pr_err("hdmi: couldn't get regulator avdd_hdmi\n");
+ apalis_tk1_hdmi_reg = NULL;
+ return ret;
+ }
+ }
+ ret = regulator_enable(apalis_tk1_hdmi_reg);
+ if (ret < 0) {
+ pr_err("hdmi: couldn't enable regulator avdd_hdmi\n");
+ return ret;
+ }
+ if (!apalis_tk1_hdmi_pll) {
+ apalis_tk1_hdmi_pll = regulator_get(dev, "avdd_hdmi_pll");
+ if (IS_ERR(apalis_tk1_hdmi_pll)) {
+ ret = PTR_ERR(apalis_tk1_hdmi_pll);
+ pr_err("hdmi: couldn't get regulator avdd_hdmi_pll\n");
+ apalis_tk1_hdmi_pll = NULL;
+ regulator_put(apalis_tk1_hdmi_reg);
+ apalis_tk1_hdmi_reg = NULL;
+ return ret;
+ }
+ }
+ ret = regulator_enable(apalis_tk1_hdmi_pll);
+ if (ret < 0) {
+ pr_err("hdmi: couldn't enable regulator avdd_hdmi_pll\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int apalis_tk1_hdmi_disable(void)
+{
+ if (apalis_tk1_hdmi_reg) {
+ regulator_disable(apalis_tk1_hdmi_reg);
+ regulator_put(apalis_tk1_hdmi_reg);
+ apalis_tk1_hdmi_reg = NULL;
+ }
+
+ if (apalis_tk1_hdmi_pll) {
+ regulator_disable(apalis_tk1_hdmi_pll);
+ regulator_put(apalis_tk1_hdmi_pll);
+ apalis_tk1_hdmi_pll = NULL;
+ }
+ return 0;
+}
+
+static int apalis_tk1_hdmi_postsuspend(void)
+{
+ if (apalis_tk1_hdmi_vddio) {
+ regulator_disable(apalis_tk1_hdmi_vddio);
+ regulator_put(apalis_tk1_hdmi_vddio);
+ apalis_tk1_hdmi_vddio = NULL;
+ }
+ return 0;
+}
+
+static int apalis_tk1_hdmi_hotplug_init(struct device *dev)
+{
+ if (!apalis_tk1_hdmi_vddio) {
+ apalis_tk1_hdmi_vddio = regulator_get(dev, "vdd_hdmi_5v0");
+ if (WARN_ON(IS_ERR(apalis_tk1_hdmi_vddio))) {
+ pr_err("%s: couldn't get regulator vdd_hdmi_5v0: %ld\n",
+ __func__, PTR_ERR(apalis_tk1_hdmi_vddio));
+ apalis_tk1_hdmi_vddio = NULL;
+ } else {
+ return regulator_enable(apalis_tk1_hdmi_vddio);
+ }
+ }
+
+ return 0;
+}
+
+struct tmds_config apalis_tk1_tmds_config[] = {
+ { /* 480p/576p / 25.2MHz/27MHz modes */
+ .version = MKDEV(1, 0),
+ .pclk = 27000000,
+ .pll0 = 0x01003010,
+ .pll1 = 0x00301B00,
+ .pe_current = 0x00000000,
+ .drive_current = 0x1F1F1F1F,
+ .peak_current = 0x03030303,
+ .pad_ctls0_mask = 0xfffff0ff,
+ .pad_ctls0_setting = 0x00000400, /* BG_VREF_LEVEL */
+ },
+ { /* 720p / 74.25MHz modes */
+ .version = MKDEV(1, 0),
+ .pclk = 74250000,
+ .pll0 = 0x01003110,
+ .pll1 = 0x00301500,
+ .pe_current = 0x00000000,
+ .drive_current = 0x2C2C2C2C,
+ .peak_current = 0x07070707,
+ .pad_ctls0_mask = 0xfffff0ff,
+ .pad_ctls0_setting = 0x00000400, /* BG_VREF_LEVEL */
+ },
+ { /* 1080p / 148.5MHz modes */
+ .version = MKDEV(1, 0),
+ .pclk = 148500000,
+ .pll0 = 0x01003310,
+ .pll1 = 0x00301500,
+ .pe_current = 0x00000000,
+ .drive_current = 0x33333333,
+ .peak_current = 0x0C0C0C0C,
+ .pad_ctls0_mask = 0xfffff0ff,
+ .pad_ctls0_setting = 0x00000400, /* BG_VREF_LEVEL */
+ },
+ {
+ .version = MKDEV(1, 0),
+ .pclk = INT_MAX,
+ .pll0 = 0x01003F10,
+ .pll1 = 0x00300F00,
+ .pe_current = 0x00000000,
+ /* lane3 needs a slightly lower current */
+ .drive_current = 0x37373737,
+ .peak_current = 0x17171717,
+ .pad_ctls0_mask = 0xfffff0ff,
+ .pad_ctls0_setting = 0x00000600, /* BG_VREF_LEVEL */
+ },
+};
+
+struct tegra_hdmi_out apalis_tk1_hdmi_out = {
+ .tmds_config = apalis_tk1_tmds_config,
+ .n_tmds_config = ARRAY_SIZE(apalis_tk1_tmds_config),
+};
+
+static struct tegra_dc_out apalis_tk1_disp2_out = {
+ .type = TEGRA_DC_OUT_HDMI,
+ .flags = TEGRA_DC_OUT_HOTPLUG_HIGH,
+#ifndef CONFIG_TEGRA_HDMI_PRIMARY
+ .parent_clk = "pll_d2",
+#else
+ .parent_clk = "pll_d",
+#endif /* CONFIG_TEGRA_HDMI_PRIMARY */
+
+ .ddc_bus = 3,
+ .hotplug_gpio = apalis_tk1_hdmi_hpd,
+ .hdmi_out = &apalis_tk1_hdmi_out,
+
+ /* TODO: update max pclk to POR */
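+ /* 297000 kHz = 297 MHz, the same HDMI 4K rate used as fallback in apalis_tk1_display_init() */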
+ .max_pixclock = KHZ2PICOS(297000),
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE) || defined(CONFIG_TEGRA_HDMI_PRIMARY)
+ .default_mode = "1920x1080-16@60",
+ .depth = 24,
+#endif /* CONFIG_FRAMEBUFFER_CONSOLE || CONFIG_TEGRA_HDMI_PRIMARY */
+
+ .align = TEGRA_DC_ALIGN_MSB,
+ .order = TEGRA_DC_ORDER_RED_BLUE,
+
+ .enable = apalis_tk1_hdmi_enable,
+ .disable = apalis_tk1_hdmi_disable,
+ .postsuspend = apalis_tk1_hdmi_postsuspend,
+ .hotplug_init = apalis_tk1_hdmi_hotplug_init,
+};
+
+#ifndef CONFIG_TEGRA_HDMI_PRIMARY
+static struct tegra_fb_data apalis_tk1_disp1_fb_data = {
+ .win = 0,
+ .flags = TEGRA_FB_FLIP_ON_PROBE,
+};
+
+static struct tegra_dc_platform_data apalis_tk1_disp1_pdata = {
+ .flags = TEGRA_DC_FLAG_ENABLED,
+ .default_out = &apalis_tk1_disp1_out,
+ .fb = &apalis_tk1_disp1_fb_data,
+ .emc_clk_rate = 204000000,
+#ifdef CONFIG_TEGRA_DC_CMU
+ .cmu_enable = 1,
+#endif
+ .low_v_win = 0x02,
+};
+#endif
+
+static struct tegra_fb_data apalis_tk1_disp2_fb_data = {
+ .win = 0,
+ .flags = TEGRA_FB_FLIP_ON_PROBE,
+};
+
+static struct tegra_dc_platform_data apalis_tk1_disp2_pdata = {
+ .flags = TEGRA_DC_FLAG_ENABLED,
+ .default_out = &apalis_tk1_disp2_out,
+ .fb = &apalis_tk1_disp2_fb_data,
+ .emc_clk_rate = 300000000,
+};
+
+static struct platform_device apalis_tk1_disp2_device = {
+ .name = "tegradc",
+#ifndef CONFIG_TEGRA_HDMI_PRIMARY
+ .id = 1,
+#else
+ .id = 0,
+#endif
+ .resource = apalis_tk1_disp2_resources,
+ .num_resources = ARRAY_SIZE(apalis_tk1_disp2_resources),
+ .dev = {
+ .platform_data = &apalis_tk1_disp2_pdata,
+ },
+};
+
+#ifndef CONFIG_TEGRA_HDMI_PRIMARY
+static struct platform_device apalis_tk1_disp1_device = {
+ .name = "tegradc",
+ .id = 0,
+ .resource = apalis_tk1_disp1_resources,
+ .num_resources = ARRAY_SIZE(apalis_tk1_disp1_resources),
+ .dev = {
+ .platform_data = &apalis_tk1_disp1_pdata,
+ },
+};
+#endif
+
+static struct nvmap_platform_carveout apalis_tk1_carveouts[] = {
+ [0] = {
+ .name = "iram",
+ .usage_mask = NVMAP_HEAP_CARVEOUT_IRAM,
+ .base = TEGRA_IRAM_BASE + TEGRA_RESET_HANDLER_SIZE,
+ .size = TEGRA_IRAM_SIZE - TEGRA_RESET_HANDLER_SIZE,
+ .dma_dev = &tegra_iram_dev,
+ },
+ [1] = {
+ .name = "generic-0",
+ .usage_mask = NVMAP_HEAP_CARVEOUT_GENERIC,
+ .base = 0, /* Filled in by apalis_tk1_panel_init() */
+ .size = 0, /* Filled in by apalis_tk1_panel_init() */
+ .dma_dev = &tegra_generic_dev,
+ },
+ [2] = {
+ .name = "vpr",
+ .usage_mask = NVMAP_HEAP_CARVEOUT_VPR,
+ .base = 0, /* Filled in by apalis_tk1_panel_init() */
+ .size = 0, /* Filled in by apalis_tk1_panel_init() */
+ .dma_dev = &tegra_vpr_dev,
+ },
+};
+
+static struct nvmap_platform_data apalis_tk1_nvmap_data = {
+ .carveouts = apalis_tk1_carveouts,
+ .nr_carveouts = ARRAY_SIZE(apalis_tk1_carveouts),
+};
+
+static struct platform_device apalis_tk1_nvmap_device = {
+ .name = "tegra-nvmap",
+ .id = -1,
+ .dev = {
+ .platform_data = &apalis_tk1_nvmap_data,
+ },
+};
+
+#ifdef APALIS_TK1_EDP
+static struct tegra_dc_dp_lt_settings apalis_tk1_edp_lt_data[] = {
+ {
+ .drive_current = {
+ DRIVE_CURRENT_L0,
+ DRIVE_CURRENT_L0,
+ DRIVE_CURRENT_L0,
+ DRIVE_CURRENT_L0,
+ },
+ .lane_preemphasis = {
+ PRE_EMPHASIS_L0,
+ PRE_EMPHASIS_L0,
+ PRE_EMPHASIS_L0,
+ PRE_EMPHASIS_L0,
+ },
+ .post_cursor = {
+ POST_CURSOR2_L0,
+ POST_CURSOR2_L0,
+ POST_CURSOR2_L0,
+ POST_CURSOR2_L0,
+ },
+ .tx_pu = 0,
+ .load_adj = 0x3,
+ },
+ {
+ .drive_current = {
+ DRIVE_CURRENT_L0,
+ DRIVE_CURRENT_L0,
+ DRIVE_CURRENT_L0,
+ DRIVE_CURRENT_L0,
+ },
+ .lane_preemphasis = {
+ PRE_EMPHASIS_L0,
+ PRE_EMPHASIS_L0,
+ PRE_EMPHASIS_L0,
+ PRE_EMPHASIS_L0,
+ },
+ .post_cursor = {
+ POST_CURSOR2_L0,
+ POST_CURSOR2_L0,
+ POST_CURSOR2_L0,
+ POST_CURSOR2_L0,
+ },
+ .tx_pu = 0,
+ .load_adj = 0x4,
+ },
+ {
+ .drive_current = {
+ DRIVE_CURRENT_L0,
+ DRIVE_CURRENT_L0,
+ DRIVE_CURRENT_L0,
+ DRIVE_CURRENT_L0,
+ },
+ .lane_preemphasis = {
+ PRE_EMPHASIS_L1,
+ PRE_EMPHASIS_L1,
+ PRE_EMPHASIS_L1,
+ PRE_EMPHASIS_L1,
+ },
+ .post_cursor = {
+ POST_CURSOR2_L0,
+ POST_CURSOR2_L0,
+ POST_CURSOR2_L0,
+ POST_CURSOR2_L0,
+ },
+ .tx_pu = 0,
+ .load_adj = 0x6,
+ },
+};
+
+static struct tegra_dp_out dp_settings = {
+ /* Panel can override this with its own LT data */
+ .lt_settings = apalis_tk1_edp_lt_data,
+ .n_lt_settings = ARRAY_SIZE(apalis_tk1_edp_lt_data),
+ .tx_pu_disable = true,
+};
+#endif /* APALIS_TK1_EDP */
+
+#ifndef CONFIG_TEGRA_HDMI_PRIMARY
+/* can be called multiple times */
+static struct tegra_panel *apalis_tk1_panel_configure(void)
+{
+ struct tegra_panel *panel = NULL;
+
+ apalis_tk1_disp1_device.resource = apalis_tk1_disp1_edp_resources;
+ apalis_tk1_disp1_device.num_resources =
+ ARRAY_SIZE(apalis_tk1_disp1_edp_resources);
+#ifdef APALIS_TK1_EDP
+ apalis_tk1_disp1_out.dp_out = &dp_settings;
+ apalis_tk1_disp1_out.type = TEGRA_DC_OUT_DP;
+ panel = &edp_a_1080p_14_0;
+#else /* APALIS_TK1_EDP */
+ apalis_tk1_disp1_out.type = TEGRA_DC_OUT_LVDS;
+ panel = &lvds_c_1366_14;
+#endif /* APALIS_TK1_EDP */
+// apalis_tk1_disp1_out.rotation = 180;
+
+ return panel;
+}
+
+static void apalis_tk1_panel_select(void)
+{
+ struct tegra_panel *panel = NULL;
+
+ panel = apalis_tk1_panel_configure();
+
+ if (panel) {
+ if (panel->init_sd_settings)
+ panel->init_sd_settings(&sd_settings);
+
+ if (panel->init_dc_out)
+ panel->init_dc_out(&apalis_tk1_disp1_out);
+
+ if (panel->init_cmu_data)
+ panel->init_cmu_data(&apalis_tk1_disp1_pdata);
+
+ if (panel->set_disp_device)
+ panel->set_disp_device(&apalis_tk1_disp1_device);
+
+ if (panel->register_bl_dev)
+ panel->register_bl_dev();
+
+ if (panel->register_i2c_bridge)
+ panel->register_i2c_bridge();
+ }
+}
+#endif
+
+int __init apalis_tk1_panel_init(void)
+{
+ int err = 0;
+ struct resource __maybe_unused *res;
+ struct platform_device *phost1x = NULL;
+
+ struct device_node *dc1_node = NULL;
+ struct device_node *dc2_node = NULL;
+ struct device_node *hdmi_node = NULL;
+#ifdef CONFIG_NVMAP_USE_CMA_FOR_CARVEOUT
+ struct dma_declare_info vpr_dma_info;
+ struct dma_declare_info generic_dma_info;
+#endif
+
+ find_dc_node(&dc1_node, &dc2_node);
+ hdmi_node = of_find_node_by_path("/host1x/hdmi");
+
+#ifndef CONFIG_TEGRA_HDMI_PRIMARY
+ apalis_tk1_panel_select();
+#endif
+
+#ifdef CONFIG_TEGRA_NVMAP
+ apalis_tk1_carveouts[1].base = tegra_carveout_start;
+ apalis_tk1_carveouts[1].size = tegra_carveout_size;
+
+ apalis_tk1_carveouts[2].base = tegra_vpr_start;
+ apalis_tk1_carveouts[2].size = tegra_vpr_size;
+
+#ifdef CONFIG_NVMAP_USE_CMA_FOR_CARVEOUT
+ generic_dma_info.name = "generic";
+ generic_dma_info.base = tegra_carveout_start;
+ generic_dma_info.size = tegra_carveout_size;
+ generic_dma_info.resize = false;
+ generic_dma_info.cma_dev = NULL;
+
+ vpr_dma_info.name = "vpr";
+ vpr_dma_info.base = tegra_vpr_start;
+ vpr_dma_info.size = tegra_vpr_size;
+ vpr_dma_info.resize = false;
+ vpr_dma_info.cma_dev = NULL;
+ apalis_tk1_carveouts[1].cma_dev = &tegra_generic_cma_dev;
+ apalis_tk1_carveouts[1].resize = false;
+ apalis_tk1_carveouts[2].cma_dev = &tegra_vpr_cma_dev;
+ apalis_tk1_carveouts[2].resize = true;
+
+ vpr_dma_info.size = SZ_32M;
+ vpr_dma_info.resize = true;
+ vpr_dma_info.cma_dev = &tegra_vpr_cma_dev;
+ vpr_dma_info.notifier.ops = &vpr_dev_ops;
+
+ if (tegra_carveout_size) {
+ err = dma_declare_coherent_resizable_cma_memory(
+ &tegra_generic_dev, &generic_dma_info);
+ if (err) {
+ pr_err("Generic coherent memory declaration failed\n");
+ return err;
+ }
+ }
+ if (tegra_vpr_size) {
+ err =
+ dma_declare_coherent_resizable_cma_memory(&tegra_vpr_dev,
+ &vpr_dma_info);
+ if (err) {
+ pr_err("VPR coherent memory declaration failed\n");
+ return err;
+ }
+ }
+#endif
+
+ err = platform_device_register(&apalis_tk1_nvmap_device);
+ if (err) {
+ pr_err("nvmap device registration failed\n");
+ return err;
+ }
+#endif
+
+ phost1x = apalis_tk1_host1x_init();
+ if (!phost1x) {
+ pr_err("host1x devices registration failed\n");
+ return -EINVAL;
+ }
+
+ if (hdmi_node) {
+ struct device_node *ddc;
+ int id;
+ ddc = of_parse_phandle(hdmi_node, "nvidia,ddc-i2c-bus", 0);
+ if (ddc) {
+ id = of_alias_get_id(ddc, "i2c");
+ if (id >= 0) {
+ apalis_tk1_disp2_out.ddc_bus = id;
+ } else
+ pr_err("Invalid HDMI ddc in the device-tree\n");
+ } else
+ pr_err("No ddc for HDMI node in the device-tree\n");
+ }
+
+ if (!of_have_populated_dt() || !dc1_node ||
+ !of_device_is_available(dc1_node)) {
+#ifndef CONFIG_TEGRA_HDMI_PRIMARY
+ res = platform_get_resource_byname(&apalis_tk1_disp1_device,
+ IORESOURCE_MEM, "fbmem");
+#else
+ res = platform_get_resource_byname(&apalis_tk1_disp2_device,
+ IORESOURCE_MEM, "fbmem");
+#endif
+ res->start = tegra_fb_start;
+ res->end = tegra_fb_start + tegra_fb_size - 1;
+ }
+
+ /* Copy the bootloader fb to the fb. */
+ if (tegra_bootloader_fb_size)
+ __tegra_move_framebuffer(&apalis_tk1_nvmap_device,
+ tegra_fb_start,
+ tegra_bootloader_fb_start,
+ min(tegra_fb_size,
+ tegra_bootloader_fb_size));
+ else
+ __tegra_clear_framebuffer(&apalis_tk1_nvmap_device,
+ tegra_fb_start, tegra_fb_size);
+
+ /* Copy the bootloader fb2 to the fb2. */
+ if (tegra_bootloader_fb2_size)
+ __tegra_move_framebuffer(&apalis_tk1_nvmap_device,
+ tegra_fb2_start,
+ tegra_bootloader_fb2_start,
+ min(tegra_fb2_size,
+ tegra_bootloader_fb2_size));
+ else
+ __tegra_clear_framebuffer(&apalis_tk1_nvmap_device,
+ tegra_fb2_start, tegra_fb2_size);
+
+#ifndef CONFIG_TEGRA_HDMI_PRIMARY
+ if (!of_have_populated_dt() || !dc1_node ||
+ !of_device_is_available(dc1_node)) {
+ apalis_tk1_disp1_device.dev.parent = &phost1x->dev;
+ err = platform_device_register(&apalis_tk1_disp1_device);
+ if (err) {
+ pr_err("disp1 device registration failed\n");
+ return err;
+ }
+ }
+#endif
+
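+ /* Tune the 720p (index 1) and 1080p (index 2) TMDS entries defined above */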
+ apalis_tk1_tmds_config[1].pe_current = 0x08080808;
+ apalis_tk1_tmds_config[1].drive_current = 0x2d2d2d2d;
+ apalis_tk1_tmds_config[1].peak_current = 0x0;
+ apalis_tk1_tmds_config[2].pe_current = 0x0;
+ apalis_tk1_tmds_config[2].drive_current = 0x2d2d2d2d;
+ apalis_tk1_tmds_config[2].peak_current = 0x05050505;
+
+ if (!of_have_populated_dt() || !dc2_node ||
+ !of_device_is_available(dc2_node)) {
+#ifndef CONFIG_TEGRA_HDMI_PRIMARY
+ res = platform_get_resource_byname(&apalis_tk1_disp2_device,
+ IORESOURCE_MEM, "fbmem");
+ res->start = tegra_fb2_start;
+ res->end = tegra_fb2_start + tegra_fb2_size - 1;
+#endif
+ apalis_tk1_disp2_device.dev.parent = &phost1x->dev;
+ err = platform_device_register(&apalis_tk1_disp2_device);
+ if (err) {
+ pr_err("disp2 device registration failed\n");
+ return err;
+ }
+ }
+
+ return err;
+}
+
+int __init apalis_tk1_display_init(void)
+{
+ struct clk *disp1_clk = clk_get_sys("tegradc.0", NULL);
+ struct clk *disp2_clk = clk_get_sys("tegradc.1", NULL);
+#ifndef CONFIG_TEGRA_HDMI_PRIMARY
+ struct tegra_panel *panel;
+ long disp1_rate = 0;
+#endif
+ long disp2_rate = 0;
+
+ /*
+ * TODO: skip apalis_tk1_display_init() when the display controller is
+ * registered via device tree.
+ */
+
+ if (WARN_ON(IS_ERR(disp1_clk))) {
+ if (disp2_clk && !IS_ERR(disp2_clk))
+ clk_put(disp2_clk);
+ return PTR_ERR(disp1_clk);
+ }
+
+ if (WARN_ON(IS_ERR(disp2_clk))) {
+ clk_put(disp1_clk);
+ return PTR_ERR(disp2_clk);
+ }
+
+#ifndef CONFIG_TEGRA_HDMI_PRIMARY
+ panel = apalis_tk1_panel_configure();
+
+ if (panel && panel->init_dc_out) {
+ panel->init_dc_out(&apalis_tk1_disp1_out);
+ if (apalis_tk1_disp1_out.n_modes && apalis_tk1_disp1_out.modes)
+ disp1_rate = apalis_tk1_disp1_out.modes[0].pclk;
+ } else {
+ disp1_rate = 0;
+ if (!panel || !panel->init_dc_out)
+ pr_err("disp1 panel output not specified!\n");
+ }
+
+ pr_debug("disp1 pclk=%ld\n", disp1_rate);
+ if (disp1_rate)
+ tegra_dvfs_resolve_override(disp1_clk, disp1_rate);
+#endif
+
+ /* set up disp2 */
+ if (apalis_tk1_disp2_out.max_pixclock)
+ disp2_rate = PICOS2KHZ(apalis_tk1_disp2_out.max_pixclock)
+ * 1000;
+ else
+ disp2_rate = 297000000; /* HDMI 4K */
+ pr_debug("disp2 pclk=%ld\n", disp2_rate);
+ if (disp2_rate)
+#ifndef CONFIG_TEGRA_HDMI_PRIMARY
+ tegra_dvfs_resolve_override(disp2_clk, disp2_rate);
+#else
+ tegra_dvfs_resolve_override(disp1_clk, disp2_rate);
+#endif
+
+ clk_put(disp1_clk);
+ clk_put(disp2_clk);
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-apalis-tk1-power.c b/arch/arm/mach-tegra/board-apalis-tk1-power.c
new file mode 100644
index 000000000000..69ea8d378e25
--- /dev/null
+++ b/arch/arm/mach-tegra/board-apalis-tk1-power.c
@@ -0,0 +1,388 @@
+/*
+ * arch/arm/mach-tegra/board-apalis-tk1-power.c
+ *
+ * Copyright (c) 2016, Toradex AG. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/io.h>
+#include <mach/edp.h>
+#include <mach/irqs.h>
+#include <linux/edp.h>
+#include <linux/platform_data/tegra_edp.h>
+#include <linux/pid_thermal_gov.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/regulator/tegra-dfll-bypass-regulator.h>
+#include <linux/tegra-fuse.h>
+#include <linux/tegra-pmc.h>
+
+#include <asm/mach-types.h>
+#include <mach/pinmux-t12.h>
+
+#include "pm.h"
+#include "dvfs.h"
+#include "board.h"
+#include "common.h"
+#include "tegra-board-id.h"
+#include "board-common.h"
+#include "board-apalis-tk1.h"
+#include "board-pmu-defines.h"
+#include "devices.h"
+#include "iomap.h"
+#include "tegra_cl_dvfs.h"
+#include "tegra11_soctherm.h"
+
+static struct tegra_suspend_platform_data apalis_tk1_suspend_data = {
+ .cpu_timer = 500,
+ .cpu_off_timer = 300,
+ .suspend_mode = TEGRA_SUSPEND_LP1,
+ .core_timer = 0x157e,
+ .core_off_timer = 10,
+ .corereq_high = true,
+ .sysclkreq_high = true,
+ .cpu_lp2_min_residency = 1000,
+ .min_residency_vmin_fmin = 1000,
+ .min_residency_ncpu_fast = 8000,
+ .min_residency_ncpu_slow = 5000,
+ .min_residency_mclk_stop = 5000,
+ .min_residency_crail = 20000,
+ .crail_up_early = true,
+};
+
+/************************ Apalis TK1 CL-DVFS Data *********************/
+#define APALIS_TK1_DEFAULT_CVB_ALIGNMENT 10000
+
+#ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS
+/* board parameters for cpu dfll */
+static struct tegra_cl_dvfs_cfg_param apalis_tk1_cl_dvfs_param = {
+ .sample_rate = 12500,
+
+ .force_mode = TEGRA_CL_DVFS_FORCE_FIXED,
+ .cf = 10,
+ .ci = 0,
+ .cg = 2,
+
+ .droop_cut_value = 0xF,
+ .droop_restore_ramp = 0x0,
+ .scale_out_ramp = 0x0,
+};
+#endif
+
+/* Apalis TK1: fixed 10mV steps from 700mV to 1400mV */
+#define PMU_CPU_VDD_MAP_SIZE ((1400000 - 700000) / 10000 + 1)
+static struct voltage_reg_map pmu_cpu_vdd_map[PMU_CPU_VDD_MAP_SIZE];
+static inline void fill_reg_map(void)
+{
+ int i;
+ u32 reg_init_value = 0x1e;
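+ /* Entry i maps PMIC register value (0x1e + i) to 700 mV + i * 10 mV */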
+ for (i = 0; i < PMU_CPU_VDD_MAP_SIZE; i++) {
+ pmu_cpu_vdd_map[i].reg_value = i + reg_init_value;
+ pmu_cpu_vdd_map[i].reg_uV = 700000 + 10000 * i;
+ }
+}
+
+#ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS
+static struct tegra_cl_dvfs_platform_data apalis_tk1_cl_dvfs_data = {
+ .dfll_clk_name = "dfll_cpu",
+ .pmu_if = TEGRA_CL_DVFS_PMU_I2C,
+ .u.pmu_i2c = {
+ .fs_rate = 400000,
+ .slave_addr = 0x80,
+ .reg = 0x00,
+ },
+ .vdd_map = pmu_cpu_vdd_map,
+ .vdd_map_size = PMU_CPU_VDD_MAP_SIZE,
+
+ .cfg_param = &apalis_tk1_cl_dvfs_param,
+};
+
+/* TODO: the Jetson TK1 device tree lacks such a DFLL node */
+static const struct of_device_id dfll_of_match[] = {
+ { .compatible = "nvidia,tegra124-dfll", },
+ { },
+};
+
+static int __init apalis_tk1_cl_dvfs_init(void)
+{
+ struct device_node *dn = of_find_matching_node(NULL, dfll_of_match);
+
+ /*
+ * Apalis TK1 platforms may be used with different DT variants. Some of
+ * them include DFLL data in the DT, some do not. Check the DT here and
+ * continue with platform device registration only if no DFLL node is
+ * present.
+ */
+ if (dn) {
+ bool available = of_device_is_available(dn);
+ of_node_put(dn);
+ if (available)
+ return 0;
+ }
+
+ fill_reg_map();
+ apalis_tk1_cl_dvfs_data.flags = TEGRA_CL_DVFS_DYN_OUTPUT_CFG;
+ tegra_cl_dvfs_device.dev.platform_data = &apalis_tk1_cl_dvfs_data;
+ platform_device_register(&tegra_cl_dvfs_device);
+
+ return 0;
+}
+#endif
+
+int __init apalis_tk1_rail_alignment_init(void)
+{
+ tegra12x_vdd_cpu_align(APALIS_TK1_DEFAULT_CVB_ALIGNMENT, 0);
+
+ return 0;
+}
+
+int __init apalis_tk1_regulator_init(void)
+{
+#ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS
+ apalis_tk1_cl_dvfs_init();
+#endif
+ tegra_pmc_pmu_interrupt_polarity(true);
+
+ return 0;
+}
+
+int __init apalis_tk1_suspend_init(void)
+{
+ tegra_init_suspend(&apalis_tk1_suspend_data);
+
+ return 0;
+}
+
+int __init apalis_tk1_edp_init(void)
+{
+ unsigned int regulator_mA;
+
+ regulator_mA = get_maximum_cpu_current_supported();
+ if (!regulator_mA) {
+ /* CD575M UCM2 */
+ if (tegra_cpu_speedo_id() == 6)
+ regulator_mA = 11800;
+ /* CD575MI UCM1 */
+ else if (tegra_cpu_speedo_id() == 8)
+ regulator_mA = 12450;
+ /* CD575MI UCM2 */
+ else if (tegra_cpu_speedo_id() == 7)
+ regulator_mA = 11500;
+ /* CD575M UCM1 default */
+ else
+ regulator_mA = 12500;
+ }
+
+ pr_info("%s: CPU regulator %d mA\n", __func__, regulator_mA);
+ tegra_init_cpu_edp_limits(regulator_mA);
+
+ /* gpu maximum current */
+ regulator_mA = 11400;
+
+ pr_info("%s: GPU regulator %d mA\n", __func__, regulator_mA);
+ tegra_init_gpu_edp_limits(regulator_mA);
+
+ return 0;
+}
+
+static struct pid_thermal_gov_params soctherm_pid_params = {
+ .max_err_temp = 9000,
+ .max_err_gain = 1000,
+
+ .gain_p = 1000,
+ .gain_d = 0,
+
+ .up_compensation = 20,
+ .down_compensation = 20,
+};
+
+static struct thermal_zone_params soctherm_tzp = {
+ .governor_name = "pid_thermal_gov",
+ .governor_params = &soctherm_pid_params,
+};
+
+static struct tegra_thermtrip_pmic_data tpdata_as3722 = {
+ .reset_tegra = 1,
+ .pmu_16bit_ops = 0,
+ .controller_type = 0,
+ .pmu_i2c_addr = 0x40,
+ .i2c_controller_id = 4,
+ .poweroff_reg_addr = 0x36,
+ .poweroff_reg_data = 0x2,
+};
+
+static struct soctherm_platform_data apalis_tk1_soctherm_data = {
+ .therm = {
+ [THERM_CPU] = {
+ .zone_enable = true,
+ .passive_delay = 1000,
+ .hotspot_offset = 10000,
+ .num_trips = 3,
+ .trips = {
+ {
+ .cdev_type = "tegra-shutdown",
+ .trip_temp = 105000,
+ .trip_type = THERMAL_TRIP_CRITICAL,
+ .upper = THERMAL_NO_LIMIT,
+ .lower = THERMAL_NO_LIMIT,
+ },
+ {
+ .cdev_type = "tegra-heavy",
+ .trip_temp = 102000,
+ .trip_type = THERMAL_TRIP_HOT,
+ .upper = THERMAL_NO_LIMIT,
+ .lower = THERMAL_NO_LIMIT,
+ },
+ {
+ .cdev_type = "cpu-balanced",
+ .trip_temp = 92000,
+ .trip_type = THERMAL_TRIP_PASSIVE,
+ .upper = THERMAL_NO_LIMIT,
+ .lower = THERMAL_NO_LIMIT,
+ },
+ },
+ .tzp = &soctherm_tzp,
+ },
+ [THERM_GPU] = {
+ .zone_enable = true,
+ .passive_delay = 1000,
+ .hotspot_offset = 5000,
+ .num_trips = 3,
+ .trips = {
+ {
+ .cdev_type = "tegra-shutdown",
+ .trip_temp = 105000,
+ .trip_type = THERMAL_TRIP_CRITICAL,
+ .upper = THERMAL_NO_LIMIT,
+ .lower = THERMAL_NO_LIMIT,
+ },
+ {
+ .cdev_type = "tegra-heavy",
+ .trip_temp = 99000,
+ .trip_type = THERMAL_TRIP_HOT,
+ .upper = THERMAL_NO_LIMIT,
+ .lower = THERMAL_NO_LIMIT,
+ },
+ {
+ .cdev_type = "gpu-balanced",
+ .trip_temp = 89000,
+ .trip_type = THERMAL_TRIP_PASSIVE,
+ .upper = THERMAL_NO_LIMIT,
+ .lower = THERMAL_NO_LIMIT,
+ },
+ },
+ .tzp = &soctherm_tzp,
+ },
+ [THERM_MEM] = {
+ .zone_enable = true,
+ .num_trips = 1,
+ .trips = {
+ {
+ .cdev_type = "tegra-shutdown",
+ .trip_temp = 105000, /* = GPU shut */
+ .trip_type = THERMAL_TRIP_CRITICAL,
+ .upper = THERMAL_NO_LIMIT,
+ .lower = THERMAL_NO_LIMIT,
+ },
+ },
+ .tzp = &soctherm_tzp,
+ },
+ [THERM_PLL] = {
+ .zone_enable = true,
+ .tzp = &soctherm_tzp,
+ },
+ },
+ .throttle = {
+ [THROTTLE_HEAVY] = {
+ .priority = 100,
+ .devs = {
+ [THROTTLE_DEV_CPU] = {
+ .enable = true,
+ .depth = 80,
+ .throttling_depth = "heavy_throttling",
+ },
+ [THROTTLE_DEV_GPU] = {
+ .enable = true,
+ .throttling_depth = "heavy_throttling",
+ },
+ },
+ },
+ },
+};
+
+int __init apalis_tk1_soctherm_init(void)
+{
+ const int t13x_cpu_edp_temp_margin = 5000,
+ t13x_gpu_edp_temp_margin = 6000;
+ int cp_rev, ft_rev;
+ enum soctherm_therm_id therm_cpu = THERM_CPU;
+
+ cp_rev = tegra_fuse_calib_base_get_cp(NULL, NULL);
+ ft_rev = tegra_fuse_calib_base_get_ft(NULL, NULL);
+
+ pr_info("FUSE: cp_rev %d ft_rev %d\n", cp_rev, ft_rev);
+
+ /* do this only for supported CP,FT fuses */
+ if ((cp_rev >= 0) && (ft_rev >= 0)) {
+ tegra_platform_edp_init(apalis_tk1_soctherm_data.
+ therm[therm_cpu].trips,
+ &apalis_tk1_soctherm_data.
+ therm[therm_cpu].num_trips,
+ t13x_cpu_edp_temp_margin);
+ tegra_platform_gpu_edp_init(apalis_tk1_soctherm_data.
+ therm[THERM_GPU].trips,
+ &apalis_tk1_soctherm_data.
+ therm[THERM_GPU].num_trips,
+ t13x_gpu_edp_temp_margin);
+ tegra_add_cpu_vmax_trips(apalis_tk1_soctherm_data.
+ therm[therm_cpu].trips,
+ &apalis_tk1_soctherm_data.
+ therm[therm_cpu].num_trips);
+ tegra_add_tgpu_trips(apalis_tk1_soctherm_data.therm[THERM_GPU].
+ trips,
+ &apalis_tk1_soctherm_data.therm[THERM_GPU].
+ num_trips);
+ tegra_add_core_vmax_trips(apalis_tk1_soctherm_data.
+ therm[THERM_PLL].trips,
+ &apalis_tk1_soctherm_data.
+ therm[THERM_PLL].num_trips);
+ }
+
+ tegra_add_cpu_vmin_trips(apalis_tk1_soctherm_data.therm[therm_cpu].
+ trips,
+ &apalis_tk1_soctherm_data.therm[therm_cpu].
+ num_trips);
+ tegra_add_gpu_vmin_trips(apalis_tk1_soctherm_data.therm[THERM_GPU].
+ trips,
+ &apalis_tk1_soctherm_data.therm[THERM_GPU].
+ num_trips);
+ tegra_add_core_vmin_trips(apalis_tk1_soctherm_data.therm[THERM_PLL].
+ trips,
+ &apalis_tk1_soctherm_data.therm[THERM_PLL].
+ num_trips);
+
+ tegra_add_cpu_clk_switch_trips(apalis_tk1_soctherm_data.
+ therm[THERM_CPU].trips,
+ &apalis_tk1_soctherm_data.
+ therm[THERM_CPU].num_trips);
+
+ apalis_tk1_soctherm_data.tshut_pmu_trip_data = &tpdata_as3722;
+
+ return tegra11_soctherm_init(&apalis_tk1_soctherm_data);
+}
diff --git a/arch/arm/mach-tegra/board-apalis-tk1-sdhci.c b/arch/arm/mach-tegra/board-apalis-tk1-sdhci.c
new file mode 100644
index 000000000000..9a9ff228c5c7
--- /dev/null
+++ b/arch/arm/mach-tegra/board-apalis-tk1-sdhci.c
@@ -0,0 +1,270 @@
+/*
+ * arch/arm/mach-tegra/board-apalis-tk1-sdhci.c
+ *
+ * Copyright (c) 2016, Toradex AG. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <linux/wlan_plat.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mmc/host.h>
+#include <linux/wl12xx.h>
+#include <linux/platform_data/mmc-sdhci-tegra.h>
+#include <linux/mfd/max77660/max77660-core.h>
+#include <linux/tegra-fuse.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/mach-types.h>
+#include <mach/irqs.h>
+#include <mach/gpio-tegra.h>
+#include <mach/nct.h>
+
+#include "gpio-names.h"
+#include "board.h"
+#include "board-apalis-tk1.h"
+#include "dvfs.h"
+#include "iomap.h"
+#include "tegra-board-id.h"
+
+#define FUSE_SOC_SPEEDO_0 0x134
+
+static struct resource sdhci_resource0[] = {
+ [0] = {
+ .start = INT_SDMMC1,
+ .end = INT_SDMMC1,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC1_BASE,
+ .end = TEGRA_SDMMC1_BASE + TEGRA_SDMMC1_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource2[] = {
+ [0] = {
+ .start = INT_SDMMC3,
+ .end = INT_SDMMC3,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC3_BASE,
+ .end = TEGRA_SDMMC3_BASE + TEGRA_SDMMC3_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource sdhci_resource3[] = {
+ [0] = {
+ .start = INT_SDMMC4,
+ .end = INT_SDMMC4,
+ .flags = IORESOURCE_IRQ,
+ },
+ [1] = {
+ .start = TEGRA_SDMMC4_BASE,
+ .end = TEGRA_SDMMC4_BASE + TEGRA_SDMMC4_SIZE-1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
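+/* Note (not in the original patch): the CIS vendor/device IDs below
+ * (0x02d0/0x4329) match a Broadcom BCM4329 SDIO WLAN device. */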
+static struct embedded_sdio_data embedded_sdio_data0 = {
+ .cccr = {
+ .sdio_vsn = 2,
+ .multi_block = 1,
+ .low_speed = 0,
+ .wide_bus = 0,
+ .high_power = 1,
+ .high_speed = 1,
+ },
+ .cis = {
+ .vendor = 0x02d0,
+ .device = 0x4329,
+ },
+};
+#endif
+
+static u64 tegra_sdhci_dmamask = DMA_BIT_MASK(64);
+
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data0 = {
+ .cd_gpio = TEGRA_GPIO_PV3,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+ .tap_delay = 0,
+ .trim_delay = 0x2,
+/* .trim_delay = 0x3, */
+/* TBD
+ .ddr_clk_limit = 41000000, */
+ .uhs_mask = MMC_UHS_MASK_DDR50,
+ .calib_3v3_offsets = 0x7676,
+ .calib_1v8_offsets = 0x7676,
+/* TBD .max_clk_limit = 136000000, */
+};
+
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data2 = {
+#ifdef APALIS_TK1_V10
+ .cd_gpio = -1,
+#else
+ .cd_gpio = TEGRA_GPIO_PV2,
+#endif
+ .wp_gpio = -1,
+ .power_gpio = -1,
+ .tap_delay = 0,
+/* .trim_delay = 0x2, */
+ .trim_delay = 0x3,
+ .uhs_mask = MMC_UHS_MASK_DDR50,
+ .calib_3v3_offsets = 0x7676,
+ .calib_1v8_offsets = 0x7676,
+};
+
+static struct tegra_sdhci_platform_data tegra_sdhci_platform_data3 = {
+ .cd_gpio = -1,
+ .wp_gpio = -1,
+ .power_gpio = -1,
+ .is_8bit = 1,
+ .tap_delay = 0x4,
+ .trim_delay = 0x4,
+ .ddr_trim_delay = 0x0,
+ .mmc_data = {
+ .built_in = 1,
+ .ocr_mask = MMC_OCR_1V8_MASK,
+ },
+ .ddr_clk_limit = 51000000,
+ .max_clk_limit = 200000000,
+ .calib_3v3_offsets = 0x0202,
+ .calib_1v8_offsets = 0x0202,
+};
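+
+/*
+ * Note (not part of the original patch): judging from the platform data
+ * above, SDMMC1 and SDMMC3 are removable-card interfaces (card-detect
+ * GPIOs, no built_in flag), while SDMMC4 appears to be the on-module
+ * eMMC (8-bit bus, built_in, 1.8V-only OCR mask).
+ */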
+
+static struct platform_device tegra_sdhci_device0 = {
+ .name = "sdhci-tegra",
+ .id = 0,
+ .resource = sdhci_resource0,
+ .num_resources = ARRAY_SIZE(sdhci_resource0),
+ .dev = {
+ .dma_mask = &tegra_sdhci_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(64),
+ .platform_data = &tegra_sdhci_platform_data0,
+ },
+};
+
+static struct platform_device tegra_sdhci_device2 = {
+ .name = "sdhci-tegra",
+ .id = 2,
+ .resource = sdhci_resource2,
+ .num_resources = ARRAY_SIZE(sdhci_resource2),
+ .dev = {
+ .dma_mask = &tegra_sdhci_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(64),
+ .platform_data = &tegra_sdhci_platform_data2,
+ },
+};
+
+static struct platform_device tegra_sdhci_device3 = {
+ .name = "sdhci-tegra",
+ .id = 3,
+ .resource = sdhci_resource3,
+ .num_resources = ARRAY_SIZE(sdhci_resource3),
+ .dev = {
+ .dma_mask = &tegra_sdhci_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(64),
+ .platform_data = &tegra_sdhci_platform_data3,
+ },
+};
+
+int __init apalis_tk1_sdhci_init(void)
+{
+ int nominal_core_mv;
+ int min_vcore_override_mv;
+ int boot_vcore_mv;
+ u32 speedo;
+
+ nominal_core_mv =
+ tegra_dvfs_rail_get_nominal_millivolts(tegra_core_rail);
+ if (nominal_core_mv) {
+ tegra_sdhci_platform_data0.nominal_vcore_mv = nominal_core_mv;
+ tegra_sdhci_platform_data2.nominal_vcore_mv = nominal_core_mv;
+ tegra_sdhci_platform_data3.nominal_vcore_mv = nominal_core_mv;
+ }
+ min_vcore_override_mv =
+ tegra_dvfs_rail_get_override_floor(tegra_core_rail);
+ if (min_vcore_override_mv) {
+ tegra_sdhci_platform_data0.min_vcore_override_mv =
+ min_vcore_override_mv;
+ tegra_sdhci_platform_data2.min_vcore_override_mv =
+ min_vcore_override_mv;
+ tegra_sdhci_platform_data3.min_vcore_override_mv =
+ min_vcore_override_mv;
+ }
+ boot_vcore_mv = tegra_dvfs_rail_get_boot_level(tegra_core_rail);
+ if (boot_vcore_mv) {
+ tegra_sdhci_platform_data0.boot_vcore_mv = boot_vcore_mv;
+ tegra_sdhci_platform_data2.boot_vcore_mv = boot_vcore_mv;
+ tegra_sdhci_platform_data3.boot_vcore_mv = boot_vcore_mv;
+ }
+
+/* TBD
+ tegra_sdhci_platform_data2.max_clk_limit = 204000000;
+
+ tegra_sdhci_platform_data0.default_drv_type =
+ MMC_SET_DRIVER_TYPE_A;
+
+ tegra_sdhci_platform_data0.max_clk_limit = 204000000;
+
+ tegra_sdhci_platform_data3.uhs_mask = MMC_MASK_HS200;
+TBD */
+
+ /*
+ * FIXME: Set the max clock limit to 200MHz for SDMMC1/SDMMC3.
+ * Requesting 208MHz results in getting 204MHz from PLL_P,
+ * which then leads to CRC errors.
+ */
+ tegra_sdhci_platform_data0.max_clk_limit = 200000000;
+ tegra_sdhci_platform_data2.max_clk_limit = 200000000;
+
+ speedo = tegra_fuse_readl(FUSE_SOC_SPEEDO_0);
+ tegra_sdhci_platform_data0.cpu_speedo = speedo;
+ tegra_sdhci_platform_data2.cpu_speedo = speedo;
+ tegra_sdhci_platform_data3.cpu_speedo = speedo;
+
+ tegra_sdhci_platform_data0.disable_clock_gate = 1;
+ tegra_sdhci_platform_data2.disable_clock_gate = 1;
+
+/* TBD
+ tegra_sdhci_platform_data0.uhs_mask =
+ MMC_UHS_MASK_SDR50 | MMC_UHS_MASK_DDR50;
+ tegra_sdhci_platform_data2.uhs_mask =
+ MMC_UHS_MASK_SDR50; */
+
+/* TBD
+ tegra_sdhci_platform_data0.max_clk_limit = 204000000; */
+
+/* TBD
+ tegra_sdhci_platform_data2.uhs_mask =
+ MMC_UHS_MASK_SDR50;
+ tegra_sdhci_platform_data0.uhs_mask =
+ MMC_UHS_MASK_SDR50;
+ tegra_sdhci_platform_data3.max_clk_limit = 200000000;
+ tegra_sdhci_platform_data2.max_clk_limit = 204000000; */
+
+ platform_device_register(&tegra_sdhci_device3);
+ platform_device_register(&tegra_sdhci_device0);
+ platform_device_register(&tegra_sdhci_device2);
+
+ return 0;
+}
diff --git a/arch/arm/mach-tegra/board-apalis-tk1-sensors.c b/arch/arm/mach-tegra/board-apalis-tk1-sensors.c
new file mode 100644
index 000000000000..4064fe7ca8a5
--- /dev/null
+++ b/arch/arm/mach-tegra/board-apalis-tk1-sensors.c
@@ -0,0 +1,2072 @@
+/*
+ * arch/arm/mach-tegra/board-apalis-tk1-sensors.c
+ *
+ * Copyright (c) 2016, Toradex AG. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/mpu.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/nct1008.h>
+#include <linux/pid_thermal_gov.h>
+#include <linux/tegra-fuse.h>
+#include <linux/of_platform.h>
+#include <mach/edp.h>
+#include <mach/pinmux-t12.h>
+#include <mach/pinmux.h>
+#include <mach/io_dpd.h>
+#include <media/camera.h>
+#include <media/ar0330.h>
+#include <media/ar0261.h>
+#include <media/ar1335.h>
+#include <media/imx135.h>
+#include <media/imx179.h>
+#include <media/imx185.h>
+#include <media/dw9718.h>
+#include <media/as364x.h>
+#include <media/ov5693.h>
+#include <media/ov7695.h>
+#include <media/mt9m114.h>
+#include <media/ad5823.h>
+#include <media/max77387.h>
+
+#include <media/ov4689.h>
+#include <linux/platform_device.h>
+#include <media/soc_camera.h>
+#include <media/soc_camera_platform.h>
+#include <media/tegra_v4l2_camera.h>
+#include <linux/generic_adc_thermal.h>
+
+#include "cpu-tegra.h"
+#include "devices.h"
+#include "board.h"
+#include "board-common.h"
+#include "board-apalis-tk1.h"
+#include "tegra-board-id.h"
+
+/*
+ * SoC camera platform driver for testing
+ */
+#if IS_ENABLED(CONFIG_SOC_CAMERA_PLATFORM)
+static int apalis_tk1_soc_camera_add(struct soc_camera_device *icd);
+static void apalis_tk1_soc_camera_del(struct soc_camera_device *icd);
+
+static int apalis_tk1_soc_camera_set_capture(struct soc_camera_platform_info *info,
+ int enable)
+{
+ /* TODO: probably add clk operation here */
+ return 0; /* camera sensor always enabled */
+}
+
+static struct soc_camera_platform_info apalis_tk1_soc_camera_info = {
+ .format_name = "RGB4",
+ .format_depth = 32,
+ .format = {
+ .code = V4L2_MBUS_FMT_RGBA8888_4X8_LE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .field = V4L2_FIELD_NONE,
+ .width = 1280,
+ .height = 720,
+ },
+ .set_capture = apalis_tk1_soc_camera_set_capture,
+};
+
+static struct tegra_camera_platform_data apalis_tk1_camera_platform_data = {
+ .flip_v = 0,
+ .flip_h = 0,
+ .port = TEGRA_CAMERA_PORT_CSI_A,
+ .lanes = 4,
+ .continuous_clk = 0,
+};
+
+static struct soc_camera_link apalis_tk1_soc_camera_link = {
+ .bus_id = 1, /* This must match the .id of tegra_vi01_device */
+ .add_device = apalis_tk1_soc_camera_add,
+ .del_device = apalis_tk1_soc_camera_del,
+ .module_name = "soc_camera_platform",
+ .priv = &apalis_tk1_camera_platform_data,
+ .dev_priv = &apalis_tk1_soc_camera_info,
+};
+
+static struct platform_device *apalis_tk1_pdev;
+
+static void apalis_tk1_soc_camera_release(struct device *dev)
+{
+ soc_camera_platform_release(&apalis_tk1_pdev);
+}
+
+static int apalis_tk1_soc_camera_add(struct soc_camera_device *icd)
+{
+ return soc_camera_platform_add(icd, &apalis_tk1_pdev,
+ &apalis_tk1_soc_camera_link,
+ apalis_tk1_soc_camera_release, 0);
+}
+
+static void apalis_tk1_soc_camera_del(struct soc_camera_device *icd)
+{
+ soc_camera_platform_del(icd, apalis_tk1_pdev, &apalis_tk1_soc_camera_link);
+}
+
+static struct platform_device apalis_tk1_soc_camera_device = {
+ .name = "soc-camera-pdrv",
+ .id = 0,
+ .dev = {
+ .platform_data = &apalis_tk1_soc_camera_link,
+ },
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_IMX135)
+static int apalis_tk1_imx135_power(struct device *dev, int enable)
+{
+ return 0;
+}
+
+struct imx135_platform_data apalis_tk1_imx135_data;
+
+static struct i2c_board_info apalis_tk1_imx135_camera_i2c_device = {
+ I2C_BOARD_INFO("imx135_v4l2", 0x10),
+ .platform_data = &apalis_tk1_imx135_data,
+};
+
+static struct tegra_camera_platform_data apalis_tk1_imx135_camera_platform_data = {
+ .flip_v = 0,
+ .flip_h = 0,
+ .port = TEGRA_CAMERA_PORT_CSI_A,
+ .lanes = 4,
+ .continuous_clk = 0,
+};
+
+static struct soc_camera_link imx135_iclink = {
+ .bus_id = 0, /* This must match the .id of tegra_vi01_device */
+ .board_info = &apalis_tk1_imx135_camera_i2c_device,
+ .module_name = "imx135_v4l2",
+ .i2c_adapter_id = 2,
+ .power = apalis_tk1_imx135_power,
+ .priv = &apalis_tk1_imx135_camera_platform_data,
+};
+
+static struct platform_device apalis_tk1_imx135_soc_camera_device = {
+ .name = "soc-camera-pdrv",
+ .id = 1,
+ .dev = {
+ .platform_data = &imx135_iclink,
+ },
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_AR0261)
+static int apalis_tk1_ar0261_power(struct device *dev, int enable)
+{
+ return 0;
+}
+
+struct ar0261_platform_data apalis_tk1_ar0261_data;
+
+static struct i2c_board_info apalis_tk1_ar0261_camera_i2c_device = {
+ I2C_BOARD_INFO("ar0261_v4l2", 0x36),
+ .platform_data = &apalis_tk1_ar0261_data,
+};
+
+static struct tegra_camera_platform_data apalis_tk1_ar0261_camera_platform_data = {
+ .flip_v = 0,
+ .flip_h = 0,
+ .port = TEGRA_CAMERA_PORT_CSI_C,
+ .lanes = 1,
+ .continuous_clk = 0,
+};
+
+static struct soc_camera_link ar0261_iclink = {
+ .bus_id = 1, /* This must match the .id of tegra_vi01_device */
+ .board_info = &apalis_tk1_ar0261_camera_i2c_device,
+ .module_name = "ar0261_v4l2",
+ .i2c_adapter_id = 2,
+ .power = apalis_tk1_ar0261_power,
+ .priv = &apalis_tk1_ar0261_camera_platform_data,
+};
+
+static struct platform_device apalis_tk1_ar0261_soc_camera_device = {
+ .name = "soc-camera-pdrv",
+ .id = 2,
+ .dev = {
+ .platform_data = &ar0261_iclink,
+ },
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_AR0330)
+static int apalis_tk1_ar0330_power(struct device *dev, int enable)
+{
+ return 0;
+}
+
+struct ar0330_platform_data apalis_tk1_ar0330_data;
+
+static struct i2c_board_info apalis_tk1_ar0330_camera_i2c_device = {
+ I2C_BOARD_INFO("ar0330_v4l2", 0x18),
+ .platform_data = &apalis_tk1_ar0330_data,
+};
+
+static struct tegra_camera_platform_data apalis_tk1_ar0330_camera_platform_data = {
+ .flip_v = 0,
+ .flip_h = 0,
+ .port = TEGRA_CAMERA_PORT_CSI_A,
+ .lanes = 1,
+ .continuous_clk = 0,
+};
+
+static struct soc_camera_link ar0330_iclink = {
+ .bus_id = 0, /* This must match the .id of tegra_vi01_device */
+ .board_info = &apalis_tk1_ar0330_camera_i2c_device,
+ .module_name = "ar0330_v4l2",
+ .i2c_adapter_id = 2,
+ .power = apalis_tk1_ar0330_power,
+ .priv = &apalis_tk1_ar0330_camera_platform_data,
+};
+
+static struct platform_device apalis_tk1_ar0330_soc_camera_device = {
+ .name = "soc-camera-pdrv",
+ .id = 3,
+ .dev = {
+ .platform_data = &ar0330_iclink,
+ },
+};
+#endif
+
+static struct tegra_io_dpd csia_io = {
+ .name = "CSIA",
+ .io_dpd_reg_index = 0,
+ .io_dpd_bit = 0,
+};
+
+static struct tegra_io_dpd csib_io = {
+ .name = "CSIB",
+ .io_dpd_reg_index = 0,
+ .io_dpd_bit = 1,
+};
+
+static struct tegra_io_dpd csie_io = {
+ .name = "CSIE",
+ .io_dpd_reg_index = 1,
+ .io_dpd_bit = 12,
+};
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_AP1302)
+static int apalis_tk1_ap1302_power(struct device *dev, int enable)
+{
+ if (enable) {
+ /* disable CSIA IOs DPD mode to turn on camera for apalis_tk1 */
+ tegra_io_dpd_disable(&csia_io);
+ tegra_io_dpd_disable(&csib_io);
+ } else {
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ }
+ return 0;
+}
+
+static struct i2c_board_info apalis_tk1_ap1302_camera_i2c_device = {
+ I2C_BOARD_INFO("ap1302", 0x3c),
+};
+
+static struct tegra_camera_platform_data apalis_tk1_ap1302_camera_platform_data = {
+ .flip_v = 0,
+ .flip_h = 0,
+ .port = TEGRA_CAMERA_PORT_CSI_A,
+ .lanes = 4,
+ .continuous_clk = 0,
+};
+
+static struct soc_camera_link ap1302_iclink = {
+ .bus_id = 0, /* This must match the .id of tegra_vi01_device */
+ .board_info = &apalis_tk1_ap1302_camera_i2c_device,
+ .module_name = "ap1302",
+ .i2c_adapter_id = 2,
+ .power = apalis_tk1_ap1302_power,
+ .priv = &apalis_tk1_ap1302_camera_platform_data,
+};
+
+static struct platform_device apalis_tk1_ap1302_soc_camera_device = {
+ .name = "soc-camera-pdrv",
+ .id = 4,
+ .dev = {
+ .platform_data = &ap1302_iclink,
+ },
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_OV5640)
+static int apalis_tk1_ov5640_power(struct device *dev, int enable)
+{
+ if (enable) {
+ tegra_io_dpd_disable(&csia_io);
+ } else {
+ tegra_io_dpd_enable(&csia_io);
+ }
+ return 0;
+}
+
+static struct i2c_board_info apalis_tk1_ov5640_camera_i2c_device = {
+ I2C_BOARD_INFO("ov5640", 0x3c),
+};
+
+static struct tegra_camera_platform_data apalis_tk1_ov5640_camera_platform_data = {
+ .flip_v = 0,
+ .flip_h = 0,
+ .port = TEGRA_CAMERA_PORT_CSI_A,
+ .lanes = 2,
+ .continuous_clk = 1,
+};
+
+static struct soc_camera_link ov5640_iclink = {
+ .bus_id = 0, /* This must match the .id of tegra_vi01_device */
+ .board_info = &apalis_tk1_ov5640_camera_i2c_device,
+ .module_name = "ov5640",
+ .i2c_adapter_id = 2,
+ .power = apalis_tk1_ov5640_power,
+ .priv = &apalis_tk1_ov5640_camera_platform_data,
+};
+
+static struct platform_device apalis_tk1_ov5640_soc_camera_device = {
+ .name = "soc-camera-pdrv",
+ .id = 5,
+ .dev = {
+ .platform_data = &ov5640_iclink,
+ },
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_TC358743)
+static int apalis_tk1_tc358743_power_a(struct device *dev, int enable)
+{
+ if (enable) {
+ tegra_io_dpd_disable(&csia_io);
+ } else {
+ tegra_io_dpd_enable(&csia_io);
+ }
+ return 0;
+}
+
+static struct i2c_board_info apalis_tk1_tc358743_camera_i2c_device_a = {
+ I2C_BOARD_INFO("tc358743", 0x0f),
+};
+
+static struct tegra_camera_platform_data apalis_tk1_tc358743_camera_platform_data_a = {
+ .flip_v = 0,
+ .flip_h = 0,
+ .port = TEGRA_CAMERA_PORT_CSI_A,
+ .lanes = 2,
+ .continuous_clk = 1,
+};
+
+static struct soc_camera_link tc358743_iclink_a = {
+ .bus_id = 0, /* This must match the .id of tegra_vi01_device */
+ .board_info = &apalis_tk1_tc358743_camera_i2c_device_a,
+ .module_name = "tc358743",
+ .i2c_adapter_id = 2,
+ .power = apalis_tk1_tc358743_power_a,
+ .priv = &apalis_tk1_tc358743_camera_platform_data_a,
+};
+
+static struct platform_device apalis_tk1_tc358743_soc_camera_device_a = {
+ .name = "soc-camera-pdrv",
+ .id = 6,
+ .dev = {
+ .platform_data = &tc358743_iclink_a,
+ },
+};
+
+/* second tc358743 */
+static int apalis_tk1_tc358743_power_b(struct device *dev, int enable)
+{
+ if (enable) {
+ tegra_io_dpd_disable(&csib_io);
+ } else {
+ tegra_io_dpd_enable(&csib_io);
+ }
+ return 0;
+}
+
+static struct i2c_board_info apalis_tk1_tc358743_camera_i2c_device_b = {
+ I2C_BOARD_INFO("tc358743", 0x0f),
+};
+
+static struct tegra_camera_platform_data apalis_tk1_tc358743_camera_platform_data_b = {
+ .flip_v = 0,
+ .flip_h = 0,
+ .port = TEGRA_CAMERA_PORT_CSI_B,
+ .lanes = 2,
+ .continuous_clk = 1,
+};
+
+static struct soc_camera_link tc358743_iclink_b = {
+ .bus_id = 0, /* This must match the .id of tegra_vi01_device */
+ .board_info = &apalis_tk1_tc358743_camera_i2c_device_b,
+ .module_name = "tc358743",
+ .i2c_adapter_id = 0,
+ .power = apalis_tk1_tc358743_power_b,
+ .priv = &apalis_tk1_tc358743_camera_platform_data_b,
+};
+
+static struct platform_device apalis_tk1_tc358743_soc_camera_device_b = {
+ .name = "soc-camera-pdrv",
+ .id = 7,
+ .dev = {
+ .platform_data = &tc358743_iclink_b,
+ },
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_ADV7280)
+/* ADV7280M CSI C */
+static int apalis_tk1_adv7280_power_c(struct device *dev, int enable)
+{
+ if (enable) {
+ tegra_io_dpd_disable(&csie_io);
+ } else {
+ tegra_io_dpd_enable(&csie_io);
+ }
+ return 0;
+}
+
+static struct i2c_board_info apalis_tk1_adv7280_camera_i2c_device_c = {
+ I2C_BOARD_INFO("adv7280", 0x20),
+};
+
+static struct tegra_camera_platform_data apalis_tk1_adv7280_camera_platform_data_c = {
+ .flip_v = 0,
+ .flip_h = 0,
+ .port = TEGRA_CAMERA_PORT_CSI_C,
+ .lanes = 1,
+ .continuous_clk = 1,
+};
+
+static struct soc_camera_link adv7280_iclink_c = {
+ .bus_id = 0, /* This must match the .id of tegra_vi01_device */
+ .board_info = &apalis_tk1_adv7280_camera_i2c_device_c,
+ .module_name = "adv7280",
+ .i2c_adapter_id = 2,
+ .power = apalis_tk1_adv7280_power_c,
+ .priv = &apalis_tk1_adv7280_camera_platform_data_c,
+};
+
+static struct platform_device apalis_tk1_adv7280_soc_camera_device_c = {
+ .name = "soc-camera-pdrv",
+ .id = 8,
+ .dev = {
+ .platform_data = &adv7280_iclink_c,
+ },
+};
+#endif
+
+static struct regulator *apalis_tk1_vcmvdd;
+
+static int apalis_tk1_get_extra_regulators(void)
+{
+ if (!apalis_tk1_vcmvdd) {
+ apalis_tk1_vcmvdd = regulator_get(NULL, "avdd_af1_cam");
+ if (WARN_ON(IS_ERR(apalis_tk1_vcmvdd))) {
+ pr_err("%s: can't get regulator avdd_af1_cam: %ld\n",
+ __func__, PTR_ERR(apalis_tk1_vcmvdd));
+ regulator_put(apalis_tk1_vcmvdd);
+ apalis_tk1_vcmvdd = NULL;
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+static int apalis_tk1_ar0330_front_power_on(struct ar0330_power_rail *pw)
+{
+ int err;
+
+ if (unlikely(WARN_ON(!pw || !pw->avdd || !pw->iovdd)))
+ return -EFAULT;
+
+ /* disable CSIE IOs DPD mode to turn on front camera for apalis_tk1 */
+ tegra_io_dpd_disable(&csie_io);
+
+ gpio_set_value(CAM2_PWDN, 0);
+
+ err = regulator_enable(pw->iovdd);
+ if (unlikely(err))
+ goto ar0330_front_iovdd_fail;
+
+ usleep_range(1000, 1100);
+ err = regulator_enable(pw->avdd);
+ if (unlikely(err))
+ goto ar0330_front_avdd_fail;
+
+ usleep_range(1, 2);
+ gpio_set_value(CAM2_PWDN, 1);
+
+ return 0;
+ ar0330_front_avdd_fail:
+ regulator_disable(pw->iovdd);
+
+ ar0330_front_iovdd_fail:
+ /* put CSIE IOs into DPD mode to save additional power for apalis_tk1 */
+ tegra_io_dpd_enable(&csie_io);
+ pr_err("%s failed.\n", __func__);
+ return -ENODEV;
+}
+
+static int apalis_tk1_ar0330_front_power_off(struct ar0330_power_rail *pw)
+{
+ if (unlikely(WARN_ON(!pw || !pw->avdd || !pw->iovdd))) {
+ /* put CSIE IOs into DPD mode to
+ * save additional power for apalis_tk1
+ */
+ tegra_io_dpd_enable(&csie_io);
+ return -EFAULT;
+ }
+
+ gpio_set_value(CAM2_PWDN, 0);
+
+ usleep_range(1, 2);
+
+ regulator_disable(pw->iovdd);
+ regulator_disable(pw->avdd);
+ /* put CSIE IOs into DPD mode to save additional power for apalis_tk1 */
+ tegra_io_dpd_enable(&csie_io);
+ return 0;
+}
+
+struct ar0330_platform_data apalis_tk1_ar0330_front_data = {
+ .power_on = apalis_tk1_ar0330_front_power_on,
+ .power_off = apalis_tk1_ar0330_front_power_off,
+ .dev_name = "ar0330.1",
+ .mclk_name = "mclk2",
+};
+
+static int apalis_tk1_ar0330_power_on(struct ar0330_power_rail *pw)
+{
+ int err;
+
+ if (unlikely(WARN_ON(!pw || !pw->avdd || !pw->iovdd)))
+ return -EFAULT;
+
+ /* disable CSIA/B IOs DPD mode to turn on camera for apalis_tk1 */
+ tegra_io_dpd_disable(&csia_io);
+ tegra_io_dpd_disable(&csib_io);
+
+ gpio_set_value(CAM1_PWDN, 0);
+
+ err = regulator_enable(pw->iovdd);
+ if (unlikely(err))
+ goto ar0330_iovdd_fail;
+
+ usleep_range(1000, 1100);
+ err = regulator_enable(pw->avdd);
+ if (unlikely(err))
+ goto ar0330_avdd_fail;
+
+ usleep_range(1, 2);
+ gpio_set_value(CAM1_PWDN, 1);
+
+ return 0;
+ ar0330_avdd_fail:
+ regulator_disable(pw->iovdd);
+
+ ar0330_iovdd_fail:
+ /* put CSIA/B IOs into DPD mode to save additional power for apalis_tk1 */
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ pr_err("%s failed.\n", __func__);
+ return -ENODEV;
+}
+
+static int apalis_tk1_ar0330_power_off(struct ar0330_power_rail *pw)
+{
+ if (unlikely(WARN_ON(!pw || !pw->avdd || !pw->iovdd))) {
+ /* put CSIA/B IOs into DPD mode to
+ * save additional power for apalis_tk1
+ */
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ return -EFAULT;
+ }
+
+ gpio_set_value(CAM1_PWDN, 0);
+
+ usleep_range(1, 2);
+
+ regulator_disable(pw->iovdd);
+ regulator_disable(pw->avdd);
+ /* put CSIA/B IOs into DPD mode to save additional power for apalis_tk1 */
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ return 0;
+}
+
+struct ar0330_platform_data apalis_tk1_ar0330_data = {
+ .power_on = apalis_tk1_ar0330_power_on,
+ .power_off = apalis_tk1_ar0330_power_off,
+ .dev_name = "ar0330",
+};
+
+static int apalis_tk1_ov4689_power_on(struct ov4689_power_rail *pw)
+{
+ pr_info("%s: ++\n", __func__);
+ /* disable CSIA/B IOs DPD mode to turn on camera for apalis_tk1 */
+ tegra_io_dpd_disable(&csia_io);
+ tegra_io_dpd_disable(&csib_io);
+
+ gpio_set_value(TEGRA_GPIO_PBB5, 0);
+ usleep_range(10, 20);
+ gpio_set_value(TEGRA_GPIO_PBB5, 1);
+ usleep_range(820, 1000);
+
+ return 1;
+}
+
+static int apalis_tk1_ov4689_power_off(struct ov4689_power_rail *pw)
+{
+ pr_info("%s: ++\n", __func__);
+
+ gpio_set_value(TEGRA_GPIO_PBB5, 0);
+
+ /*
+ * put CSIA/B IOs into DPD mode to save additional power for apalis_tk1
+ */
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+
+ return 0;
+}
+
+static int apalis_tk1_ar0261_power_on(struct ar0261_power_rail *pw)
+{
+ int err;
+
+ if (unlikely(WARN_ON(!pw || !pw->avdd || !pw->iovdd || !pw->dvdd)))
+ return -EFAULT;
+
+ /* disable CSIE IOs DPD mode to turn on front camera for apalis_tk1 */
+ tegra_io_dpd_disable(&csie_io);
+
+ if (apalis_tk1_get_extra_regulators())
+ goto apalis_tk1_ar0261_poweron_fail;
+
+ gpio_set_value(CAM_RSTN, 0);
+ gpio_set_value(CAM_AF_PWDN, 1);
+
+ err = regulator_enable(apalis_tk1_vcmvdd);
+ if (unlikely(err))
+ goto ar0261_vcm_fail;
+
+ err = regulator_enable(pw->dvdd);
+ if (unlikely(err))
+ goto ar0261_dvdd_fail;
+
+ err = regulator_enable(pw->avdd);
+ if (unlikely(err))
+ goto ar0261_avdd_fail;
+
+ err = regulator_enable(pw->iovdd);
+ if (unlikely(err))
+ goto ar0261_iovdd_fail;
+
+ usleep_range(1, 2);
+ gpio_set_value(CAM2_PWDN, 1);
+
+ gpio_set_value(CAM_RSTN, 1);
+
+ return 0;
+ ar0261_iovdd_fail:
+ regulator_disable(pw->dvdd);
+
+ ar0261_dvdd_fail:
+ regulator_disable(pw->avdd);
+
+ ar0261_avdd_fail:
+ regulator_disable(apalis_tk1_vcmvdd);
+
+ ar0261_vcm_fail:
+ pr_err("%s vcmvdd failed.\n", __func__);
+ return -ENODEV;
+
+ apalis_tk1_ar0261_poweron_fail:
+ /* put CSIE IOs into DPD mode to save additional power for apalis_tk1 */
+ tegra_io_dpd_enable(&csie_io);
+ pr_err("%s failed.\n", __func__);
+ return -ENODEV;
+}
+
+static int apalis_tk1_ar0261_power_off(struct ar0261_power_rail *pw)
+{
+ if (unlikely(WARN_ON(!pw || !pw->avdd || !pw->iovdd || !pw->dvdd ||
+ !apalis_tk1_vcmvdd))) {
+ /* put CSIE IOs into DPD mode to
+ * save additional power for apalis_tk1
+ */
+ tegra_io_dpd_enable(&csie_io);
+ return -EFAULT;
+ }
+
+ gpio_set_value(CAM_RSTN, 0);
+
+ usleep_range(1, 2);
+
+ regulator_disable(pw->iovdd);
+ regulator_disable(pw->dvdd);
+ regulator_disable(pw->avdd);
+ regulator_disable(apalis_tk1_vcmvdd);
+ /* put CSIE IOs into DPD mode to save additional power for apalis_tk1 */
+ tegra_io_dpd_enable(&csie_io);
+ return 0;
+}
+
+struct ar0261_platform_data apalis_tk1_ar0261_data = {
+ .power_on = apalis_tk1_ar0261_power_on,
+ .power_off = apalis_tk1_ar0261_power_off,
+ .mclk_name = "mclk2",
+};
+
+static int apalis_tk1_imx135_get_extra_regulators(struct imx135_power_rail *pw)
+{
+ if (!pw->ext_reg1) {
+ pw->ext_reg1 = regulator_get(NULL, "imx135_reg1");
+ if (WARN_ON(IS_ERR(pw->ext_reg1))) {
+ pr_err("%s: can't get regulator imx135_reg1: %ld\n",
+ __func__, PTR_ERR(pw->ext_reg1));
+ pw->ext_reg1 = NULL;
+ return -ENODEV;
+ }
+ }
+
+ if (!pw->ext_reg2) {
+ pw->ext_reg2 = regulator_get(NULL, "imx135_reg2");
+ if (WARN_ON(IS_ERR(pw->ext_reg2))) {
+ pr_err("%s: can't get regulator imx135_reg2: %ld\n",
+ __func__, PTR_ERR(pw->ext_reg2));
+ pw->ext_reg2 = NULL;
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+static int apalis_tk1_imx135_power_on(struct imx135_power_rail *pw)
+{
+ int err;
+
+ if (unlikely(WARN_ON(!pw || !pw->iovdd || !pw->avdd)))
+ return -EFAULT;
+
+ /* disable CSIA/B IOs DPD mode to turn on camera for apalis_tk1 */
+ tegra_io_dpd_disable(&csia_io);
+ tegra_io_dpd_disable(&csib_io);
+
+ if (apalis_tk1_imx135_get_extra_regulators(pw))
+ goto imx135_poweron_fail;
+
+ err = regulator_enable(pw->ext_reg1);
+ if (unlikely(err))
+ goto imx135_ext_reg1_fail;
+
+ err = regulator_enable(pw->ext_reg2);
+ if (unlikely(err))
+ goto imx135_ext_reg2_fail;
+
+ gpio_set_value(CAM_AF_PWDN, 1);
+ gpio_set_value(CAM1_PWDN, 0);
+ usleep_range(10, 20);
+
+ err = regulator_enable(pw->avdd);
+ if (err)
+ goto imx135_avdd_fail;
+
+ err = regulator_enable(pw->iovdd);
+ if (err)
+ goto imx135_iovdd_fail;
+
+ usleep_range(1, 2);
+ gpio_set_value(CAM1_PWDN, 1);
+
+ usleep_range(300, 310);
+
+ return 1;
+
+ imx135_iovdd_fail:
+ regulator_disable(pw->avdd);
+
+ imx135_avdd_fail:
+ if (pw->ext_reg2)
+ regulator_disable(pw->ext_reg2);
+
+ imx135_ext_reg2_fail:
+ if (pw->ext_reg1)
+ regulator_disable(pw->ext_reg1);
+ gpio_set_value(CAM_AF_PWDN, 0);
+
+ imx135_ext_reg1_fail:
+ imx135_poweron_fail:
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ pr_err("%s failed.\n", __func__);
+ return -ENODEV;
+}
+
+static int apalis_tk1_imx135_power_off(struct imx135_power_rail *pw)
+{
+ if (unlikely(WARN_ON(!pw || !pw->iovdd || !pw->avdd))) {
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ return -EFAULT;
+ }
+
+ regulator_disable(pw->iovdd);
+ regulator_disable(pw->avdd);
+
+ regulator_disable(pw->ext_reg1);
+ regulator_disable(pw->ext_reg2);
+
+ /*
+ * put CSIA/B IOs into DPD mode to save additional power for apalis_tk1
+ */
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ return 0;
+}
+
+static int apalis_tk1_ar1335_power_on(struct ar1335_power_rail *pw)
+{
+ int err;
+
+ if (unlikely(WARN_ON(!pw || !pw->iovdd || !pw->avdd)))
+ return -EFAULT;
+
+ /* disable CSIA/B IOs DPD mode to turn on camera for apalis_tk1 */
+ tegra_io_dpd_disable(&csia_io);
+ tegra_io_dpd_disable(&csib_io);
+
+ gpio_set_value(CAM_RSTN, 0);
+ usleep_range(10, 20);
+
+ err = regulator_enable(pw->avdd);
+ if (err)
+ goto ar1335_avdd_fail;
+
+ err = regulator_enable(pw->iovdd);
+ if (err)
+ goto ar1335_iovdd_fail;
+
+ err = regulator_enable(pw->dvdd);
+ if (err)
+ goto ar1335_dvdd_fail;
+
+ usleep_range(1, 2);
+ gpio_set_value(CAM_RSTN, 1);
+
+ usleep_range(300, 310);
+
+ return 0;
+
+ ar1335_dvdd_fail:
+ regulator_disable(pw->iovdd);
+
+ ar1335_iovdd_fail:
+ regulator_disable(pw->avdd);
+
+ ar1335_avdd_fail:
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ pr_err("%s failed.\n", __func__);
+ return -ENODEV;
+}
+
+static int apalis_tk1_ar1335_power_off(struct ar1335_power_rail *pw)
+{
+ if (unlikely(WARN_ON(!pw || !pw->iovdd || !pw->avdd))) {
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ return -EFAULT;
+ }
+
+ regulator_disable(pw->iovdd);
+ regulator_disable(pw->avdd);
+ regulator_disable(pw->dvdd);
+
+ /*
+ * put CSIA/B IOs into DPD mode to save additional power for apalis_tk1
+ */
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ return 0;
+}
+
+static int apalis_tk1_imx179_power_on(struct imx179_power_rail *pw)
+{
+ int err;
+
+ if (unlikely(WARN_ON(!pw || !pw->iovdd || !pw->avdd)))
+ return -EFAULT;
+
+ /* disable CSIA/B IOs DPD mode to turn on camera for apalis_tk1 */
+ tegra_io_dpd_disable(&csia_io);
+ tegra_io_dpd_disable(&csib_io);
+
+ gpio_set_value(CAM_AF_PWDN, 1);
+ gpio_set_value(CAM_RSTN, 0);
+ gpio_set_value(CAM1_PWDN, 0);
+ usleep_range(10, 20);
+
+ err = regulator_enable(pw->avdd);
+ if (err)
+ goto imx179_avdd_fail;
+
+ err = regulator_enable(pw->iovdd);
+ if (err)
+ goto imx179_iovdd_fail;
+
+ err = regulator_enable(pw->dvdd);
+ if (err)
+ goto imx179_dvdd_fail;
+
+ usleep_range(1, 2);
+ gpio_set_value(CAM_RSTN, 1);
+
+ usleep_range(300, 310);
+
+ return 1;
+
+ imx179_dvdd_fail:
+ regulator_disable(pw->iovdd);
+
+ imx179_iovdd_fail:
+ regulator_disable(pw->avdd);
+
+ imx179_avdd_fail:
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ pr_err("%s failed.\n", __func__);
+ return -ENODEV;
+}
+
+static int apalis_tk1_imx179_power_off(struct imx179_power_rail *pw)
+{
+ if (unlikely(WARN_ON(!pw || !pw->iovdd || !pw->avdd))) {
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ return -EFAULT;
+ }
+
+ regulator_disable(pw->dvdd);
+ regulator_disable(pw->iovdd);
+ regulator_disable(pw->avdd);
+
+ /*
+ * put CSIA/B IOs into DPD mode to save additional power for apalis_tk1
+ */
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ return 0;
+}
+
+struct ar1335_platform_data apalis_tk1_ar1335_data = {
+ .flash_cap = {
+ .enable = 1,
+ .edge_trig_en = 1,
+ .start_edge = 0,
+ .repeat = 1,
+ .delay_frm = 0,
+ },
+ .power_on = apalis_tk1_ar1335_power_on,
+ .power_off = apalis_tk1_ar1335_power_off,
+};
+
+static int apalis_tk1_imx185_power_on(struct imx185_power_rail *pw)
+{
+ int err;
+
+ if (unlikely(WARN_ON(!pw || !pw->iovdd || !pw->avdd)))
+ return -EFAULT;
+
+ /* disable CSIA/B IOs DPD mode to turn on camera for apalis_tk1 */
+ tegra_io_dpd_disable(&csia_io);
+ tegra_io_dpd_disable(&csib_io);
+
+ gpio_set_value(CAM1_PWDN, 0);
+ usleep_range(10, 20);
+
+ err = regulator_enable(pw->dvdd);
+ if (err)
+ goto imx185_dvdd_fail;
+
+ err = regulator_enable(pw->iovdd);
+ if (err)
+ goto imx185_iovdd_fail;
+
+ err = regulator_enable(pw->avdd);
+ if (err)
+ goto imx185_avdd_fail;
+
+ usleep_range(1, 2);
+ gpio_set_value(CAM1_PWDN, 1);
+
+ usleep_range(300, 310);
+
+ return 0;
+
+ imx185_avdd_fail:
+ regulator_disable(pw->iovdd);
+
+ imx185_iovdd_fail:
+ regulator_disable(pw->dvdd);
+
+ imx185_dvdd_fail:
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ pr_err("%s failed.\n", __func__);
+ return -ENODEV;
+}
+
+static int apalis_tk1_imx185_power_off(struct imx185_power_rail *pw)
+{
+ if (unlikely(WARN_ON(!pw || !pw->iovdd || !pw->avdd))) {
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ return -EFAULT;
+ }
+
+ regulator_disable(pw->avdd);
+ regulator_disable(pw->iovdd);
+ regulator_disable(pw->dvdd);
+
+ /*
+ * put CSIA/B IOs into DPD mode to save additional power for apalis_tk1
+ */
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ return 0;
+}
+
+struct imx135_platform_data apalis_tk1_imx135_data = {
+ .flash_cap = {
+ .enable = 1,
+ .edge_trig_en = 1,
+ .start_edge = 0,
+ .repeat = 1,
+ .delay_frm = 0,
+ },
+ .ext_reg = true,
+ .power_on = apalis_tk1_imx135_power_on,
+ .power_off = apalis_tk1_imx135_power_off,
+};
+
+struct imx179_platform_data apalis_tk1_imx179_data = {
+ .flash_cap = {
+ .enable = 1,
+ .edge_trig_en = 1,
+ .start_edge = 0,
+ .repeat = 1,
+ .delay_frm = 0,
+ },
+ .power_on = apalis_tk1_imx179_power_on,
+ .power_off = apalis_tk1_imx179_power_off,
+};
+
+struct ov4689_platform_data apalis_tk1_ov4689_data = {
+ .flash_cap = {
+ .enable = 0,
+ .edge_trig_en = 1,
+ .start_edge = 0,
+ .repeat = 1,
+ .delay_frm = 0,
+ },
+ .power_on = apalis_tk1_ov4689_power_on,
+ .power_off = apalis_tk1_ov4689_power_off,
+};
+
+static int apalis_tk1_dw9718_power_on(struct dw9718_power_rail *pw)
+{
+ int err;
+ pr_info("%s\n", __func__);
+
+ if (unlikely(!pw || !pw->vdd || !pw->vdd_i2c || !pw->vana))
+ return -EFAULT;
+
+ err = regulator_enable(pw->vdd);
+ if (unlikely(err))
+ goto dw9718_vdd_fail;
+
+ err = regulator_enable(pw->vdd_i2c);
+ if (unlikely(err))
+ goto dw9718_i2c_fail;
+
+ err = regulator_enable(pw->vana);
+ if (unlikely(err))
+ goto dw9718_ana_fail;
+
+ usleep_range(1000, 1020);
+
+ /* return 1 to skip the in-driver power_on sequence */
+ pr_debug("%s --\n", __func__);
+ return 1;
+
+ dw9718_ana_fail:
+ regulator_disable(pw->vdd_i2c);
+
+ dw9718_i2c_fail:
+ regulator_disable(pw->vdd);
+
+ dw9718_vdd_fail:
+ pr_err("%s FAILED\n", __func__);
+ return -ENODEV;
+}
+
+static int apalis_tk1_dw9718_power_off(struct dw9718_power_rail *pw)
+{
+ pr_info("%s\n", __func__);
+
+ if (unlikely(!pw || !pw->vdd || !pw->vdd_i2c || !pw->vana))
+ return -EFAULT;
+
+ regulator_disable(pw->vdd);
+ regulator_disable(pw->vdd_i2c);
+ regulator_disable(pw->vana);
+
+ return 1;
+}
+
+static u16 dw9718_devid;
+static int apalis_tk1_dw9718_detect(void *buf, size_t size)
+{
+ dw9718_devid = 0x9718;
+ return 0;
+}
+
+static struct nvc_focus_cap dw9718_cap = {
+ .settle_time = 30,
+ .slew_rate = 0x3A200C,
+ .focus_macro = 450,
+ .focus_infinity = 200,
+ .focus_hyper = 200,
+};
+
+static struct dw9718_platform_data apalis_tk1_dw9718_data = {
+ .cfg = NVC_CFG_NODEV,
+ .num = 0,
+ .sync = 0,
+ .dev_name = "focuser",
+ .cap = &dw9718_cap,
+ .power_on = apalis_tk1_dw9718_power_on,
+ .power_off = apalis_tk1_dw9718_power_off,
+ .detect = apalis_tk1_dw9718_detect,
+};
+
+static struct as364x_platform_data apalis_tk1_as3648_data = {
+ .config = {
+ .led_mask = 3,
+ .max_total_current_mA = 1000,
+ .max_peak_current_mA = 600,
+ .max_torch_current_mA = 600,
+ .vin_low_v_run_mV = 3070,
+ .strobe_type = 1,
+ },
+ .pinstate = {
+ .mask = 1 << (CAM_FLASH_STROBE - TEGRA_GPIO_PBB0),
+ .values = 1 << (CAM_FLASH_STROBE - TEGRA_GPIO_PBB0)
+ },
+ .dev_name = "torch",
+ .type = AS3648,
+ .gpio_strobe = CAM_FLASH_STROBE,
+};
+
+static int apalis_tk1_ov7695_power_on(struct ov7695_power_rail *pw)
+{
+ int err;
+
+ if (unlikely(WARN_ON(!pw || !pw->avdd || !pw->iovdd)))
+ return -EFAULT;
+
+ /* disable CSIE IOs DPD mode to turn on front camera for apalis_tk1 */
+ tegra_io_dpd_disable(&csie_io);
+
+ gpio_set_value(CAM2_PWDN, 0);
+ usleep_range(1000, 1020);
+
+ err = regulator_enable(pw->avdd);
+ if (unlikely(err))
+ goto ov7695_avdd_fail;
+ usleep_range(300, 320);
+
+ err = regulator_enable(pw->iovdd);
+ if (unlikely(err))
+ goto ov7695_iovdd_fail;
+ usleep_range(1000, 1020);
+
+ gpio_set_value(CAM2_PWDN, 1);
+ usleep_range(1000, 1020);
+
+ return 0;
+
+ ov7695_iovdd_fail:
+ regulator_disable(pw->avdd);
+
+ ov7695_avdd_fail:
+ gpio_set_value(CAM_RSTN, 0);
+ /* put CSIE IOs into DPD mode to save additional power for apalis_tk1 */
+ tegra_io_dpd_enable(&csie_io);
+ return -ENODEV;
+}
+
+static int apalis_tk1_ov7695_power_off(struct ov7695_power_rail *pw)
+{
+ if (unlikely(WARN_ON(!pw || !pw->avdd || !pw->iovdd))) {
+ /* put CSIE IOs into DPD mode to
+ * save additional power for apalis_tk1
+ */
+ tegra_io_dpd_enable(&csie_io);
+ return -EFAULT;
+ }
+ usleep_range(100, 120);
+
+ gpio_set_value(CAM2_PWDN, 0);
+ usleep_range(100, 120);
+
+ regulator_disable(pw->iovdd);
+ usleep_range(100, 120);
+
+ regulator_disable(pw->avdd);
+
+ /* put CSIE IOs into DPD mode to save additional power for apalis_tk1 */
+ tegra_io_dpd_enable(&csie_io);
+ return 0;
+}
+
+struct ov7695_platform_data apalis_tk1_ov7695_pdata = {
+ .power_on = apalis_tk1_ov7695_power_on,
+ .power_off = apalis_tk1_ov7695_power_off,
+ .mclk_name = "mclk2",
+};
+
+static int apalis_tk1_mt9m114_power_on(struct mt9m114_power_rail *pw)
+{
+ int err;
+ if (unlikely(!pw || !pw->avdd || !pw->iovdd))
+ return -EFAULT;
+
+ /* disable CSIE IOs DPD mode to turn on front camera for apalis_tk1 */
+ tegra_io_dpd_disable(&csie_io);
+
+ gpio_set_value(CAM_RSTN, 0);
+ gpio_set_value(CAM2_PWDN, 1);
+ usleep_range(1000, 1020);
+
+ err = regulator_enable(pw->iovdd);
+ if (unlikely(err))
+ goto mt9m114_iovdd_fail;
+
+ err = regulator_enable(pw->avdd);
+ if (unlikely(err))
+ goto mt9m114_avdd_fail;
+
+ usleep_range(1000, 1020);
+ gpio_set_value(CAM_RSTN, 1);
+ gpio_set_value(CAM2_PWDN, 0);
+ usleep_range(1000, 1020);
+
+ /* return 1 to skip the in-driver power_on sequence */
+ return 1;
+
+ mt9m114_avdd_fail:
+ regulator_disable(pw->iovdd);
+
+ mt9m114_iovdd_fail:
+ gpio_set_value(CAM_RSTN, 0);
+ /* put CSIE IOs into DPD mode to save additional power for apalis_tk1 */
+ tegra_io_dpd_enable(&csie_io);
+ return -ENODEV;
+}
+
+static int apalis_tk1_mt9m114_power_off(struct mt9m114_power_rail *pw)
+{
+ if (unlikely(!pw || !pw->avdd || !pw->iovdd)) {
+ /* put CSIE IOs into DPD mode to
+ * save additional power for apalis_tk1
+ */
+ tegra_io_dpd_enable(&csie_io);
+ return -EFAULT;
+ }
+
+ usleep_range(100, 120);
+ gpio_set_value(CAM_RSTN, 0);
+ usleep_range(100, 120);
+ regulator_disable(pw->avdd);
+ usleep_range(100, 120);
+ regulator_disable(pw->iovdd);
+
+ /* put CSIE IOs into DPD mode to save additional power for apalis_tk1 */
+ tegra_io_dpd_enable(&csie_io);
+ return 1;
+}
+
+struct mt9m114_platform_data apalis_tk1_mt9m114_pdata = {
+ .power_on = apalis_tk1_mt9m114_power_on,
+ .power_off = apalis_tk1_mt9m114_power_off,
+ .mclk_name = "mclk2",
+};
+
+static int apalis_tk1_ov5693_power_on(struct ov5693_power_rail *pw)
+{
+ int err;
+
+ if (unlikely(WARN_ON(!pw || !pw->dovdd || !pw->avdd)))
+ return -EFAULT;
+
+ /* disable CSIA/B IOs DPD mode to turn on camera for apalis_tk1 */
+ tegra_io_dpd_disable(&csia_io);
+ tegra_io_dpd_disable(&csib_io);
+
+ if (apalis_tk1_get_extra_regulators())
+ goto ov5693_poweron_fail;
+
+ gpio_set_value(CAM1_PWDN, 0);
+ usleep_range(10, 20);
+
+ err = regulator_enable(pw->avdd);
+ if (err)
+ goto ov5693_avdd_fail;
+
+ err = regulator_enable(pw->dovdd);
+ if (err)
+ goto ov5693_iovdd_fail;
+
+ udelay(2);
+ gpio_set_value(CAM1_PWDN, 1);
+
+ err = regulator_enable(apalis_tk1_vcmvdd);
+ if (unlikely(err))
+ goto ov5693_vcmvdd_fail;
+
+ usleep_range(1000, 1110);
+
+ return 0;
+
+ ov5693_vcmvdd_fail:
+ regulator_disable(pw->dovdd);
+
+ ov5693_iovdd_fail:
+ regulator_disable(pw->avdd);
+
+ ov5693_avdd_fail:
+ gpio_set_value(CAM1_PWDN, 0);
+
+ ov5693_poweron_fail:
+ /*
+ * put CSIA/B IOs into DPD mode to save additional power for apalis_tk1
+ */
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ pr_err("%s FAILED\n", __func__);
+ return -ENODEV;
+}
+
+static int apalis_tk1_ov5693_power_off(struct ov5693_power_rail *pw)
+{
+ if (unlikely(WARN_ON(!pw || !pw->dovdd || !pw->avdd))) {
+ /* put CSIA/B IOs into DPD mode to
+ * save additional power for apalis_tk1
+ */
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ return -EFAULT;
+ }
+
+ usleep_range(21, 25);
+ gpio_set_value(CAM1_PWDN, 0);
+ udelay(2);
+
+ regulator_disable(apalis_tk1_vcmvdd);
+ regulator_disable(pw->dovdd);
+ regulator_disable(pw->avdd);
+
+ /*
+ * put CSIA/B IOs into DPD mode to save additional power for apalis_tk1
+ */
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ return 0;
+}
+
+static struct nvc_gpio_pdata ov5693_gpio_pdata[] = {
+ {OV5693_GPIO_TYPE_PWRDN, CAM1_PWDN, true, 0,},
+};
+
+#define NV_GUID(a, b, c, d, e, f, g, h) \
+ ((u64) ((((a)&0xffULL) << 56ULL) | (((b)&0xffULL) << 48ULL) | \
+ (((c)&0xffULL) << 40ULL) | (((d)&0xffULL) << 32ULL) | \
+ (((e)&0xffULL) << 24ULL) | (((f)&0xffULL) << 16ULL) | \
+ (((g)&0xffULL) << 8ULL) | (((h)&0xffULL))))
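+
+/*
+ * Illustrative note (not part of the original patch): NV_GUID() packs
+ * eight ASCII characters into a u64, most significant byte first. The
+ * focuser GUID used below therefore expands to
+ *
+ *   NV_GUID('f', '_', 'A', 'D', '5', '8', '2', '3') == 0x665f414435383233ULL
+ *
+ * with 'f' (0x66) in the top byte and '3' (0x33) in the bottom byte.
+ */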
+
+static struct nvc_imager_cap ov5693_cap = {
+ .identifier = "OV5693",
+ .sensor_nvc_interface = 3,
+ .pixel_types[0] = 0x101,
+ .orientation = 0,
+ .direction = 0,
+ .initial_clock_rate_khz = 6000,
+ .clock_profiles[0] = {
+ .external_clock_khz = 24000,
+ .clock_multiplier = 8000000, /* value * 1000000 */
+ },
+ .clock_profiles[1] = {
+ .external_clock_khz = 0,
+ .clock_multiplier = 0,
+ },
+ .h_sync_edge = 0,
+ .v_sync_edge = 0,
+ .mclk_on_vgp0 = 0,
+ .csi_port = 0,
+ .data_lanes = 2,
+ .virtual_channel_id = 0,
+ .discontinuous_clk_mode = 1,
+ .cil_threshold_settle = 0,
+ .min_blank_time_width = 16,
+ .min_blank_time_height = 16,
+ .preferred_mode_index = 0,
+ .focuser_guid = NV_GUID('f', '_', 'A', 'D', '5', '8', '2', '3'),
+ .torch_guid = NV_GUID('l', '_', 'N', 'V', 'C', 'A', 'M', '0'),
+ .cap_version = NVC_IMAGER_CAPABILITIES_VERSION2,
+ .flash_control_enabled = 0,
+ .adjustable_flash_timing = 0,
+ .is_hdr = 1,
+};
+
+static struct ov5693_platform_data apalis_tk1_ov5693_pdata = {
+ .gpio_count = ARRAY_SIZE(ov5693_gpio_pdata),
+ .gpio = ov5693_gpio_pdata,
+ .power_on = apalis_tk1_ov5693_power_on,
+ .power_off = apalis_tk1_ov5693_power_off,
+ .dev_name = "ov5693",
+ .cap = &ov5693_cap,
+ .mclk_name = "mclk",
+ .regulators = {
+ .avdd = "avdd_ov5693",
+ .dvdd = "dvdd",
+ .dovdd = "dovdd",
+ },
+ .has_eeprom = 1,
+};
+
+static struct imx185_platform_data apalis_tk1_imx185_data = {
+ .power_on = apalis_tk1_imx185_power_on,
+ .power_off = apalis_tk1_imx185_power_off,
+ .mclk_name = "mclk",
+};
+
+static int apalis_tk1_ov5693_front_power_on(struct ov5693_power_rail *pw)
+{
+ int err;
+
+ if (unlikely(WARN_ON(!pw || !pw->dovdd || !pw->avdd)))
+ return -EFAULT;
+
+ if (apalis_tk1_get_extra_regulators())
+ goto ov5693_front_poweron_fail;
+
+ gpio_set_value(CAM2_PWDN, 0);
+ gpio_set_value(CAM_RSTN, 0);
+ usleep_range(10, 20);
+
+ err = regulator_enable(pw->avdd);
+ if (err)
+ goto ov5693_front_avdd_fail;
+
+ err = regulator_enable(pw->dovdd);
+ if (err)
+ goto ov5693_front_iovdd_fail;
+
+ udelay(2);
+ gpio_set_value(CAM2_PWDN, 1);
+ gpio_set_value(CAM_RSTN, 1);
+
+ err = regulator_enable(apalis_tk1_vcmvdd);
+ if (unlikely(err))
+ goto ov5693_front_vcmvdd_fail;
+
+ usleep_range(1000, 1110);
+
+ return 0;
+
+ ov5693_front_vcmvdd_fail:
+ regulator_disable(pw->dovdd);
+
+ ov5693_front_iovdd_fail:
+ regulator_disable(pw->avdd);
+
+ ov5693_front_avdd_fail:
+ gpio_set_value(CAM2_PWDN, 0);
+ gpio_set_value(CAM_RSTN, 0);
+
+ ov5693_front_poweron_fail:
+ pr_err("%s FAILED\n", __func__);
+ return -ENODEV;
+}
+
+static int apalis_tk1_ov5693_front_power_off(struct ov5693_power_rail *pw)
+{
+ if (unlikely(WARN_ON(!pw || !pw->dovdd || !pw->avdd)))
+ return -EFAULT;
+
+ usleep_range(21, 25);
+ gpio_set_value(CAM2_PWDN, 0);
+ gpio_set_value(CAM_RSTN, 0);
+ udelay(2);
+
+ regulator_disable(apalis_tk1_vcmvdd);
+ regulator_disable(pw->dovdd);
+ regulator_disable(pw->avdd);
+
+ return 0;
+}
+
+static struct nvc_gpio_pdata ov5693_front_gpio_pdata[] = {
+ {OV5693_GPIO_TYPE_PWRDN, CAM2_PWDN, true, 0,},
+ {OV5693_GPIO_TYPE_PWRDN, CAM_RSTN, true, 0,},
+};
+
+static struct nvc_imager_cap ov5693_front_cap = {
+ .identifier = "OV5693.1",
+ .sensor_nvc_interface = 4,
+ .pixel_types[0] = 0x101,
+ .orientation = 0,
+ .direction = 1,
+ .initial_clock_rate_khz = 6000,
+ .clock_profiles[0] = {
+ .external_clock_khz = 24000,
+ .clock_multiplier = 8000000, /* value * 1000000 */
+ },
+ .clock_profiles[1] = {
+ .external_clock_khz = 0,
+ .clock_multiplier = 0,
+ },
+ .h_sync_edge = 0,
+ .v_sync_edge = 0,
+ .mclk_on_vgp0 = 0,
+ .csi_port = 1,
+ .data_lanes = 2,
+ .virtual_channel_id = 0,
+ .discontinuous_clk_mode = 1,
+ .cil_threshold_settle = 0,
+ .min_blank_time_width = 16,
+ .min_blank_time_height = 16,
+ .preferred_mode_index = 0,
+ .focuser_guid = 0,
+ .torch_guid = 0,
+ .cap_version = NVC_IMAGER_CAPABILITIES_VERSION2,
+ .flash_control_enabled = 0,
+ .adjustable_flash_timing = 0,
+ .is_hdr = 1,
+};
+
+static struct ov5693_platform_data apalis_tk1_ov5693_front_pdata = {
+ .gpio_count = ARRAY_SIZE(ov5693_front_gpio_pdata),
+ .gpio = ov5693_front_gpio_pdata,
+ .power_on = apalis_tk1_ov5693_front_power_on,
+ .power_off = apalis_tk1_ov5693_front_power_off,
+ .dev_name = "ov5693.1",
+ .mclk_name = "mclk2",
+ .cap = &ov5693_front_cap,
+ .regulators = {
+ .avdd = "vana",
+ .dvdd = "vdig",
+ .dovdd = "vif",
+ },
+ .has_eeprom = 0,
+};
+
+static int apalis_tk1_ad5823_power_on(struct ad5823_platform_data *pdata)
+{
+ int err = 0;
+
+ pr_info("%s\n", __func__);
+ gpio_set_value_cansleep(pdata->gpio, 1);
+ pdata->pwr_dev = AD5823_PWR_DEV_ON;
+
+ return err;
+}
+
+static int apalis_tk1_ad5823_power_off(struct ad5823_platform_data *pdata)
+{
+ pr_info("%s\n", __func__);
+ gpio_set_value_cansleep(pdata->gpio, 0);
+ pdata->pwr_dev = AD5823_PWR_DEV_OFF;
+
+ return 0;
+}
+
+static struct ad5823_platform_data apalis_tk1_ad5823_pdata = {
+ .gpio = CAM_AF_PWDN,
+ .power_on = apalis_tk1_ad5823_power_on,
+ .power_off = apalis_tk1_ad5823_power_off,
+};
+
+static struct camera_data_blob apalis_tk1_camera_lut[] = {
+ {"apalis_tk1_imx135_pdata", &apalis_tk1_imx135_data},
+ {"apalis_tk1_imx185_pdata", &apalis_tk1_imx185_data},
+ {"apalis_tk1_dw9718_pdata", &apalis_tk1_dw9718_data},
+ {"apalis_tk1_ar0261_pdata", &apalis_tk1_ar0261_data},
+ {"apalis_tk1_mt9m114_pdata", &apalis_tk1_mt9m114_pdata},
+ {"apalis_tk1_ov5693_pdata", &apalis_tk1_ov5693_pdata},
+ {"apalis_tk1_ad5823_pdata", &apalis_tk1_ad5823_pdata},
+ {"apalis_tk1_as3648_pdata", &apalis_tk1_as3648_data},
+ {"apalis_tk1_ov7695_pdata", &apalis_tk1_ov7695_pdata},
+ {"apalis_tk1_ov5693f_pdata", &apalis_tk1_ov5693_front_pdata},
+ {"apalis_tk1_ar0330_pdata", &apalis_tk1_ar0330_data},
+ {"apalis_tk1_ar0330_front_pdata", &apalis_tk1_ar0330_front_data},
+ {"apalis_tk1_ov4689_pdata", &apalis_tk1_ov4689_data},
+ {"apalis_tk1_ar1335_pdata", &apalis_tk1_ar1335_data},
+ {},
+};
+
+void __init apalis_tk1_camera_auxdata(void *data)
+{
+ struct of_dev_auxdata *aux_lut = data;
+ while (aux_lut && aux_lut->compatible) {
+ if (!strcmp(aux_lut->compatible, "nvidia,tegra124-camera")) {
+ pr_info("%s: update camera lookup table.\n", __func__);
+ aux_lut->platform_data = apalis_tk1_camera_lut;
+ }
+ aux_lut++;
+ }
+}
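+
+/*
+ * Illustrative sketch (not from the original patch): the auxdata table
+ * walked above is expected to contain an entry along the lines of
+ *
+ *   OF_DEV_AUXDATA("nvidia,tegra124-camera", TEGRA_VI_BASE, "vi", NULL),
+ *
+ * and apalis_tk1_camera_auxdata() swaps its .platform_data for
+ * apalis_tk1_camera_lut before the platform devices are populated from
+ * DT. TEGRA_VI_BASE and the "vi" name are placeholders here, not values
+ * taken from this patch.
+ */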
+
+static int apalis_tk1_camera_init(void)
+{
+ struct board_info board_info;
+
+ pr_debug("%s: ++\n", __func__);
+ tegra_get_board_info(&board_info);
+
+ /* put CSIA/B/C/D/E IOs into DPD mode to
+ * save additional power for apalis_tk1
+ */
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ tegra_io_dpd_enable(&csie_io);
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_PLATFORM)
+ platform_device_register(&apalis_tk1_soc_camera_device);
+#endif
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_IMX135)
+ platform_device_register(&apalis_tk1_imx135_soc_camera_device);
+#endif
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_AR0261)
+ platform_device_register(&apalis_tk1_ar0261_soc_camera_device);
+#endif
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_AR0330)
+ platform_device_register(&apalis_tk1_ar0330_soc_camera_device);
+#endif
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_AP1302)
+ platform_device_register(&apalis_tk1_ap1302_soc_camera_device);
+#endif
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_OV5640)
+ platform_device_register(&apalis_tk1_ov5640_soc_camera_device);
+#endif
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_TC358743)
+ platform_device_register(&apalis_tk1_tc358743_soc_camera_device_a);
+ platform_device_register(&apalis_tk1_tc358743_soc_camera_device_b);
+#endif
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_ADV7280)
+ platform_device_register(&apalis_tk1_adv7280_soc_camera_device_c);
+#endif
+
+ return 0;
+}
+
+static struct pid_thermal_gov_params cpu_pid_params = {
+ .max_err_temp = 4000,
+ .max_err_gain = 1000,
+
+ .gain_p = 1000,
+ .gain_d = 0,
+
+ .up_compensation = 15,
+ .down_compensation = 15,
+};
+
+static struct thermal_zone_params cpu_tzp = {
+ .governor_name = "pid_thermal_gov",
+ .governor_params = &cpu_pid_params,
+};
+
+static struct thermal_zone_params board_tzp = {
+ .governor_name = "pid_thermal_gov"
+};
+
+static struct throttle_table cpu_throttle_table[] = {
+ /* CPU_THROT_LOW cannot be used by other than CPU */
+ /* CPU, GPU, C2BUS, C3BUS, SCLK, EMC */
+ { {2295000, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {2269500, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {2244000, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {2218500, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {2193000, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {2167500, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {2142000, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {2116500, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {2091000, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {2065500, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {2040000, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {2014500, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1989000, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1963500, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1938000, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1912500, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1887000, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1861500, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1836000, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1810500, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1785000, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1759500, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1734000, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1708500, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1683000, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1657500, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1632000, NO_CAP, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1606500, 790000, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1581000, 776000, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1555500, 762000, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1530000, 749000, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1504500, 735000, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1479000, 721000, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1453500, 707000, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1428000, 693000, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1402500, 679000, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1377000, 666000, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1351500, 652000, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1326000, 638000, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1300500, 624000, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1275000, 610000, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1249500, 596000, NO_CAP, NO_CAP, NO_CAP, NO_CAP} },
+ { {1224000, 582000, NO_CAP, NO_CAP, NO_CAP, 792000} },
+ { {1198500, 569000, NO_CAP, NO_CAP, NO_CAP, 792000} },
+ { {1173000, 555000, NO_CAP, NO_CAP, 360000, 792000} },
+ { {1147500, 541000, NO_CAP, NO_CAP, 360000, 792000} },
+ { {1122000, 527000, NO_CAP, 684000, 360000, 792000} },
+ { {1096500, 513000, 444000, 684000, 360000, 792000} },
+ { {1071000, 499000, 444000, 684000, 360000, 792000} },
+ { {1045500, 486000, 444000, 684000, 360000, 792000} },
+ { {1020000, 472000, 444000, 684000, 324000, 792000} },
+ { { 994500, 458000, 444000, 684000, 324000, 792000} },
+ { { 969000, 444000, 444000, 600000, 324000, 792000} },
+ { { 943500, 430000, 444000, 600000, 324000, 792000} },
+ { { 918000, 416000, 396000, 600000, 324000, 792000} },
+ { { 892500, 402000, 396000, 600000, 324000, 792000} },
+ { { 867000, 389000, 396000, 600000, 324000, 792000} },
+ { { 841500, 375000, 396000, 600000, 288000, 792000} },
+ { { 816000, 361000, 396000, 600000, 288000, 792000} },
+ { { 790500, 347000, 396000, 600000, 288000, 792000} },
+ { { 765000, 333000, 396000, 504000, 288000, 792000} },
+ { { 739500, 319000, 348000, 504000, 288000, 792000} },
+ { { 714000, 306000, 348000, 504000, 288000, 624000} },
+ { { 688500, 292000, 348000, 504000, 288000, 624000} },
+ { { 663000, 278000, 348000, 504000, 288000, 624000} },
+ { { 637500, 264000, 348000, 504000, 288000, 624000} },
+ { { 612000, 250000, 348000, 504000, 252000, 624000} },
+ { { 586500, 236000, 348000, 504000, 252000, 624000} },
+ { { 561000, 222000, 348000, 420000, 252000, 624000} },
+ { { 535500, 209000, 288000, 420000, 252000, 624000} },
+ { { 510000, 195000, 288000, 420000, 252000, 624000} },
+ { { 484500, 181000, 288000, 420000, 252000, 624000} },
+ { { 459000, 167000, 288000, 420000, 252000, 624000} },
+ { { 433500, 153000, 288000, 420000, 252000, 396000} },
+ { { 408000, 139000, 288000, 420000, 252000, 396000} },
+ { { 382500, 126000, 288000, 420000, 252000, 396000} },
+ { { 357000, 112000, 288000, 420000, 252000, 396000} },
+ { { 331500, 98000, 288000, 420000, 252000, 396000} },
+ { { 306000, 84000, 288000, 420000, 252000, 396000} },
+ { { 280500, 84000, 288000, 420000, 252000, 396000} },
+ { { 255000, 84000, 288000, 420000, 252000, 396000} },
+ { { 229500, 84000, 288000, 420000, 252000, 396000} },
+ { { 204000, 84000, 288000, 420000, 252000, 396000} },
+};
+
+static struct balanced_throttle cpu_throttle = {
+ .throt_tab_size = ARRAY_SIZE(cpu_throttle_table),
+ .throt_tab = cpu_throttle_table,
+};
+
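+/*
+ * Each row is one throttle state; the six columns cap the CPU, GPU, C2BUS,
+ * C3BUS, SCLK and EMC clocks (values in kHz, NO_CAP = unconstrained). E.g.
+ * the row { {1122000, 296000, 480000, 684000, 360000, 792000} } below limits
+ * the CPU to 1122 MHz, the GPU to 296 MHz and the EMC to 792 MHz.
+ */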
+static struct throttle_table gpu_throttle_table[] = {
+	/* CPU_THROT_LOW cannot be used by anything other than the CPU */
+ /* CPU, GPU, C2BUS, C3BUS, SCLK, EMC */
+ { {2295000, 782800, 480000, 756000, 384000, 924000} },
+ { {2269500, 772200, 480000, 756000, 384000, 924000} },
+ { {2244000, 761600, 480000, 756000, 384000, 924000} },
+ { {2218500, 751100, 480000, 756000, 384000, 924000} },
+ { {2193000, 740500, 480000, 756000, 384000, 924000} },
+ { {2167500, 729900, 480000, 756000, 384000, 924000} },
+ { {2142000, 719300, 480000, 756000, 384000, 924000} },
+ { {2116500, 708700, 480000, 756000, 384000, 924000} },
+ { {2091000, 698100, 480000, 756000, 384000, 924000} },
+ { {2065500, 687500, 480000, 756000, 384000, 924000} },
+ { {2040000, 676900, 480000, 756000, 384000, 924000} },
+ { {2014500, 666000, 480000, 756000, 384000, 924000} },
+ { {1989000, 656000, 480000, 756000, 384000, 924000} },
+ { {1963500, 645000, 480000, 756000, 384000, 924000} },
+ { {1938000, 635000, 480000, 756000, 384000, 924000} },
+ { {1912500, 624000, 480000, 756000, 384000, 924000} },
+ { {1887000, 613000, 480000, 756000, 384000, 924000} },
+ { {1861500, 603000, 480000, 756000, 384000, 924000} },
+ { {1836000, 592000, 480000, 756000, 384000, 924000} },
+ { {1810500, 582000, 480000, 756000, 384000, 924000} },
+ { {1785000, 571000, 480000, 756000, 384000, 924000} },
+ { {1759500, 560000, 480000, 756000, 384000, 924000} },
+ { {1734000, 550000, 480000, 756000, 384000, 924000} },
+ { {1708500, 539000, 480000, 756000, 384000, 924000} },
+ { {1683000, 529000, 480000, 756000, 384000, 924000} },
+ { {1657500, 518000, 480000, 756000, 384000, 924000} },
+ { {1632000, 508000, 480000, 756000, 384000, 924000} },
+ { {1606500, 497000, 480000, 756000, 384000, 924000} },
+ { {1581000, 486000, 480000, 756000, 384000, 924000} },
+ { {1555500, 476000, 480000, 756000, 384000, 924000} },
+ { {1530000, 465000, 480000, 756000, 384000, 924000} },
+ { {1504500, 455000, 480000, 756000, 384000, 924000} },
+ { {1479000, 444000, 480000, 756000, 384000, 924000} },
+ { {1453500, 433000, 480000, 756000, 384000, 924000} },
+ { {1428000, 423000, 480000, 756000, 384000, 924000} },
+ { {1402500, 412000, 480000, 756000, 384000, 924000} },
+ { {1377000, 402000, 480000, 756000, 384000, 924000} },
+ { {1351500, 391000, 480000, 756000, 384000, 924000} },
+ { {1326000, 380000, 480000, 756000, 384000, 924000} },
+ { {1300500, 370000, 480000, 756000, 384000, 924000} },
+ { {1275000, 359000, 480000, 756000, 384000, 924000} },
+ { {1249500, 349000, 480000, 756000, 384000, 924000} },
+ { {1224000, 338000, 480000, 756000, 384000, 792000} },
+ { {1198500, 328000, 480000, 756000, 384000, 792000} },
+ { {1173000, 317000, 480000, 756000, 360000, 792000} },
+ { {1147500, 306000, 480000, 756000, 360000, 792000} },
+ { {1122000, 296000, 480000, 684000, 360000, 792000} },
+ { {1096500, 285000, 444000, 684000, 360000, 792000} },
+ { {1071000, 275000, 444000, 684000, 360000, 792000} },
+ { {1045500, 264000, 444000, 684000, 360000, 792000} },
+ { {1020000, 253000, 444000, 684000, 324000, 792000} },
+ { { 994500, 243000, 444000, 684000, 324000, 792000} },
+ { { 969000, 232000, 444000, 600000, 324000, 792000} },
+ { { 943500, 222000, 444000, 600000, 324000, 792000} },
+ { { 918000, 211000, 396000, 600000, 324000, 792000} },
+ { { 892500, 200000, 396000, 600000, 324000, 792000} },
+ { { 867000, 190000, 396000, 600000, 324000, 792000} },
+ { { 841500, 179000, 396000, 600000, 288000, 792000} },
+ { { 816000, 169000, 396000, 600000, 288000, 792000} },
+ { { 790500, 158000, 396000, 600000, 288000, 792000} },
+ { { 765000, 148000, 396000, 504000, 288000, 792000} },
+ { { 739500, 137000, 348000, 504000, 288000, 792000} },
+ { { 714000, 126000, 348000, 504000, 288000, 624000} },
+ { { 688500, 116000, 348000, 504000, 288000, 624000} },
+ { { 663000, 105000, 348000, 504000, 288000, 624000} },
+ { { 637500, 95000, 348000, 504000, 288000, 624000} },
+ { { 612000, 84000, 348000, 504000, 252000, 624000} },
+ { { 586500, 84000, 348000, 504000, 252000, 624000} },
+ { { 561000, 84000, 348000, 420000, 252000, 624000} },
+ { { 535500, 84000, 288000, 420000, 252000, 624000} },
+ { { 510000, 84000, 288000, 420000, 252000, 624000} },
+ { { 484500, 84000, 288000, 420000, 252000, 624000} },
+ { { 459000, 84000, 288000, 420000, 252000, 624000} },
+ { { 433500, 84000, 288000, 420000, 252000, 396000} },
+ { { 408000, 84000, 288000, 420000, 252000, 396000} },
+ { { 382500, 84000, 288000, 420000, 252000, 396000} },
+ { { 357000, 84000, 288000, 420000, 252000, 396000} },
+ { { 331500, 84000, 288000, 420000, 252000, 396000} },
+ { { 306000, 84000, 288000, 420000, 252000, 396000} },
+ { { 280500, 84000, 288000, 420000, 252000, 396000} },
+ { { 255000, 84000, 288000, 420000, 252000, 396000} },
+ { { 229500, 84000, 288000, 420000, 252000, 396000} },
+ { { 204000, 84000, 288000, 420000, 252000, 396000} },
+};
+
+static struct balanced_throttle gpu_throttle = {
+ .throt_tab_size = ARRAY_SIZE(gpu_throttle_table),
+ .throt_tab = gpu_throttle_table,
+};
+
+/* throttle table that sets all clocks to approximately 50% of their max */
+static struct throttle_table emergency_throttle_table[] = {
+ /* CPU, GPU, C2BUS, C3BUS, SCLK, EMC */
+ { {1122000, 391000, 288000, 420000, 252000, 396000} },
+};
+
+static struct balanced_throttle emergency_throttle = {
+ .throt_tab_size = ARRAY_SIZE(emergency_throttle_table),
+ .throt_tab = emergency_throttle_table,
+};
+
+static int __init apalis_tk1_balanced_throttle_init(void)
+{
+ if (of_machine_is_compatible("toradex,apalis-tk1")) {
+ if (!balanced_throttle_register(&cpu_throttle, "cpu-balanced"))
+ pr_err
+ ("balanced_throttle_register 'cpu-balanced' FAILED.\n");
+ if (!balanced_throttle_register(&gpu_throttle, "gpu-balanced"))
+ pr_err
+ ("balanced_throttle_register 'gpu-balanced' FAILED.\n");
+ if (!balanced_throttle_register(&emergency_throttle,
+ "emergency-balanced"))
+ pr_err
+ ("balanced_throttle_register 'emergency-balanced' FAILED\n");
+ }
+
+ return 0;
+}
+late_initcall(apalis_tk1_balanced_throttle_init);
+
+static struct nct1008_platform_data apalis_tk1_nct72_pdata = {
+ .loc_name = "tegra",
+ .supported_hwrev = true,
+ .conv_rate = 0x06, /* 4Hz conversion rate */
+ .offset = 0,
+ .extended_range = true,
+
+ .sensors = {
+ [LOC] = {
+ .tzp = &board_tzp,
+ .shutdown_limit = 120, /* C */
+ .passive_delay = 1000,
+ .num_trips = 1,
+ .trips = {
+ {
+ .cdev_type = "therm_est_activ",
+ .trip_temp = 40000,
+ .trip_type = THERMAL_TRIP_ACTIVE,
+ .hysteresis = 1000,
+ .upper = THERMAL_NO_LIMIT,
+ .lower = THERMAL_NO_LIMIT,
+ .mask = 1,
+ },
+ },
+ },
+ [EXT] = {
+ .tzp = &cpu_tzp,
+ .shutdown_limit = 95, /* C */
+ .passive_delay = 1000,
+ .num_trips = 2,
+ .trips = {
+ {
+ .cdev_type = "shutdown_warning",
+ .trip_temp = 93000,
+ .trip_type = THERMAL_TRIP_PASSIVE,
+ .upper = THERMAL_NO_LIMIT,
+ .lower = THERMAL_NO_LIMIT,
+ .mask = 0,
+ },
+ {
+ .cdev_type = "cpu-balanced",
+ .trip_temp = 83000,
+ .trip_type = THERMAL_TRIP_PASSIVE,
+ .upper = THERMAL_NO_LIMIT,
+ .lower = THERMAL_NO_LIMIT,
+ .hysteresis = 1000,
+ .mask = 1,
+ },
+ }
+ }
+ }
+};
+
+static struct i2c_board_info apalis_tk1_i2c_nct72_board_info[] = {
+ {
+ I2C_BOARD_INFO("nct72", 0x4c),
+ .platform_data = &apalis_tk1_nct72_pdata,
+ .irq = -1,
+ },
+};
+
+static int apalis_tk1_nct72_init(void)
+{
+ int nct72_port = TEGRA_GPIO_PI6;
+ int ret = 0;
+ int i;
+ struct thermal_trip_info *trip_state;
+
+ /* raise NCT's thresholds if soctherm CP,FT fuses are ok */
+ if ((tegra_fuse_calib_base_get_cp(NULL, NULL) >= 0) &&
+ (tegra_fuse_calib_base_get_ft(NULL, NULL) >= 0)) {
+ apalis_tk1_nct72_pdata.sensors[EXT].shutdown_limit += 20;
+ for (i = 0; i < apalis_tk1_nct72_pdata.sensors[EXT].num_trips;
+ i++) {
+ trip_state =
+ &apalis_tk1_nct72_pdata.sensors[EXT].trips[i];
+ if (!strncmp
+ (trip_state->cdev_type, "cpu-balanced",
+ THERMAL_NAME_LENGTH)) {
+ trip_state->cdev_type = "_none_";
+ break;
+ }
+ }
+ } else {
+ tegra_platform_edp_init(
+ apalis_tk1_nct72_pdata.sensors[EXT].trips,
+ &apalis_tk1_nct72_pdata.sensors[EXT].num_trips,
+ 12000); /* edp temperature margin */
+ tegra_add_cpu_vmax_trips(apalis_tk1_nct72_pdata.sensors[EXT].
+ trips,
+ &apalis_tk1_nct72_pdata.sensors[EXT].
+ num_trips);
+ tegra_add_tgpu_trips(apalis_tk1_nct72_pdata.sensors[EXT].trips,
+ &apalis_tk1_nct72_pdata.sensors[EXT].
+ num_trips);
+ tegra_add_vc_trips(apalis_tk1_nct72_pdata.sensors[EXT].trips,
+ &apalis_tk1_nct72_pdata.sensors[EXT].
+ num_trips);
+ tegra_add_core_vmax_trips(apalis_tk1_nct72_pdata.sensors[EXT].
+ trips,
+ &apalis_tk1_nct72_pdata.sensors[EXT].
+ num_trips);
+ }
+
+ apalis_tk1_i2c_nct72_board_info[0].irq = gpio_to_irq(nct72_port);
+
+ ret = gpio_request(nct72_port, "temp_alert");
+ if (ret < 0)
+ return ret;
+
+ ret = gpio_direction_input(nct72_port);
+ if (ret < 0) {
+		pr_info("%s: calling gpio_free(nct72_port)\n", __func__);
+ gpio_free(nct72_port);
+ }
+
+ apalis_tk1_nct72_pdata.sensors[EXT].shutdown_limit = 105;
+ apalis_tk1_nct72_pdata.sensors[LOC].shutdown_limit = 100;
+ i2c_register_board_info(4, apalis_tk1_i2c_nct72_board_info,
+ 1); /* only register device[0] */
+
+ return ret;
+}
+
+struct ntc_thermistor_adc_table {
+ int temp; /* degree C */
+ int adc;
+};
+
+int __init apalis_tk1_sensors_init(void)
+{
+ apalis_tk1_nct72_init();
+
+ apalis_tk1_camera_init();
+
+ return 0;
+}
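For illustration only, one more trip could be described with the same thermal_trip_info fields used in the platform data above. This is a hedged sketch, not part of the patch: the cooling device name "tegra-fan" is a hypothetical example and would have to be registered elsewhere; trip temperatures are in millidegree Celsius, matching the 40000/83000/93000 values above.

	/* hypothetical additional active trip, illustrative only */
	static struct thermal_trip_info apalis_tk1_fan_trip = {
		.cdev_type = "tegra-fan",	/* assumed cooling device name */
		.trip_temp = 60000,		/* 60 C */
		.trip_type = THERMAL_TRIP_ACTIVE,
		.hysteresis = 5000,
		.upper = THERMAL_NO_LIMIT,
		.lower = THERMAL_NO_LIMIT,
		.mask = 1,
	};

Such an entry would be appended to the sensors[LOC].trips array with num_trips incremented accordingly.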
diff --git a/arch/arm/mach-tegra/board-apalis-tk1-sysedp.c b/arch/arm/mach-tegra/board-apalis-tk1-sysedp.c
new file mode 100644
index 000000000000..298440457afe
--- /dev/null
+++ b/arch/arm/mach-tegra/board-apalis-tk1-sysedp.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2016 Toradex AG. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/sysedp.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/tegra_edp.h>
+#include <linux/power_supply.h>
+#include <mach/edp.h>
+#include "board.h"
+#include "board-panel.h"
+
+/* --- EDP consumers data --- */
+/* TODO static unsigned int ov5693_states[] = { 0, 300 };*/
+static unsigned int mt9m114_states[] = { 0, 150 };
+static unsigned int sdhci_states[] = { 0, 966 };
+static unsigned int speaker_states[] = { 0, 1080 };
+static unsigned int wifi_states[] = { 0, 3070 };
+
+/* 10 inch panel */
+static unsigned int pwm_backlight_states[] = {
+ 0, 425, 851, 1276, 1702, 2127, 2553, 2978, 3404, 3829, 4255
+};
+
+/* TODO
+static unsigned int as364x_states[] = {
+ 0, 350, 700, 1050, 1400, 1750, 2100, 2450, 2800, 3150, 3500
+};
+*/
+
+static struct sysedp_consumer_data apalis_tk1_sysedp_consumer_data[] = {
+ /* TODO SYSEDP_CONSUMER_DATA("ov5693", ov5693_states), */
+ SYSEDP_CONSUMER_DATA("mt9m114", mt9m114_states),
+ SYSEDP_CONSUMER_DATA("speaker", speaker_states),
+ SYSEDP_CONSUMER_DATA("wifi", wifi_states),
+ SYSEDP_CONSUMER_DATA("pwm-backlight", pwm_backlight_states),
+ SYSEDP_CONSUMER_DATA("sdhci-tegra.2", sdhci_states),
+ SYSEDP_CONSUMER_DATA("sdhci-tegra.3", sdhci_states),
+ /* TODO SYSEDP_CONSUMER_DATA("as364x", as364x_states), */
+};
+
+static struct sysedp_platform_data apalis_tk1_sysedp_platform_data = {
+ .consumer_data = apalis_tk1_sysedp_consumer_data,
+ .consumer_data_size = ARRAY_SIZE(apalis_tk1_sysedp_consumer_data),
+ .margin = 0,
+};
+
+static struct platform_device apalis_tk1_sysedp_device = {
+ .name = "sysedp",
+ .id = -1,
+ .dev = {.platform_data = &apalis_tk1_sysedp_platform_data}
+};
+
+void __init apalis_tk1_new_sysedp_init(void)
+{
+ int r;
+
+ r = platform_device_register(&apalis_tk1_sysedp_device);
+ WARN_ON(r);
+}
+
+/* --- Battery monitor data --- */
+static struct sysedp_batmon_ibat_lut apalis_tk1_ibat_lut[] = {
+/*-- temp in deci-C, current in milli ampere --*/
+ {600, 9750},
+ {-300, 9750}
+};
+
+/* Values for Leyden HY-LDN-N-TD battery */
+/* 45C 23C 10C 5C 0C -20 */
+static int rbat_data[] = {
+ 100000, 120000, 140000, 170000, 190000, 210000, /* 100% */
+ 100000, 120000, 150000, 170000, 190000, 210000, /* 55% */
+ 100000, 130000, 150000, 170000, 200000, 210000, /* 50% */
+ 110000, 130000, 160000, 170000, 200000, 210000, /* 10% */
+ 120000, 140000, 170000, 180000, 210000, 220000, /* 0% */
+};
+static int rbat_temp_axis[] = { 45, 23, 10, 5, 0, -20 };
+static int rbat_capacity_axis[] = { 100, 55, 50, 10, 0 };
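+/*
+ * rbat_data is indexed [capacity][temperature]: e.g. at 10% capacity and
+ * 5 degC the entry is 170000 (presumably micro-ohm, like r_const below),
+ * i.e. roughly 170 mOhm of battery impedance.
+ */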
+
+struct sysedp_batmon_rbat_lut apalis_tk1_rbat_lut = {
+ .temp_axis = rbat_temp_axis,
+ .temp_size = ARRAY_SIZE(rbat_temp_axis),
+ .capacity_axis = rbat_capacity_axis,
+ .capacity_size = ARRAY_SIZE(rbat_capacity_axis),
+ .data = rbat_data,
+ .data_size = ARRAY_SIZE(rbat_data),
+};
+
+/* Fuel Gauge is BQ20z45 (SBS battery) */
+static struct sysedp_batmon_ocv_lut apalis_tk1_ocv_lut[] = {
+	/* SOC in %, OCV in microvolt */
+ {100, 8372010},
+ {95, 8163880},
+ {90, 8069280},
+ {85, 7970700},
+ {80, 7894100},
+ {75, 7820860},
+ {70, 7751890},
+ {65, 7691770},
+ {60, 7641110},
+ {55, 7598990},
+ {50, 7564200},
+ {45, 7534290},
+ {40, 7511410},
+ {35, 7491870},
+ {30, 7468380},
+ {25, 7435720},
+ {20, 7388720},
+ {15, 7338370},
+ {10, 7219650},
+ {0, 5999850},
+};
+
+static struct sysedp_batmon_calc_platform_data apalis_tk1_batmon_pdata = {
+ .power_supply = "sbs-battery",
+ .r_const = 70000, /* in micro ohm */
+ .vsys_min = 5880000, /* in micro volt */
+ .ibat_lut = apalis_tk1_ibat_lut,
+ .rbat_lut = &apalis_tk1_rbat_lut,
+ .ocv_lut = apalis_tk1_ocv_lut,
+};
+
+static struct platform_device apalis_tk1_batmon_device = {
+ .name = "sysedp_batmon_calc",
+ .id = -1,
+ .dev = {.platform_data = &apalis_tk1_batmon_pdata}
+};
+
+void __init apalis_tk1_sysedp_batmon_init(void)
+{
+ int r;
+
+ if (get_power_supply_type() != POWER_SUPPLY_TYPE_BATTERY) {
+ /* modify platform data on-the-fly to enable virtual battery */
+ apalis_tk1_batmon_pdata.power_supply = "test_battery";
+ apalis_tk1_batmon_pdata.update_interval = 2000;
+ }
+
+ r = platform_device_register(&apalis_tk1_batmon_device);
+ WARN_ON(r);
+}
+
+static struct tegra_sysedp_platform_data
+ apalis_tk1_sysedp_dynamic_capping_platdata = {
+ .core_gain = 100,
+ .init_req_watts = 20000,
+};
+
+static struct platform_device apalis_tk1_sysedp_dynamic_capping = {
+ .name = "sysedp_dynamic_capping",
+ .id = -1,
+ .dev = {.platform_data = &apalis_tk1_sysedp_dynamic_capping_platdata}
+};
+
+void __init apalis_tk1_sysedp_dynamic_capping_init(void)
+{
+ int r;
+ struct tegra_sysedp_corecap *corecap;
+ unsigned int corecap_size;
+
+ corecap = tegra_get_sysedp_corecap(&corecap_size);
+ if (!corecap) {
+ WARN_ON(1);
+ return;
+ }
+ apalis_tk1_sysedp_dynamic_capping_platdata.corecap = corecap;
+ apalis_tk1_sysedp_dynamic_capping_platdata.corecap_size = corecap_size;
+
+ apalis_tk1_sysedp_dynamic_capping_platdata.cpufreq_lim =
+ tegra_get_system_edp_entries
+ (&apalis_tk1_sysedp_dynamic_capping_platdata.cpufreq_lim_size);
+ if (!apalis_tk1_sysedp_dynamic_capping_platdata.cpufreq_lim) {
+ WARN_ON(1);
+ return;
+ }
+
+ r = platform_device_register(&apalis_tk1_sysedp_dynamic_capping);
+ WARN_ON(r);
+}
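As a non-authoritative aside, drivers report their momentary consumption against the consumer tables above by selecting a state index. A minimal sketch follows, assuming the sysedp consumer API exported via <linux/sysedp.h> is sysedp_create_consumer(specname, consumername) and sysedp_set_state(consumer, state); these names and signatures are an assumption, not taken from this patch, and the driver-side variable is hypothetical.

	#include <linux/sysedp.h>

	static struct sysedp_consumer *wifi_edp;	/* hypothetical driver state */

	static void wifi_report_active(bool active)
	{
		if (!wifi_edp)
			wifi_edp = sysedp_create_consumer("wifi", "wifi");
		if (wifi_edp)
			/* state indexes wifi_states[]: 0 = off, 1 = 3070 mW */
			sysedp_set_state(wifi_edp, active ? 1 : 0);
	}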
diff --git a/arch/arm/mach-tegra/board-apalis-tk1.c b/arch/arm/mach-tegra/board-apalis-tk1.c
new file mode 100644
index 000000000000..d88ccb9889b8
--- /dev/null
+++ b/arch/arm/mach-tegra/board-apalis-tk1.c
@@ -0,0 +1,645 @@
+/*
+ * arch/arm/mach-tegra/board-apalis-tk1.c
+ *
+ * Copyright (c) 2016, Toradex AG. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/serial_8250.h>
+#include <linux/i2c.h>
+#include <linux/i2c/i2c-hid.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/i2c-tegra.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/platform_data/tegra_usb.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/rm31080a_ts.h>
+#include <linux/maxim_sti.h>
+#include <linux/memblock.h>
+#include <linux/spi/spi-tegra.h>
+#include <linux/nfc/pn544.h>
+#include <linux/rfkill-gpio.h>
+#include <linux/skbuff.h>
+#include <linux/ti_wilink_st.h>
+#include <linux/regulator/consumer.h>
+#include <linux/smb349-charger.h>
+#include <linux/max17048_battery.h>
+#include <linux/leds.h>
+#include <linux/i2c/at24.h>
+#include <linux/of_platform.h>
+#include <linux/i2c.h>
+#include <linux/i2c-tegra.h>
+#include <linux/platform_data/serial-tegra.h>
+#include <linux/edp.h>
+#include <linux/usb/tegra_usb_phy.h>
+#include <linux/mfd/palmas.h>
+#include <linux/clk/tegra.h>
+#include <media/tegra_dtv.h>
+#include <linux/clocksource.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/tegra.h>
+#include <linux/tegra-soc.h>
+#include <linux/tegra_fiq_debugger.h>
+#include <linux/platform_data/tegra_usb_modem_power.h>
+#include <linux/platform_data/tegra_ahci.h>
+#include <linux/irqchip/tegra.h>
+#include <sound/max98090.h>
+#include <linux/pci.h>
+
+#include <mach/irqs.h>
+#include <mach/pinmux.h>
+#include <mach/pinmux-t12.h>
+#include <mach/io_dpd.h>
+#include <mach/i2s.h>
+#include <mach/isomgr.h>
+#include <mach/tegra_asoc_pdata.h>
+#include <mach/dc.h>
+#include <mach/tegra_usb_pad_ctrl.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <mach/gpio-tegra.h>
+#include <mach/xusb.h>
+
+#include "board.h"
+#include "board-apalis-tk1.h"
+#include "board-common.h"
+#include "board-touch-raydium.h"
+#include "board-touch-maxim_sti.h"
+#include "clock.h"
+#include "common.h"
+#include "devices.h"
+#include "gpio-names.h"
+#include "iomap.h"
+#include "pm.h"
+#include "tegra-board-id.h"
+#include "tegra-of-dev-auxdata.h"
+
+static struct i2c_board_info apalis_tk1_sgtl5000_board_info = {
+ /* SGTL5000 audio codec */
+ I2C_BOARD_INFO("sgtl5000", 0x0a),
+};
+
+static __initdata struct tegra_clk_init_table apalis_tk1_clk_init_table[] = {
+ /* name parent rate enabled */
+ { "pll_m", NULL, 0, false},
+ { "hda", "pll_p", 108000000, false},
+ { "hda2codec_2x", "pll_p", 48000000, false},
+ { "pwm", "pll_p", 48000000, false},
+ { "pll_a", "pll_p_out1", 282240000, false},
+ { "pll_a_out0", "pll_a", 12288000, false},
+ { "i2s2", "pll_a_out0", 0, false},
+ { "spdif_out", "pll_a_out0", 0, false},
+ { "d_audio", "pll_a_out0", 12288000, false},
+ { "dam0", "clk_m", 12000000, false},
+ { "dam1", "clk_m", 12000000, false},
+ { "dam2", "clk_m", 12000000, false},
+ { "audio2", "i2s2_sync", 0, false},
+ { "vi_sensor", "pll_p", 150000000, false},
+ { "vi_sensor2", "pll_p", 150000000, false},
+ { "cilab", "pll_p", 150000000, false},
+ { "cilcd", "pll_p", 150000000, false},
+ { "cile", "pll_p", 150000000, false},
+ { "i2c1", "pll_p", 3200000, false},
+ { "i2c2", "pll_p", 3200000, false},
+ { "i2c3", "pll_p", 3200000, false},
+ { "i2c4", "pll_p", 3200000, false},
+ { "i2c5", "pll_p", 3200000, false},
+ { "sbc1", "pll_p", 25000000, false},
+ { "sbc2", "clk_m", 12000000, false},
+ { "sbc3", "pll_p", 25000000, false},
+ { "sbc4", "pll_p", 25000000, false},
+ { "sbc5", "pll_p", 25000000, false},
+ { "sbc6", "pll_p", 25000000, false},
+ { "uarta", "pll_p", 408000000, false},
+ { "uartb", "pll_p", 408000000, false},
+ { "uartc", "pll_p", 408000000, false},
+ { "uartd", "pll_p", 408000000, false},
+ { NULL, NULL, 0, 0},
+};
+
+static void apalis_tk1_i2c_init(void)
+{
+ i2c_register_board_info(4, &apalis_tk1_sgtl5000_board_info, 1);
+}
+
+static struct tegra_asoc_platform_data apalis_tk1_audio_pdata_sgtl5000 = {
+ .gpio_hp_det = -1,
+ .gpio_ldo1_en = -1,
+ .gpio_spkr_en = -1,
+ .gpio_int_mic_en = -1,
+ .gpio_ext_mic_en = -1,
+ .gpio_hp_mute = -1,
+ .gpio_codec1 = -1,
+ .gpio_codec2 = -1,
+ .gpio_codec3 = -1,
+ .i2s_param[HIFI_CODEC] = {
+		.audio_port_id = 1, /* index of the tegra_i2s_device
+					   registered below, plus one if the
+					   HDA codec is activated as well */
+ .is_i2s_master = 1, /* meaning TK1 SoC is I2S master */
+ .i2s_mode = TEGRA_DAIFMT_I2S,
+ .sample_size = 16,
+ .channels = 2,
+ .bit_clk = 1536000,
+ },
+ .i2s_param[BT_SCO] = {
+ .audio_port_id = -1,
+ },
+ .i2s_param[BASEBAND] = {
+ .audio_port_id = -1,
+ },
+};
+
+static struct platform_device apalis_tk1_audio_device_sgtl5000 = {
+ .name = "tegra-snd-apalis-tk1-sgtl5000",
+ .id = 0,
+ .dev = {
+ .platform_data = &apalis_tk1_audio_pdata_sgtl5000,
+ },
+};
+
+static struct tegra_serial_platform_data apalis_tk1_uarta_pdata = {
+ .dma_req_selector = 8,
+ .modem_interrupt = false,
+};
+
+static void __init apalis_tk1_uart_init(void)
+{
+ tegra_uarta_device.dev.platform_data = &apalis_tk1_uarta_pdata;
+ if (!is_tegra_debug_uartport_hs()) {
+ int debug_port_id = uart_console_debug_init(0);
+ if (debug_port_id < 0)
+ return;
+
+#ifdef CONFIG_TEGRA_FIQ_DEBUGGER
+#if !defined(CONFIG_TRUSTED_FOUNDATIONS) && defined(CONFIG_ARCH_TEGRA_12x_SOC) \
+ && defined(CONFIG_FIQ_DEBUGGER)
+ tegra_serial_debug_init(TEGRA_UARTA_BASE, INT_WDT_AVP, NULL, -1, -1);
+ platform_device_register(uart_console_debug_device);
+#else
+ tegra_serial_debug_init(TEGRA_UARTA_BASE, INT_WDT_CPU, NULL, -1, -1);
+#endif
+#else
+ platform_device_register(uart_console_debug_device);
+#endif
+ } else {
+ tegra_uarta_device.dev.platform_data = &apalis_tk1_uarta_pdata;
+ platform_device_register(&tegra_uarta_device);
+ }
+}
+
+static struct resource tegra_rtc_resources[] = {
+ [0] = {
+ .start = TEGRA_RTC_BASE,
+ .end = TEGRA_RTC_BASE + TEGRA_RTC_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = INT_RTC,
+ .end = INT_RTC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device tegra_rtc_device = {
+ .name = "tegra_rtc",
+ .id = -1,
+ .resource = tegra_rtc_resources,
+ .num_resources = ARRAY_SIZE(tegra_rtc_resources),
+};
+
+static struct platform_device *apalis_tk1_devices[] __initdata = {
+ &tegra_pmu_device,
+ &tegra_rtc_device,
+#if defined(CONFIG_TEGRA_WAKEUP_MONITOR)
+ &tegratab_tegra_wakeup_monitor_device,
+#endif
+ &tegra_udc_device,
+#if defined(CONFIG_TEGRA_WATCHDOG)
+ &tegra_wdt0_device,
+#endif
+#if defined(CONFIG_TEGRA_AVP)
+ &tegra_avp_device,
+#endif
+#if defined(CONFIG_CRYPTO_DEV_TEGRA_SE) && !defined(CONFIG_USE_OF)
+ &tegra12_se_device,
+#endif
+ &tegra_ahub_device,
+//TBD: all DAMs needed?
+ &tegra_dam_device0,
+ &tegra_dam_device1,
+ &tegra_dam_device2,
+ &tegra_i2s_device2,
+ &tegra_spdif_device,
+ &spdif_dit_device,
+ &tegra_hda_device,
+ &tegra_offload_device,
+ &tegra30_avp_audio_device,
+#if defined(CONFIG_CRYPTO_DEV_TEGRA_AES)
+ &tegra_aes_device,
+#endif
+ &tegra_hier_ictlr_device,
+};
+
+static struct tegra_usb_platform_data tegra_udc_pdata = {
+ .port_otg = true,
+ .has_hostpc = true,
+ .unaligned_dma_buf_supported = false,
+ .phy_intf = TEGRA_USB_PHY_INTF_UTMI,
+ .op_mode = TEGRA_USB_OPMODE_DEVICE,
+ .u_data.dev = {
+ .vbus_pmu_irq = 0,
+ .vbus_gpio = -1,
+ .charging_supported = false,
+ .remote_wakeup_supported = false,
+ },
+ .u_cfg.utmi = {
+ .hssync_start_delay = 0,
+ .elastic_limit = 16,
+ .idle_wait_delay = 17,
+ .term_range_adj = 6,
+ .xcvr_setup = 8,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ .xcvr_setup_offset = 0,
+ .xcvr_use_fuses = 1,
+ },
+};
+
+static struct tegra_usb_platform_data tegra_ehci1_utmi_pdata = {
+ .port_otg = true,
+ .has_hostpc = true,
+ .unaligned_dma_buf_supported = false,
+ .phy_intf = TEGRA_USB_PHY_INTF_UTMI,
+ .op_mode = TEGRA_USB_OPMODE_HOST,
+ .u_data.host = {
+ .vbus_gpio = -1,
+ .hot_plug = false,
+ .remote_wakeup_supported = true,
+ .power_off_on_suspend = true,
+ },
+ .u_cfg.utmi = {
+ .hssync_start_delay = 0,
+ .elastic_limit = 16,
+ .idle_wait_delay = 17,
+ .term_range_adj = 6,
+ .xcvr_setup = 15,
+ .xcvr_lsfslew = 0,
+ .xcvr_lsrslew = 3,
+ .xcvr_setup_offset = 0,
+ .xcvr_use_fuses = 1,
+ .vbus_oc_map = 0x4,
+ .xcvr_hsslew_lsb = 2,
+ },
+};
+
+static struct tegra_usb_platform_data tegra_ehci2_utmi_pdata = {
+ .port_otg = false,
+ .has_hostpc = true,
+ .unaligned_dma_buf_supported = false,
+ .phy_intf = TEGRA_USB_PHY_INTF_UTMI,
+ .op_mode = TEGRA_USB_OPMODE_HOST,
+ .u_data.host = {
+ .vbus_gpio = -1,
+ .hot_plug = false,
+ .remote_wakeup_supported = true,
+ .power_off_on_suspend = true,
+ },
+ .u_cfg.utmi = {
+ .hssync_start_delay = 0,
+ .elastic_limit = 16,
+ .idle_wait_delay = 17,
+ .term_range_adj = 6,
+ .xcvr_setup = 8,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ .xcvr_setup_offset = 0,
+ .xcvr_use_fuses = 1,
+ .vbus_oc_map = 0x5,
+ },
+};
+
+static struct tegra_usb_platform_data tegra_ehci3_utmi_pdata = {
+ .port_otg = false,
+ .has_hostpc = true,
+ .unaligned_dma_buf_supported = false,
+ .phy_intf = TEGRA_USB_PHY_INTF_UTMI,
+ .op_mode = TEGRA_USB_OPMODE_HOST,
+ .u_data.host = {
+ .vbus_gpio = -1,
+ .hot_plug = false,
+ .remote_wakeup_supported = true,
+ .power_off_on_suspend = true,
+ },
+ .u_cfg.utmi = {
+ .hssync_start_delay = 0,
+ .elastic_limit = 16,
+ .idle_wait_delay = 17,
+ .term_range_adj = 6,
+ .xcvr_setup = 8,
+ .xcvr_lsfslew = 2,
+ .xcvr_lsrslew = 2,
+ .xcvr_setup_offset = 0,
+ .xcvr_use_fuses = 1,
+ .vbus_oc_map = 0x5,
+ },
+};
+
+static struct tegra_usb_otg_data tegra_otg_pdata = {
+ .ehci_device = &tegra_ehci1_device,
+ .ehci_pdata = &tegra_ehci1_utmi_pdata,
+};
+
+static void apalis_tk1_usb_init(void)
+{
+ int usb_port_owner_info = tegra_get_usb_port_owner_info();
+/* TBD
+ tegra_ehci1_utmi_pdata.u_data.host.turn_off_vbus_on_lp0 = true; */
+
+ tegra_udc_pdata.id_det_type = TEGRA_USB_ID;
+ tegra_ehci1_utmi_pdata.id_det_type = TEGRA_USB_ID;
+
+ if (!(usb_port_owner_info & UTMI1_PORT_OWNER_XUSB)) {
+ tegra_otg_pdata.is_xhci = false;
+ tegra_udc_pdata.u_data.dev.is_xhci = false;
+ } else {
+ tegra_otg_pdata.is_xhci = true;
+ tegra_udc_pdata.u_data.dev.is_xhci = true;
+ }
+ tegra_otg_device.dev.platform_data = &tegra_otg_pdata;
+ platform_device_register(&tegra_otg_device);
+ /* Setup the udc platform data */
+ tegra_udc_device.dev.platform_data = &tegra_udc_pdata;
+
+ if (!(usb_port_owner_info & UTMI2_PORT_OWNER_XUSB)) {
+ tegra_ehci2_device.dev.platform_data = &tegra_ehci2_utmi_pdata;
+ platform_device_register(&tegra_ehci2_device);
+ }
+
+ if (!(usb_port_owner_info & UTMI2_PORT_OWNER_XUSB)) {
+ tegra_ehci3_device.dev.platform_data = &tegra_ehci3_utmi_pdata;
+ platform_device_register(&tegra_ehci3_device);
+ }
+}
+
+static struct tegra_xusb_platform_data xusb_pdata = {
+ .portmap = TEGRA_XUSB_SS_P0 | TEGRA_XUSB_USB2_P0 | TEGRA_XUSB_SS_P1 |
+ TEGRA_XUSB_USB2_P1 | TEGRA_XUSB_USB2_P2,
+};
+
+#ifdef CONFIG_TEGRA_XUSB_PLATFORM
+static void apalis_tk1_xusb_init(void)
+{
+ int usb_port_owner_info = tegra_get_usb_port_owner_info();
+
+ xusb_pdata.lane_owner = (u8) tegra_get_lane_owner_info();
+
+ if (!(usb_port_owner_info & UTMI1_PORT_OWNER_XUSB))
+ xusb_pdata.portmap &= ~(TEGRA_XUSB_USB2_P0);
+ if (!(usb_port_owner_info & UTMI2_PORT_OWNER_XUSB))
+ xusb_pdata.portmap &= ~(TEGRA_XUSB_USB2_P2 |
+ TEGRA_XUSB_USB2_P1 | TEGRA_XUSB_SS_P0);
+ xusb_pdata.portmap &= ~(TEGRA_XUSB_SS_P1);
+
+ //TBD: UTMI3
+}
+#endif
+
+#ifdef CONFIG_USE_OF
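+/*
+ * Auxdata entries keep the legacy platform device names (and, where needed,
+ * platform data such as &xusb_pdata) for devices instantiated from the device
+ * tree, so that name-based clock and driver lookups keep working.
+ */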
+static struct of_dev_auxdata apalis_tk1_auxdata_lookup[] __initdata = {
+ T124_SPI_OF_DEV_AUXDATA,
+ OF_DEV_AUXDATA("nvidia,tegra124-apbdma", 0x60020000, "tegra-apbdma",
+ NULL),
+ OF_DEV_AUXDATA("nvidia,tegra124-se", 0x70012000, "tegra12-se", NULL),
+ OF_DEV_AUXDATA("nvidia,tegra124-host1x", TEGRA_HOST1X_BASE, "host1x",
+ NULL),
+ OF_DEV_AUXDATA("nvidia,tegra124-gk20a", TEGRA_GK20A_BAR0_BASE,
+ "gk20a.0", NULL),
+#ifdef CONFIG_ARCH_TEGRA_VIC
+ OF_DEV_AUXDATA("nvidia,tegra124-vic", TEGRA_VIC_BASE, "vic03.0", NULL),
+#endif
+ OF_DEV_AUXDATA("nvidia,tegra124-msenc", TEGRA_MSENC_BASE, "msenc",
+ NULL),
+ OF_DEV_AUXDATA("nvidia,tegra124-vi", TEGRA_VI_BASE, "vi.0", NULL),
+ OF_DEV_AUXDATA("nvidia,tegra124-isp", TEGRA_ISP_BASE, "isp.0", NULL),
+ OF_DEV_AUXDATA("nvidia,tegra124-isp", TEGRA_ISPB_BASE, "isp.1", NULL),
+ OF_DEV_AUXDATA("nvidia,tegra124-tsec", TEGRA_TSEC_BASE, "tsec", NULL),
+ OF_DEV_AUXDATA("nvidia,tegra114-hsuart", 0x70006000, "serial-tegra.0",
+ NULL),
+ OF_DEV_AUXDATA("nvidia,tegra114-hsuart", 0x70006040, "serial-tegra.1",
+ NULL),
+ OF_DEV_AUXDATA("nvidia,tegra114-hsuart", 0x70006200, "serial-tegra.2",
+ NULL),
+ OF_DEV_AUXDATA("nvidia,tegra114-hsuart", 0x70006300, "serial-tegra.3",
+ NULL),
+ T124_I2C_OF_DEV_AUXDATA,
+ OF_DEV_AUXDATA("nvidia,tegra124-xhci", 0x70090000, "tegra-xhci",
+ &xusb_pdata),
+ OF_DEV_AUXDATA("nvidia,tegra124-dc", TEGRA_DISPLAY_BASE, "tegradc.0",
+ NULL),
+ OF_DEV_AUXDATA("nvidia,tegra124-dc", TEGRA_DISPLAY2_BASE, "tegradc.1",
+ NULL),
+ OF_DEV_AUXDATA("nvidia,tegra124-nvavp", 0x60001000, "nvavp",
+ NULL),
+ OF_DEV_AUXDATA("nvidia,tegra124-pwm", 0x7000a000, "tegra-pwm", NULL),
+ OF_DEV_AUXDATA("nvidia,tegra124-dfll", 0x70110000, "tegra_cl_dvfs",
+ NULL),
+ OF_DEV_AUXDATA("nvidia,tegra132-dfll", 0x70040084, "tegra_cl_dvfs",
+ NULL),
+ OF_DEV_AUXDATA("nvidia,tegra124-efuse", TEGRA_FUSE_BASE, "tegra-fuse",
+ NULL),
+ OF_DEV_AUXDATA("nvidia,tegra124-camera", 0, "pcl-generic",
+ NULL),
+ OF_DEV_AUXDATA("nvidia,tegra114-ahci-sata", 0x70027000, "tegra-sata.0",
+ NULL),
+ {}
+};
+#endif
+
+static void __init edp_init(void)
+{
+ apalis_tk1_edp_init();
+}
+
+static void __init tegra_apalis_tk1_early_init(void)
+{
+ tegra_clk_init_from_table(apalis_tk1_clk_init_table);
+ tegra_clk_verify_parents();
+ tegra_soc_device_init("apalis-tk1");
+}
+
+static struct tegra_dtv_platform_data apalis_tk1_dtv_pdata = {
+ .dma_req_selector = 11,
+};
+
+static void __init apalis_tk1_dtv_init(void)
+{
+ tegra_dtv_device.dev.platform_data = &apalis_tk1_dtv_pdata;
+ platform_device_register(&tegra_dtv_device);
+}
+
+static struct tegra_io_dpd pexbias_io = {
+ .name = "PEX_BIAS",
+ .io_dpd_reg_index = 0,
+ .io_dpd_bit = 4,
+};
+
+static struct tegra_io_dpd pexclk1_io = {
+ .name = "PEX_CLK1",
+ .io_dpd_reg_index = 0,
+ .io_dpd_bit = 5,
+};
+
+static struct tegra_io_dpd pexclk2_io = {
+ .name = "PEX_CLK2",
+ .io_dpd_reg_index = 0,
+ .io_dpd_bit = 6,
+};
+
+static void __init tegra_apalis_tk1_late_init(void)
+{
+ apalis_tk1_display_init();
+ apalis_tk1_uart_init();
+ apalis_tk1_usb_init();
+#ifdef CONFIG_TEGRA_XUSB_PLATFORM
+ apalis_tk1_xusb_init();
+#endif
+ apalis_tk1_i2c_init();
+ platform_add_devices(apalis_tk1_devices,
+ ARRAY_SIZE(apalis_tk1_devices));
+ platform_device_register(&apalis_tk1_audio_device_sgtl5000);
+ tegra_io_dpd_init();
+ apalis_tk1_sdhci_init();
+
+ apalis_tk1_regulator_init();
+
+ apalis_tk1_dtv_init();
+ apalis_tk1_suspend_init();
+
+ apalis_tk1_emc_init();
+
+ edp_init();
+ isomgr_init();
+ apalis_tk1_panel_init();
+
+ /* put PEX pads into DPD mode to save additional power */
+ tegra_io_dpd_enable(&pexbias_io);
+ tegra_io_dpd_enable(&pexclk1_io);
+ tegra_io_dpd_enable(&pexclk2_io);
+
+#ifdef CONFIG_TEGRA_WDT_RECOVERY
+ tegra_wdt_recovery_init();
+#endif
+
+ apalis_tk1_sensors_init();
+ apalis_tk1_soctherm_init();
+}
+
+static void __init tegra_apalis_tk1_init_early(void)
+{
+ apalis_tk1_rail_alignment_init();
+ tegra12x_init_early();
+}
+
+static void __init tegra_apalis_tk1_dt_init(void)
+{
+ tegra_apalis_tk1_early_init();
+#ifdef CONFIG_NVMAP_USE_CMA_FOR_CARVEOUT
+ carveout_linear_set(&tegra_generic_cma_dev);
+ carveout_linear_set(&tegra_vpr_cma_dev);
+#endif
+#ifdef CONFIG_USE_OF
+ apalis_tk1_camera_auxdata(apalis_tk1_auxdata_lookup);
+ of_platform_populate(NULL,
+ of_default_bus_match_table,
+ apalis_tk1_auxdata_lookup, &platform_bus);
+#endif
+
+ tegra_apalis_tk1_late_init();
+}
+
+/*
+ * The Apalis evaluation board needs to set the link speed to 2.5 GT/s (GEN1).
+ * The default link speed setting is 5 GT/s (GEN2); offset 0x98 is the Link
+ * Control 2 register in the PCIe capability of the PEX8605 PCIe switch.
+ * With the default speed setting of 5 GT/s (GEN2) the switch does not bring
+ * up any of its downstream links. Limiting it to GEN1 makes the downstream
+ * links come up and work, albeit at GEN1 speed only.
+ */
+static void quirk_apalis_plx_gen1(struct pci_dev *dev)
+{
+ pci_write_config_dword(dev, 0x98, 0x01);
+ mdelay(50);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8605, quirk_apalis_plx_gen1);
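A side note on the quirk above: since 0x98 is the Link Control 2 register of the switch's PCI Express capability, the same limit could also be expressed through the PCIe capability accessors. A sketch only, under that assumption; it is not what the patch does:

	static void quirk_apalis_plx_gen1_alt(struct pci_dev *dev)
	{
		/* Target Link Speed = 2.5 GT/s, mirroring the raw write above */
		pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, 0x01);
		mdelay(50);
	}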
+
+static void __init tegra_apalis_tk1_reserve(void)
+{
+#ifdef CONFIG_TEGRA_HDMI_PRIMARY
+ ulong tmp;
+#endif /* CONFIG_TEGRA_HDMI_PRIMARY */
+
+#if defined(CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM) || \
+ defined(CONFIG_TEGRA_NO_CARVEOUT)
+ ulong carveout_size = 0;
+ ulong fb2_size = SZ_16M;
+#else
+ ulong carveout_size = SZ_1G;
+ ulong fb2_size = SZ_4M;
+#endif
+ ulong fb1_size = SZ_16M + SZ_2M;
+ ulong vpr_size = 186 * SZ_1M;
+
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE
+ /* support FBcon on 4K monitors */
+ fb2_size = SZ_64M + SZ_8M; /* 4096*2160*4*2 = 70778880 bytes */
+#endif /* CONFIG_FRAMEBUFFER_CONSOLE */
+
+#ifdef CONFIG_TEGRA_HDMI_PRIMARY
+ tmp = fb1_size;
+ fb1_size = fb2_size;
+ fb2_size = tmp;
+#endif /* CONFIG_TEGRA_HDMI_PRIMARY */
+
+ tegra_reserve4(carveout_size, fb1_size, fb2_size, vpr_size);
+}
+
+static const char *const apalis_tk1_dt_board_compat[] = {
+ "toradex,apalis-tk1",
+ NULL
+};
+
+DT_MACHINE_START(APALIS_TK1, "apalis-tk1")
+ .atag_offset = 0x100,
+ .smp = smp_ops(tegra_smp_ops),
+ .map_io = tegra_map_common_io,
+ .reserve = tegra_apalis_tk1_reserve,
+ .init_early = tegra_apalis_tk1_init_early,
+ .init_irq = irqchip_init,
+ .init_time = clocksource_of_init,
+ .init_machine = tegra_apalis_tk1_dt_init,
+ .restart = tegra_assert_system_reset,
+ .dt_compat = apalis_tk1_dt_board_compat,
+ .init_late = tegra_init_late
+MACHINE_END
diff --git a/arch/arm/mach-tegra/board-apalis-tk1.h b/arch/arm/mach-tegra/board-apalis-tk1.h
new file mode 100644
index 000000000000..3085eb757a9c
--- /dev/null
+++ b/arch/arm/mach-tegra/board-apalis-tk1.h
@@ -0,0 +1,162 @@
+/*
+ * arch/arm/mach-tegra/board-apalis-tk1.h
+ *
+ * Copyright (c) 2016-2017, Toradex AG. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MACH_TEGRA_BOARD_APALIS_TK1_H
+#define _MACH_TEGRA_BOARD_APALIS_TK1_H
+
+#include <mach/gpio-tegra.h>
+#include <mach/irqs.h>
+#include "gpio-names.h"
+
+//#define APALIS_TK1_EDP
+
+/* GPIO */
+
+#define APALIS_GPIO1 TEGRA_GPIO_PFF2
+#define APALIS_GPIO2 TEGRA_GPIO_PFF0
+#ifdef APALIS_TK1_V10
+#define APALIS_GPIO3 TEGRA_GPIO_PV4 /* open-drain only */
+#define APALIS_GPIO4 TEGRA_GPIO_PV5 /* open-drain only */
+#else
+#define APALIS_GPIO3 TEGRA_GPIO_PN4
+#define APALIS_GPIO4 TEGRA_GPIO_PN5
+#endif
+#define APALIS_GPIO5 TEGRA_GPIO_PDD5
+#define APALIS_GPIO6 TEGRA_GPIO_PDD6
+#define APALIS_GPIO7 TEGRA_GPIO_PDD1
+#define APALIS_GPIO8 TEGRA_GPIO_PDD2
+
+#define BKL1_ON TEGRA_GPIO_PBB5
+
+/* TBD: Camera */
+#define CAM_RSTN TEGRA_GPIO_PBB3
+#define CAM_FLASH_STROBE TEGRA_GPIO_PBB4
+#define CAM2_PWDN TEGRA_GPIO_PBB6
+#define CAM1_PWDN TEGRA_GPIO_PBB5
+#define CAM_AF_PWDN TEGRA_GPIO_PBB7
+#define CAM_BOARD_E1806
+
+#define FAN_EN APALIS_GPIO8
+
+#define HDMI1_HPD TEGRA_GPIO_PN7
+
+#define I2C1_SCL TEGRA_GPIO_PC4
+#define I2C1_SDA TEGRA_GPIO_PC5
+
+#define I2C2_SCL TEGRA_GPIO_PT5
+#define I2C2_SDA TEGRA_GPIO_PT6
+
+#define I2C3_SCL TEGRA_GPIO_PBB1
+#define I2C3_SDA TEGRA_GPIO_PBB2
+
+#define LAN_DEV_OFF_N TEGRA_GPIO_PO6
+#define LAN_RESET_N TEGRA_GPIO_PS2
+#define LAN_WAKE_N TEGRA_GPIO_PO5
+
+#define MMC1_CD_N TEGRA_GPIO_PV3
+
+#define PEX_PERST_N APALIS_GPIO7
+
+#define PWR_I2C_SCL TEGRA_GPIO_PZ6
+#define PWR_I2C_SDA TEGRA_GPIO_PZ7
+
+#define RESET_MOCI_CTRL TEGRA_GPIO_PU4
+
+#define SATA1_ACT_N TEGRA_GPIO_PN2
+
+#define SD1_CD_N TEGRA_GPIO_PEE4
+
+#define THERMD_ALERT_N TEGRA_GPIO_PI6
+
+#define TOUCH_WIPER APALIS_GPIO6
+
+/* TS1 reserved for recovery circuit */
+/* TS2 reserved for PMIC power button */
+/* TS3, TS4, TS5 and TS6 are not connected by default */
+
+#define USBH_EN TEGRA_GPIO_PN5
+#define USBH_OC_N TEGRA_GPIO_PBB0
+#define USBO1_EN TEGRA_GPIO_PN4
+#define USBO1_OC_N TEGRA_GPIO_PBB4
+
+#define WAKE1_MICO TEGRA_GPIO_PDD3
+
+/*
+ * Uncomment to use MXM3 pins 144, 146, 152, 156, 160, 162 & 164 for LEDs,
+ * PCIE1_WDISABLE_N, SW3 and UART2_3_RS232_FOFF_N on Ixora carrier board
+ */
+//#define IXORA
+#ifdef IXORA
+#define LED4_GREEN TEGRA_GPIO_PY5
+#define LED4_RED TEGRA_GPIO_PY4
+#define LED5_GREEN TEGRA_GPIO_PW5
+#define LED5_RED TEGRA_GPIO_PEE5
+#define PCIE1_WDISABLE_N TEGRA_GPIO_PY7
+#define SW3 TEGRA_GPIO_PY6
+#define UART2_3_RS232_FOFF_N TEGRA_GPIO_PV3
+#endif /* IXORA */
+
+/* Uncomment for Apalis TK1 V1.0A prototypes and V1.0B samples */
+//#define APALIS_TK1_V10
+
+/* generated soc_therm OC interrupts */
+#define TEGRA_SOC_OC_IRQ_BASE TEGRA_NR_IRQS
+#define TEGRA_SOC_OC_NUM_IRQ TEGRA_SOC_OC_IRQ_MAX
+
+/* External peripheral acting as interrupt controller */
+
+#define UTMI1_PORT_OWNER_XUSB 0x1
+#define UTMI2_PORT_OWNER_XUSB 0x2
+#define UTMI3_PORT_OWNER_XUSB 0x4
+
+/* HDMI Hotplug detection pin */
+#define apalis_tk1_hdmi_hpd HDMI1_HPD
+
+/* I2C related GPIOs */
+#define TEGRA_GPIO_I2C1_SCL I2C1_SCL
+#define TEGRA_GPIO_I2C1_SDA I2C1_SDA
+#define TEGRA_GPIO_I2C2_SCL I2C2_SCL
+#define TEGRA_GPIO_I2C2_SDA I2C2_SDA
+#define TEGRA_GPIO_I2C3_SCL I2C3_SCL
+#define TEGRA_GPIO_I2C3_SDA I2C3_SDA
+#define TEGRA_GPIO_I2C5_SCL PWR_I2C_SCL
+#define TEGRA_GPIO_I2C5_SDA PWR_I2C_SDA
+
+/* SATA Specific */
+
+#define CLK_RST_CNTRL_RST_DEV_W_SET 0x7000E438
+#define CLK_RST_CNTRL_RST_DEV_V_SET 0x7000E430
+#define SET_CEC_RST 0x100
+
+int apalis_tk1_display_init(void);
+int apalis_tk1_edp_init(void);
+int apalis_tk1_emc_init(void);
+int apalis_tk1_panel_init(void);
+int apalis_tk1_rail_alignment_init(void);
+int apalis_tk1_regulator_init(void);
+int apalis_tk1_sdhci_init(void);
+int apalis_tk1_sensors_init(void);
+int apalis_tk1_soctherm_init(void);
+int apalis_tk1_suspend_init(void);
+void apalis_tk1_camera_auxdata(void *);
+void apalis_tk1_new_sysedp_init(void);
+void apalis_tk1_sysedp_batmon_init(void);
+void apalis_tk1_sysedp_dynamic_capping_init(void);
+
+#endif
diff --git a/arch/arm/mach-tegra/board-ardbeg-sensors.c b/arch/arm/mach-tegra/board-ardbeg-sensors.c
index e34ea1c39559..aa241d28c198 100644
--- a/arch/arm/mach-tegra/board-ardbeg-sensors.c
+++ b/arch/arm/mach-tegra/board-ardbeg-sensors.c
@@ -341,24 +341,6 @@ static struct platform_device ardbeg_ar0330_soc_camera_device = {
};
#endif
-static struct regulator *ardbeg_vcmvdd;
-
-static int ardbeg_get_extra_regulators(void)
-{
- if (!ardbeg_vcmvdd) {
- ardbeg_vcmvdd = regulator_get(NULL, "avdd_af1_cam");
- if (WARN_ON(IS_ERR(ardbeg_vcmvdd))) {
- pr_err("%s: can't get regulator avdd_af1_cam: %ld\n",
- __func__, PTR_ERR(ardbeg_vcmvdd));
- regulator_put(ardbeg_vcmvdd);
- ardbeg_vcmvdd = NULL;
- return -ENODEV;
- }
- }
-
- return 0;
-}
-
static struct tegra_io_dpd csia_io = {
.name = "CSIA",
.io_dpd_reg_index = 0,
@@ -377,6 +359,222 @@ static struct tegra_io_dpd csie_io = {
.io_dpd_bit = 12,
};
+#if IS_ENABLED(CONFIG_SOC_CAMERA_AP1302)
+static int ardbeg_ap1302_power(struct device *dev, int enable)
+{
+	if (enable) {
+ /* disable CSIA IOs DPD mode to turn on camera for ardbeg */
+ tegra_io_dpd_disable(&csia_io);
+ tegra_io_dpd_disable(&csib_io);
+ } else {
+ tegra_io_dpd_enable(&csia_io);
+ tegra_io_dpd_enable(&csib_io);
+ }
+ return 0;
+}
+
+static struct i2c_board_info ardbeg_ap1302_camera_i2c_device = {
+ I2C_BOARD_INFO("ap1302", 0x3c),
+};
+
+static struct tegra_camera_platform_data ardbeg_ap1302_camera_platform_data = {
+ .flip_v = 0,
+ .flip_h = 0,
+ .port = TEGRA_CAMERA_PORT_CSI_A,
+ .lanes = 4,
+ .continuous_clk = 0,
+};
+
+static struct soc_camera_link ap1302_iclink = {
+ .bus_id = 0, /* This must match the .id of tegra_vi01_device */
+ .board_info = &ardbeg_ap1302_camera_i2c_device,
+ .module_name = "ap1302",
+ .i2c_adapter_id = 2,
+ .power = ardbeg_ap1302_power,
+ .priv = &ardbeg_ap1302_camera_platform_data,
+};
+
+static struct platform_device ardbeg_ap1302_soc_camera_device = {
+ .name = "soc-camera-pdrv",
+ .id = 2,
+ .dev = {
+ .platform_data = &ap1302_iclink,
+ },
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_OV5640)
+static int ardbeg_ov5640_power(struct device *dev, int enable)
+{
+	if (enable) {
+		tegra_io_dpd_disable(&csia_io);
+	} else {
+		tegra_io_dpd_enable(&csia_io);
+	}
+	return 0;
+}
+
+static struct i2c_board_info ardbeg_ov5640_camera_i2c_device = {
+ I2C_BOARD_INFO("ov5640", 0x3c),
+};
+
+static struct tegra_camera_platform_data ardbeg_ov5640_camera_platform_data = {
+ .flip_v = 0,
+ .flip_h = 0,
+ .port = TEGRA_CAMERA_PORT_CSI_A,
+ .lanes = 2,
+ .continuous_clk = 0,
+};
+
+static struct soc_camera_link ov5640_iclink = {
+ .bus_id = 0, /* This must match the .id of tegra_vi01_device */
+ .board_info = &ardbeg_ov5640_camera_i2c_device,
+ .module_name = "ov5640",
+ .i2c_adapter_id = 2,
+ .power = ardbeg_ov5640_power,
+ .priv = &ardbeg_ov5640_camera_platform_data,
+};
+
+static struct platform_device ardbeg_ov5640_soc_camera_device = {
+ .name = "soc-camera-pdrv",
+ .id = 3,
+ .dev = {
+ .platform_data = &ov5640_iclink,
+ },
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_TC358743)
+static int ardbeg_tc358743_power(struct device *dev, int enable)
+{
+	if (enable) {
+ tegra_io_dpd_disable(&csia_io);
+ } else {
+ tegra_io_dpd_enable(&csia_io);
+ }
+ return 0;
+}
+
+static struct i2c_board_info ardbeg_tc358743_camera_i2c_device = {
+ I2C_BOARD_INFO("tc358743", 0x0f),
+};
+
+static struct tegra_camera_platform_data ardbeg_tc358743_camera_platform_data = {
+ .flip_v = 0,
+ .flip_h = 0,
+ .port = TEGRA_CAMERA_PORT_CSI_A,
+ .lanes = 2,
+ .continuous_clk = 1,
+};
+
+static struct soc_camera_link tc358743_iclink = {
+ .bus_id = 0, /* This must match the .id of tegra_vi01_device */
+ .board_info = &ardbeg_tc358743_camera_i2c_device,
+ .module_name = "tc358743",
+ .i2c_adapter_id = 2, /* change to 1 if you have auvidea's B100 HDMI to CSI-2 Bridge */
+ .power = ardbeg_tc358743_power,
+ .priv = &ardbeg_tc358743_camera_platform_data,
+};
+
+static struct platform_device ardbeg_tc358743_soc_camera_device = {
+ .name = "soc-camera-pdrv",
+ .id = 4,
+ .dev = {
+ .platform_data = &tc358743_iclink,
+ },
+};
+#endif
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_ADV7280)
+static int ardbeg_adv7280_power(struct device *dev, int enable)
+{
+	if (enable) {
+ tegra_io_dpd_disable(&csie_io);
+ } else {
+ tegra_io_dpd_enable(&csie_io);
+ }
+ return 0;
+}
+
+static struct i2c_board_info ardbeg_adv7280_camera_i2c_device_a = {
+ I2C_BOARD_INFO("adv7280", 0x20),
+};
+
+static struct tegra_camera_platform_data ardbeg_adv7280_camera_platform_data_a = {
+ .flip_v = 0,
+ .flip_h = 0,
+ .port = TEGRA_CAMERA_PORT_CSI_A,
+ .lanes = 1,
+ .continuous_clk = 0,
+};
+
+static struct soc_camera_link adv7280_iclink_a = {
+ .bus_id = 0, /* This must match the .id of tegra_vi01_device */
+ .board_info = &ardbeg_adv7280_camera_i2c_device_a,
+ .module_name = "adv7280",
+ .i2c_adapter_id = 2,
+ .power = ardbeg_adv7280_power,
+ .priv = &ardbeg_adv7280_camera_platform_data_a,
+};
+
+static struct platform_device ardbeg_adv7280_soc_camera_device_a = {
+ .name = "soc-camera-pdrv",
+ .id = 5,
+ .dev = {
+ .platform_data = &adv7280_iclink_a,
+ },
+};
+
+/* second ADV7280M connected to CSI CIL-E */
+static struct i2c_board_info ardbeg_adv7280_camera_i2c_device_b = {
+ I2C_BOARD_INFO("adv7280", 0x21),
+};
+
+static struct tegra_camera_platform_data ardbeg_adv7280_camera_platform_data_b = {
+ .flip_v = 0,
+ .flip_h = 0,
+ .port = TEGRA_CAMERA_PORT_CSI_C,
+ .lanes = 1,
+ .continuous_clk = 0,
+};
+
+static struct soc_camera_link adv7280_iclink_b = {
+ .bus_id = 0, /* This must match the .id of tegra_vi01_device */
+ .board_info = &ardbeg_adv7280_camera_i2c_device_b,
+ .module_name = "adv7280",
+ .i2c_adapter_id = 2,
+ .power = ardbeg_adv7280_power,
+ .priv = &ardbeg_adv7280_camera_platform_data_b,
+};
+
+static struct platform_device ardbeg_adv7280_soc_camera_device_b = {
+ .name = "soc-camera-pdrv",
+ .id = 6,
+ .dev = {
+ .platform_data = &adv7280_iclink_b,
+ },
+};
+#endif
+
+static struct regulator *ardbeg_vcmvdd;
+
+static int ardbeg_get_extra_regulators(void)
+{
+ if (!ardbeg_vcmvdd) {
+ ardbeg_vcmvdd = regulator_get(NULL, "avdd_af1_cam");
+ if (WARN_ON(IS_ERR(ardbeg_vcmvdd))) {
+ pr_err("%s: can't get regulator avdd_af1_cam: %ld\n",
+ __func__, PTR_ERR(ardbeg_vcmvdd));
+ regulator_put(ardbeg_vcmvdd);
+ ardbeg_vcmvdd = NULL;
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
static int ardbeg_ar0330_front_power_on(struct ar0330_power_rail *pw)
{
int err;
@@ -1556,9 +1754,24 @@ static int ardbeg_camera_init(void)
#if IS_ENABLED(CONFIG_SOC_CAMERA_AR0261)
platform_device_register(&ardbeg_ar0261_soc_camera_device);
#endif
+
#if IS_ENABLED(CONFIG_SOC_CAMERA_AR0330)
platform_device_register(&ardbeg_ar0330_soc_camera_device);
#endif
+
+#if IS_ENABLED(CONFIG_SOC_CAMERA_AP1302)
+ platform_device_register(&ardbeg_ap1302_soc_camera_device);
+#endif
+#if IS_ENABLED(CONFIG_SOC_CAMERA_OV5640)
+ platform_device_register(&ardbeg_ov5640_soc_camera_device);
+#endif
+#if IS_ENABLED(CONFIG_SOC_CAMERA_TC358743)
+ platform_device_register(&ardbeg_tc358743_soc_camera_device);
+#endif
+#if IS_ENABLED(CONFIG_SOC_CAMERA_ADV7280)
+ platform_device_register(&ardbeg_adv7280_soc_camera_device_a);
+ platform_device_register(&ardbeg_adv7280_soc_camera_device_b);
+#endif
return 0;
}
diff --git a/arch/arm/mach-tegra/board-ardbeg.c b/arch/arm/mach-tegra/board-ardbeg.c
index d6423a38cb87..e4da9c1cd385 100644
--- a/arch/arm/mach-tegra/board-ardbeg.c
+++ b/arch/arm/mach-tegra/board-ardbeg.c
@@ -1161,7 +1161,7 @@ static int __init ardbeg_touch_init(void)
if (board_info.fab >= 0xa3) {
rm31080ts_t132loki_data.name_of_clock = NULL;
rm31080ts_t132loki_data.name_of_clock_con = NULL;
- } else
+ } else {
tegra_clk_init_from_table(touch_clk_init_table);
rm31080a_ardbeg_spi_board[0].irq =
gpio_to_irq(TOUCH_GPIO_IRQ_RAYDIUM_SPI);
@@ -1170,6 +1170,7 @@ static int __init ardbeg_touch_init(void)
&rm31080ts_t132loki_data,
&rm31080a_ardbeg_spi_board[0],
ARRAY_SIZE(rm31080a_ardbeg_spi_board));
+ }
} else if (board_info.board_id == BOARD_P1761) {
rm31080a_tn8_spi_board[0].irq =
gpio_to_irq(TOUCH_GPIO_IRQ_RAYDIUM_SPI);
@@ -1475,10 +1476,10 @@ static void __init tegra_ardbeg_late_init(void)
else
ardbeg_panel_init();
- /* put PEX pads into DPD mode to save additional power */
- tegra_io_dpd_enable(&pexbias_io);
- tegra_io_dpd_enable(&pexclk1_io);
- tegra_io_dpd_enable(&pexclk2_io);
+ /* put PEX pads into DPD mode to save additional power */
+ tegra_io_dpd_enable(&pexbias_io);
+ tegra_io_dpd_enable(&pexclk1_io);
+ tegra_io_dpd_enable(&pexclk2_io);
if (board_info.board_id == BOARD_E2548 ||
board_info.board_id == BOARD_P2530)
@@ -1498,12 +1499,12 @@ static void __init tegra_ardbeg_late_init(void)
board_info.board_id == BOARD_PM363) {
ardbeg_sensors_init();
norrin_soctherm_init();
- } else if (board_info.board_id == BOARD_E2548 ||
+ } else if (board_info.board_id == BOARD_E2548 ||
board_info.board_id == BOARD_P2530) {
loki_sensors_init();
loki_fan_init();
loki_soctherm_init();
- } else {
+ } else {
ardbeg_sensors_init();
ardbeg_soctherm_init();
}
@@ -1610,10 +1611,12 @@ static const char * const bowmore_dt_board_compat[] = {
NULL
};
+#ifdef CONFIG_ARCH_TEGRA_13x_SOC
static const char * const loki_dt_board_compat[] = {
"nvidia,t132loki",
NULL
};
+#endif
static const char * const jetson_dt_board_compat[] = {
"nvidia,jetson-tk1",
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index 23ae029a3ec2..c9c3858f5ae9 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -2342,19 +2342,10 @@ static const char * __init tegra_get_family(void)
return kasprintf(GFP_KERNEL, "Tegra%d", cid);
}
-static const char * __init tegra_get_soc_id(void)
-{
- int package_id = tegra_package_id();
-
- return kasprintf(GFP_KERNEL, "REV=%s:SKU=0x%x:PID=0x%x",
- tegra_revision_name[tegra_revision],
- tegra_get_sku_id(), package_id);
-}
-
static void __init tegra_soc_info_populate(struct soc_device_attribute
*soc_dev_attr, const char *machine)
{
- soc_dev_attr->soc_id = tegra_get_soc_id();
+ soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "%llx", tegra_chip_uid());
soc_dev_attr->machine = machine;
soc_dev_attr->family = tegra_get_family();
soc_dev_attr->revision = tegra_get_revision();
diff --git a/arch/arm/mach-tegra/include/mach/dc.h b/arch/arm/mach-tegra/include/mach/dc.h
index 00ae4cb2c9f8..c700644d7168 100644
--- a/arch/arm/mach-tegra/include/mach/dc.h
+++ b/arch/arm/mach-tegra/include/mach/dc.h
@@ -563,6 +563,8 @@ struct tegra_dc_out {
unsigned depth;
unsigned dither;
+ const char *default_mode;
+
struct tegra_dc_mode *modes;
int n_modes;
@@ -837,6 +839,8 @@ int tegra_dc_config_frame_end_intr(struct tegra_dc *dc, bool enable);
bool tegra_dc_is_within_n_vsync(struct tegra_dc *dc, s64 ts);
bool tegra_dc_does_vsync_separate(struct tegra_dc *dc, s64 new_ts, s64 old_ts);
+int tegra_dc_var_to_dc_mode(struct tegra_dc *dc, struct fb_var_screeninfo *var,
+ struct tegra_dc_mode *mode);
int tegra_dc_set_mode(struct tegra_dc *dc, const struct tegra_dc_mode *mode);
struct fb_videomode;
int tegra_dc_to_fb_videomode(struct fb_videomode *fbmode,
@@ -889,6 +893,9 @@ int tegra_dc_set_flip_callback(void (*callback)(void));
int tegra_dc_unset_flip_callback(void);
int tegra_dc_get_panel_sync_rate(void);
+int tegra_fb_find_mode(struct fb_var_screeninfo *var, struct fb_info *info,
+ const char* option, unsigned int default_bpp);
+
int tegra_dc_get_out(const struct tegra_dc *dc);
struct device_node *tegra_panel_get_dt_node(
diff --git a/arch/arm/mach-tegra/panel-a-edp-1080p-14-0.c b/arch/arm/mach-tegra/panel-a-edp-1080p-14-0.c
index e530d68401d5..2a647eabf261 100644
--- a/arch/arm/mach-tegra/panel-a-edp-1080p-14-0.c
+++ b/arch/arm/mach-tegra/panel-a-edp-1080p-14-0.c
@@ -33,7 +33,11 @@
#define DC_CTRL_MODE TEGRA_DC_OUT_CONTINUOUS_MODE
+#ifdef CONFIG_MACH_APALIS_TK1
+#define EDP_PANEL_BL_PWM TEGRA_GPIO_PU6
+#else
#define EDP_PANEL_BL_PWM TEGRA_GPIO_PH1
+#endif
static bool reg_requested;
static bool gpio_requested;
@@ -168,6 +172,16 @@ static int laguna_edp_regulator_get(struct device *dev)
goto fail;
}
+#ifdef CONFIG_MACH_APALIS_TK1
+ avdd_3v3_dp = regulator_get(dev, "avdd_3v3_dp");
+ if (IS_ERR_OR_NULL(avdd_3v3_dp)) {
+ pr_err("avdd_3v3_dp regulator get failed\n");
+ err = PTR_ERR(avdd_3v3_dp);
+ avdd_3v3_dp = NULL;
+ goto fail;
+ }
+#endif /* CONFIG_MACH_APALIS_TK1 */
+
reg_requested = true;
return 0;
fail:
@@ -272,6 +286,13 @@ static int edp_a_1080p_14_0_enable(struct device *dev)
pr_err("avdd_3v3_dp regulator enable failed\n");
goto fail;
}
+#ifdef CONFIG_MACH_APALIS_TK1
+ err = regulator_set_voltage(avdd_3v3_dp, 3300000, 3300000);
+ if (err < 0) {
+ pr_err("avdd_3v3_dp regulator_set_voltage to 3.3V failed\n");
+ goto fail;
+ }
+#endif /* CONFIG_MACH_APALIS_TK1 */
}
msleep(20);
@@ -380,7 +401,11 @@ static int edp_a_1080p_14_0_check_fb(struct device *dev, struct fb_info *info)
}
static struct platform_pwm_backlight_data edp_a_1080p_14_0_bl_data = {
+#ifdef CONFIG_MACH_APALIS_TK1
+ .pwm_id = 3,
+#else
.pwm_id = 1,
+#endif
.max_brightness = 255,
.dft_brightness = 224,
.pwm_period_ns = 1000000,
diff --git a/arch/arm/mach-tegra/panel-c-lvds-1366-14.c b/arch/arm/mach-tegra/panel-c-lvds-1366-14.c
index cb4df6c6f49f..cc9a3ba8bb73 100644
--- a/arch/arm/mach-tegra/panel-c-lvds-1366-14.c
+++ b/arch/arm/mach-tegra/panel-c-lvds-1366-14.c
@@ -33,7 +33,11 @@
#define DC_CTRL_MODE TEGRA_DC_OUT_CONTINUOUS_MODE
+#ifdef CONFIG_MACH_APALIS_TK1
+#define LVDS_PANEL_BL_PWM TEGRA_GPIO_PU6
+#else
#define LVDS_PANEL_BL_PWM TEGRA_GPIO_PH1
+#endif
static bool reg_requested;
static bool gpio_requested;
@@ -315,22 +319,6 @@ static struct tegra_dc_out_pin lvds_out_pins[] = {
},
};
-static struct tegra_dc_mode lvds_c_1366_14_modes[] = {
- {
- .pclk = 74720100, /* 1366 x 768 @ 60hz */
- .h_ref_to_sync = 1,
- .v_ref_to_sync = 1,
- .h_sync_width = 45,
- .v_sync_width = 7,
- .h_back_porch = 113,
- .v_back_porch = 21,
- .h_active = 1366,
- .v_active = 768,
- .h_front_porch = 68,
- .v_front_porch = 4,
- },
-};
-
static int lvds_c_1366_14_bl_notify(struct device *unused, int brightness)
{
int cur_sd_brightness = atomic_read(&sd_brightness);
@@ -353,7 +341,11 @@ static int lvds_c_1366_14_check_fb(struct device *dev, struct fb_info *info)
}
static struct platform_pwm_backlight_data lvds_c_1366_14_bl_data = {
+#ifdef CONFIG_MACH_APALIS_TK1
+ .pwm_id = 3,
+#else
.pwm_id = 1,
+#endif
.max_brightness = 255,
.dft_brightness = 224,
.pwm_period_ns = 1000000,
@@ -400,8 +392,6 @@ static void lvds_c_1366_14_dc_out_init(struct tegra_dc_out *dc)
dc->align = TEGRA_DC_ALIGN_MSB,
dc->order = TEGRA_DC_ORDER_RED_BLUE,
dc->flags = DC_CTRL_MODE;
- dc->modes = lvds_c_1366_14_modes;
- dc->n_modes = ARRAY_SIZE(lvds_c_1366_14_modes);
dc->out_pins = lvds_out_pins,
dc->n_out_pins = ARRAY_SIZE(lvds_out_pins),
dc->depth = 18,
@@ -413,12 +403,6 @@ static void lvds_c_1366_14_dc_out_init(struct tegra_dc_out *dc)
dc->height = 174;
}
-static void lvds_c_1366_14_fb_data_init(struct tegra_fb_data *fb)
-{
- fb->xres = lvds_c_1366_14_modes[0].h_active;
- fb->yres = lvds_c_1366_14_modes[0].v_active;
-}
-
static void
lvds_c_1366_14_sd_settings_init(struct tegra_dc_sd_settings *settings)
{
@@ -429,9 +413,7 @@ lvds_c_1366_14_sd_settings_init(struct tegra_dc_sd_settings *settings)
struct tegra_panel __initdata lvds_c_1366_14 = {
.init_sd_settings = lvds_c_1366_14_sd_settings_init,
.init_dc_out = lvds_c_1366_14_dc_out_init,
- .init_fb_data = lvds_c_1366_14_fb_data_init,
.register_bl_dev = lvds_c_1366_14_register_bl_dev,
.set_disp_device = lvds_c_1366_14_set_disp_device,
};
EXPORT_SYMBOL(lvds_c_1366_14);
-
diff --git a/arch/arm/mach-tegra/powergate-t12x.c b/arch/arm/mach-tegra/powergate-t12x.c
index 36deee3172c6..dd8126931294 100644
--- a/arch/arm/mach-tegra/powergate-t12x.c
+++ b/arch/arm/mach-tegra/powergate-t12x.c
@@ -810,8 +810,8 @@ bool tegra12x_powergate_skip(int id)
switch (id) {
#ifdef CONFIG_ARCH_TEGRA_HAS_SATA
case TEGRA_POWERGATE_SATA:
-#endif
return true;
+#endif
default:
return false;
diff --git a/arch/arm/mach-tegra/tegra11_soctherm.c b/arch/arm/mach-tegra/tegra11_soctherm.c
index 5a2140234ec1..f1d24c278e74 100644
--- a/arch/arm/mach-tegra/tegra11_soctherm.c
+++ b/arch/arm/mach-tegra/tegra11_soctherm.c
@@ -2975,7 +2975,8 @@ static int soctherm_init_platform_data(void)
for (i = 0; i < TSENSE_SIZE; i++) {
therm = &plat_data.therm[tsensor2therm_map[i]];
s = &plat_data.sensor_data[i];
- s->sensor_enable = s->sensor_enable ?: therm->zone_enable;
+ if (!(s->sensor_enable))
+ s->sensor_enable = therm->zone_enable;
s->tall = s->tall ?: sensor_defaults.tall;
s->tiddq = s->tiddq ?: sensor_defaults.tiddq;
s->ten_count = s->ten_count ?: sensor_defaults.ten_count;
diff --git a/arch/arm/mach-tegra/tegra12_clocks.c b/arch/arm/mach-tegra/tegra12_clocks.c
index bb738a86b51a..0546bd6a6c1c 100644
--- a/arch/arm/mach-tegra/tegra12_clocks.c
+++ b/arch/arm/mach-tegra/tegra12_clocks.c
@@ -1331,7 +1331,7 @@ static struct clk_ops tegra13_super_cclk_ops = {
*/
static void tegra12_cpu_clk_init(struct clk *c)
{
- c->state = (!is_lp_cluster() == (c->u.cpu.mode == MODE_G))? ON : OFF;
+ c->state = ((!is_lp_cluster()) == (c->u.cpu.mode == MODE_G))? ON : OFF;
}
static int tegra12_cpu_clk_enable(struct clk *c)
diff --git a/arch/arm/mach-tegra/tegra_cl_dvfs.c b/arch/arm/mach-tegra/tegra_cl_dvfs.c
index 0b2f50a0e8d2..82fac33f2a73 100644
--- a/arch/arm/mach-tegra/tegra_cl_dvfs.c
+++ b/arch/arm/mach-tegra/tegra_cl_dvfs.c
@@ -901,7 +901,7 @@ static inline void calibration_timer_update(struct tegra_cl_dvfs *cld)
static void cl_dvfs_calibrate(struct tegra_cl_dvfs *cld)
{
- u32 val, data;
+ u32 val, data = 0;
ktime_t now;
unsigned long rate;
unsigned long step = RATE_STEP(cld);
@@ -2948,7 +2948,7 @@ DEFINE_SIMPLE_ATTRIBUTE(lock_fops, lock_get, lock_set, "%llu\n");
static int monitor_get(void *data, u64 *val)
{
- u32 v, s;
+ u32 v = 0, s;
unsigned long flags;
struct clk *c = (struct clk *)data;
struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 0737a7a7a992..72dd967ecd12 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -1009,4 +1009,5 @@ eukrea_cpuimx28sd MACH_EUKREA_CPUIMX28SD EUKREA_CPUIMX28SD 4573
domotab MACH_DOMOTAB DOMOTAB 4574
pfla03 MACH_PFLA03 PFLA03 4575
ardbeg MACH_ARDBEG ARDBEG 4602
+apalis_tk1 MACH_APALIS_TK1 APALIS_TK1 4609
vcm30t124 MACH_VCM30_T124 VCM30_T124 4616
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 42e0c329191c..879838efcd7d 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -590,7 +590,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
goto err_alloc;
}
- ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
+ ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
+ irq_flags | IRQF_ONESHOT,
chip->name, d);
if (ret != 0) {
dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
@@ -620,14 +621,35 @@ EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
*
* @irq: Primary IRQ for the device
* @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ *
+ * This function also disposes of all mapped IRQs on the chip.
*/
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
+ unsigned int virq;
+ int hwirq;
+
if (!d)
return;
free_irq(irq, d);
- /* We should unmap the domain but... */
+
+ /* Dispose of all virtual IRQs from the IRQ domain before removing it */
+ for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
+ /* Skip this hwirq if there are holes in the IRQ list */
+ if (!d->chip->irqs[hwirq].mask)
+ continue;
+
+ /*
+ * Find the virtual IRQ mapped to this hwirq on the chip and,
+ * if one exists, dispose of it.
+ */
+ virq = irq_find_mapping(d->domain, hwirq);
+ if (virq)
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(d->domain);
kfree(d->type_buf);
kfree(d->type_buf_def);
kfree(d->wake_buf);
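For reference, a minimal sketch of the probe/remove pairing this change completes: regmap_del_irq_chip() now also disposes every mapped virq and removes the IRQ domain, so a caller only has to hand back the chip data allocated by regmap_add_irq_chip(). The struct and field names below are hypothetical, for illustration only.

#include <linux/platform_device.h>
#include <linux/regmap.h>

/* Hypothetical driver state holding the handle from regmap_add_irq_chip() */
struct example_chip {
	int irq;
	struct regmap_irq_chip_data *irq_data;
};

static int example_remove(struct platform_device *pdev)
{
	struct example_chip *chip = platform_get_drvdata(pdev);

	/* Frees the primary IRQ, disposes all mapped virqs, removes the domain */
	regmap_del_irq_chip(chip->irq, chip->irq_data);

	return 0;
}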
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 0b43b260390b..4d45a42f1d00 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1300,10 +1300,10 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
*/
if (map->use_single_rw) {
for (i = 0; i < val_count; i++) {
- ret = regmap_raw_write(map,
+ ret = _regmap_raw_write(map,
reg + (i * map->reg_stride),
val + (i * val_bytes),
- val_bytes);
+ val_bytes, false);
if (ret != 0)
return ret;
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 59d5f24dc113..2bb28efe1e08 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -643,6 +643,12 @@ config GPIO_RDC321X
comment "SPI GPIO expanders:"
+config GPIO_APALIS_TK1_K20
+ tristate "GPIOs of K20 MCU on Apalis TK1"
+ depends on MFD_APALIS_TK1_K20
+ help
+ This enables support for the GPIOs of the K20 MCU found on the Apalis TK1.
+
config GPIO_MAX7301
tristate "Maxim MAX7301 GPIO expander"
depends on SPI_MASTER
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index e5a7fe6f0ea8..1134650de379 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_GPIO_ADNP) += gpio-adnp.o
obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
+obj-$(CONFIG_GPIO_APALIS_TK1_K20) += gpio-apalis-tk1-k20.o
obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
obj-$(CONFIG_GPIO_CLPS711X) += gpio-clps711x.o
diff --git a/drivers/gpio/gpio-apalis-tk1-k20.c b/drivers/gpio/gpio-apalis-tk1-k20.c
new file mode 100644
index 000000000000..f914cf0d8ded
--- /dev/null
+++ b/drivers/gpio/gpio-apalis-tk1-k20.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2016-2017 Toradex AG
+ * Dominik Sliwa <dominik.sliwa@toradex.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/mfd/apalis-tk1-k20.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+struct apalis_tk1_k20_gpio {
+ struct gpio_chip chip;
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20;
+};
+
+static int apalis_tk1_k20_gpio_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct apalis_tk1_k20_gpio *gpio = container_of(chip,
+ struct apalis_tk1_k20_gpio, chip);
+
+ apalis_tk1_k20_lock(gpio->apalis_tk1_k20);
+
+ apalis_tk1_k20_reg_write(gpio->apalis_tk1_k20, APALIS_TK1_K20_GPIO_NO,
+ offset);
+ apalis_tk1_k20_reg_write(gpio->apalis_tk1_k20, APALIS_TK1_K20_GPIO_STA,
+ 0);
+
+ apalis_tk1_k20_unlock(gpio->apalis_tk1_k20);
+
+ return 0;
+}
+
+static int apalis_tk1_k20_gpio_output(struct gpio_chip *chip,
+ unsigned int offset,
+ int value)
+{
+ struct apalis_tk1_k20_gpio *gpio = container_of(chip,
+ struct apalis_tk1_k20_gpio, chip);
+ int status;
+
+ apalis_tk1_k20_lock(gpio->apalis_tk1_k20);
+
+ apalis_tk1_k20_reg_write(gpio->apalis_tk1_k20, APALIS_TK1_K20_GPIO_NO,
+ offset);
+ status = APALIS_TK1_K20_GPIO_STA_OE;
+ status += value ? APALIS_TK1_K20_GPIO_STA_VAL : 0;
+ apalis_tk1_k20_reg_write(gpio->apalis_tk1_k20, APALIS_TK1_K20_GPIO_STA,
+ status);
+
+ apalis_tk1_k20_unlock(gpio->apalis_tk1_k20);
+
+ return 0;
+}
+
+static int apalis_tk1_k20_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct apalis_tk1_k20_gpio *gpio = container_of(chip,
+ struct apalis_tk1_k20_gpio, chip);
+ int value;
+
+ apalis_tk1_k20_lock(gpio->apalis_tk1_k20);
+
+ apalis_tk1_k20_reg_write(gpio->apalis_tk1_k20, APALIS_TK1_K20_GPIO_NO,
+ offset);
+ if (apalis_tk1_k20_reg_read(gpio->apalis_tk1_k20,
+ APALIS_TK1_K20_GPIO_STA, &value) < 0) {
+ apalis_tk1_k20_unlock(gpio->apalis_tk1_k20);
+ return -ENODEV;
+ }
+ value &= APALIS_TK1_K20_GPIO_STA_VAL;
+
+ apalis_tk1_k20_unlock(gpio->apalis_tk1_k20);
+
+ return value ? 1 : 0;
+}
+
+static int apalis_tk1_k20_gpio_request(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct apalis_tk1_k20_gpio *gpio = container_of(chip,
+ struct apalis_tk1_k20_gpio, chip);
+ int status = 0;
+
+ dev_dbg(gpio->apalis_tk1_k20->dev, "APALIS TK1 K20 GPIO %s\n",
+ __func__);
+
+ apalis_tk1_k20_lock(gpio->apalis_tk1_k20);
+
+ apalis_tk1_k20_reg_write(gpio->apalis_tk1_k20, APALIS_TK1_K20_GPIO_NO,
+ offset);
+ if (apalis_tk1_k20_reg_read(gpio->apalis_tk1_k20,
+ APALIS_TK1_K20_GPIO_NO, &status) < 0) {
+ apalis_tk1_k20_unlock(gpio->apalis_tk1_k20);
+ return -ENODEV;
+ }
+
+ apalis_tk1_k20_unlock(gpio->apalis_tk1_k20);
+
+ return 0;
+}
+
+static void apalis_tk1_k20_gpio_free(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct apalis_tk1_k20_gpio *gpio =
+ container_of(chip, struct apalis_tk1_k20_gpio, chip);
+
+ apalis_tk1_k20_lock(gpio->apalis_tk1_k20);
+
+ apalis_tk1_k20_reg_write(gpio->apalis_tk1_k20, APALIS_TK1_K20_GPIO_NO,
+ offset);
+ apalis_tk1_k20_reg_write(gpio->apalis_tk1_k20,
+ APALIS_TK1_K20_GPIO_STA, 0);
+
+ apalis_tk1_k20_unlock(gpio->apalis_tk1_k20);
+}
+
+
+static void apalis_tk1_k20_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct apalis_tk1_k20_gpio *gpio = container_of(chip,
+ struct apalis_tk1_k20_gpio, chip);
+ int status;
+
+ apalis_tk1_k20_lock(gpio->apalis_tk1_k20);
+
+ apalis_tk1_k20_reg_write(gpio->apalis_tk1_k20, APALIS_TK1_K20_GPIO_NO,
+ offset);
+ if (apalis_tk1_k20_reg_read(gpio->apalis_tk1_k20,
+ APALIS_TK1_K20_GPIO_STA, &status) < 0) {
+ apalis_tk1_k20_unlock(gpio->apalis_tk1_k20);
+ return;
+ }
+
+ status &= ~APALIS_TK1_K20_GPIO_STA_VAL;
+ status += value ? APALIS_TK1_K20_GPIO_STA_VAL : 0;
+ apalis_tk1_k20_reg_write(gpio->apalis_tk1_k20, APALIS_TK1_K20_GPIO_STA,
+ status);
+
+ apalis_tk1_k20_unlock(gpio->apalis_tk1_k20);
+}
+
+static int __init apalis_tk1_k20_gpio_probe(struct platform_device *pdev)
+{
+ struct apalis_tk1_k20_gpio *priv;
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20;
+ int status;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ apalis_tk1_k20 = dev_get_drvdata(pdev->dev.parent);
+ if (!apalis_tk1_k20)
+ return -ENODEV;
+ priv->apalis_tk1_k20 = apalis_tk1_k20;
+
+ platform_set_drvdata(pdev, priv);
+
+ apalis_tk1_k20_lock(apalis_tk1_k20);
+
+ /* TBD: some code */
+
+ apalis_tk1_k20_unlock(apalis_tk1_k20);
+
+ priv->chip.base = -1;
+ priv->chip.can_sleep = 1;
+ priv->chip.dev = &pdev->dev;
+ priv->chip.owner = THIS_MODULE;
+ priv->chip.get = apalis_tk1_k20_gpio_get;
+ priv->chip.set = apalis_tk1_k20_gpio_set;
+ priv->chip.direction_input = apalis_tk1_k20_gpio_input;
+ priv->chip.direction_output = apalis_tk1_k20_gpio_output;
+ priv->chip.request = apalis_tk1_k20_gpio_request;
+ priv->chip.free = apalis_tk1_k20_gpio_free;
+ /* TODO: include as a define somewhere */
+ priv->chip.ngpio = 160;
+
+ status = gpiochip_add(&priv->chip);
+
+ return status;
+}
+
+static int __exit apalis_tk1_k20_gpio_remove(struct platform_device *pdev)
+{
+ struct apalis_tk1_k20_gpio *priv = platform_get_drvdata(pdev);
+
+ return gpiochip_remove(&priv->chip);
+}
+
+static const struct platform_device_id apalis_tk1_k20_gpio_idtable[] = {
+ {
+ .name = "apalis-tk1-k20-gpio",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, apalis_tk1_k20_gpio_idtable);
+
+static struct platform_driver apalis_tk1_k20_gpio_driver = {
+ .id_table = apalis_tk1_k20_gpio_idtable,
+ .remove = __exit_p(apalis_tk1_k20_gpio_remove),
+ .driver = {
+ .name = "apalis-tk1-k20-gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver_probe(apalis_tk1_k20_gpio_driver,
+ &apalis_tk1_k20_gpio_probe);
+
+MODULE_DESCRIPTION("GPIO driver for K20 MCU on Apalis TK1");
+MODULE_AUTHOR("Dominik Sliwa <dominik.sliwa@toradex.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 2a5bd760f82d..2bbd973ad0b1 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -121,7 +121,6 @@ static const u32 gmmu_page_sizes[gmmu_nr_page_sizes] = { SZ_4K, SZ_128K };
static const u32 gmmu_page_shifts[gmmu_nr_page_sizes] = { 12, 17 };
static const u64 gmmu_page_offset_masks[gmmu_nr_page_sizes] = { 0xfffLL,
0x1ffffLL };
-static const u64 gmmu_page_masks[gmmu_nr_page_sizes] = { ~0xfffLL, ~0x1ffffLL };
struct gk20a_comptags {
u32 offset;
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
index 231c32a52dd1..18a64e1fabef 100644
--- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
@@ -341,16 +341,6 @@ static const u32 gk20a_runcontrol_whitelist[] = {
static const u32 gk20a_runcontrol_whitelist_count =
ARRAY_SIZE(gk20a_runcontrol_whitelist);
-static const struct regop_offset_range gk20a_runcontrol_whitelist_ranges[] = {
- { 0x00419e10, 1 },
- { 0x0041c610, 1 },
- { 0x00501e10, 1 },
- { 0x00504610, 1 },
-};
-static const u32 gk20a_runcontrol_whitelist_ranges_count =
- ARRAY_SIZE(gk20a_runcontrol_whitelist_ranges);
-
-
/* quad ctl */
static const u32 gk20a_qctl_whitelist[] = {
0x00504670,
@@ -366,16 +356,6 @@ static const u32 gk20a_qctl_whitelist[] = {
static const u32 gk20a_qctl_whitelist_count =
ARRAY_SIZE(gk20a_qctl_whitelist);
-static const struct regop_offset_range gk20a_qctl_whitelist_ranges[] = {
- { 0x00504670, 1 },
- { 0x00504730, 4 },
-};
-static const u32 gk20a_qctl_whitelist_ranges_count =
- ARRAY_SIZE(gk20a_qctl_whitelist_ranges);
-
-
-
-
static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s,
u32 *ctx_rd_count, u32 *ctx_wr_count,
struct nvhost_dbg_gpu_reg_op *ops,
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index ab0767e6727e..5a69e23c4ffb 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -94,6 +94,12 @@ config AD7887
To compile this driver as a module, choose M here: the
module will be called ad7887.
+config APALIS_TK1_K20_ADC
+ tristate "ADCs of K20 MCU on Apalis TK1"
+ depends on MFD_APALIS_TK1_K20
+ help
+ This enables support for the ADCs of the K20 MCU found on the Apalis TK1.
+
config AT91_ADC
tristate "Atmel AT91 ADC"
depends on ARCH_AT91
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 0a825bed43f6..c9150844cbc2 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_AD7476) += ad7476.o
obj-$(CONFIG_AD7791) += ad7791.o
obj-$(CONFIG_AD7793) += ad7793.o
obj-$(CONFIG_AD7887) += ad7887.o
+obj-$(CONFIG_APALIS_TK1_K20_ADC) += apalis-tk1-k20_adc.o
obj-$(CONFIG_AT91_ADC) += at91_adc.o
obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
diff --git a/drivers/iio/adc/apalis-tk1-k20_adc.c b/drivers/iio/adc/apalis-tk1-k20_adc.c
new file mode 100644
index 000000000000..d87cf61059cc
--- /dev/null
+++ b/drivers/iio/adc/apalis-tk1-k20_adc.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2016-2017 Toradex AG
+ * Dominik Sliwa <dominik.sliwa@toradex.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/mfd/apalis-tk1-k20.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/iio/machine.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct apalis_tk1_k20_adc {
+ struct iio_dev chip;
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20;
+};
+
+static int apalis_tk1_k20_get_adc_result(struct apalis_tk1_k20_adc *adc, int id,
+ int *val)
+{
+ uint8_t adc_read[2];
+ int adc_register;
+
+ switch (id) {
+ case APALIS_TK1_K20_ADC1:
+ adc_register = APALIS_TK1_K20_ADC_CH0L;
+ break;
+ case APALIS_TK1_K20_ADC2:
+ adc_register = APALIS_TK1_K20_ADC_CH1L;
+ break;
+ case APALIS_TK1_K20_ADC3:
+ adc_register = APALIS_TK1_K20_ADC_CH2L;
+ break;
+ case APALIS_TK1_K20_ADC4:
+ adc_register = APALIS_TK1_K20_ADC_CH3L;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ apalis_tk1_k20_lock(adc->apalis_tk1_k20);
+
+ if (apalis_tk1_k20_reg_read_bulk(adc->apalis_tk1_k20, adc_register,
+ adc_read, 2) < 0) {
+ apalis_tk1_k20_unlock(adc->apalis_tk1_k20);
+ return -EIO;
+ }
+ *val = (adc_read[1] << 8) + adc_read[0];
+
+ apalis_tk1_k20_unlock(adc->apalis_tk1_k20);
+
+ return 0;
+}
+
+static int apalis_tk1_k20_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct apalis_tk1_k20_adc *adc = iio_priv(indio_dev);
+ enum apalis_tk1_k20_adc_id id = chan->channel;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = apalis_tk1_k20_get_adc_result(adc, id, val) ?
+ -EIO : IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ *val = APALIS_TK1_K20_VADC_MILI;
+ *val2 = APALIS_TK1_K20_ADC_BITS;
+ ret = IIO_VAL_FRACTIONAL_LOG2;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct iio_info apalis_tk1_k20_adc_info = {
+ .read_raw = &apalis_tk1_k20_adc_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+#define APALIS_TK1_K20_CHAN(_id, _type) { \
+ .type = _type, \
+ .indexed = 1, \
+ .channel = APALIS_TK1_K20_##_id, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = #_id, \
+}
+
+static const struct iio_chan_spec apalis_tk1_k20_adc_channels[] = {
+ [APALIS_TK1_K20_ADC1] = APALIS_TK1_K20_CHAN(ADC1, IIO_VOLTAGE),
+ [APALIS_TK1_K20_ADC2] = APALIS_TK1_K20_CHAN(ADC2, IIO_VOLTAGE),
+ [APALIS_TK1_K20_ADC3] = APALIS_TK1_K20_CHAN(ADC3, IIO_VOLTAGE),
+ [APALIS_TK1_K20_ADC4] = APALIS_TK1_K20_CHAN(ADC4, IIO_VOLTAGE),
+};
+
+
+static int __init apalis_tk1_k20_adc_probe(struct platform_device *pdev)
+{
+ struct apalis_tk1_k20_adc *priv;
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = iio_device_alloc(sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ apalis_tk1_k20 = dev_get_drvdata(pdev->dev.parent);
+ if (!apalis_tk1_k20) {
+ ret = -ENODEV;
+ goto err_iio_device;
+ }
+
+ priv = iio_priv(indio_dev);
+ priv->apalis_tk1_k20 = apalis_tk1_k20;
+
+ apalis_tk1_k20_lock(apalis_tk1_k20);
+
+ /* Enable ADC */
+ apalis_tk1_k20_reg_write(apalis_tk1_k20, APALIS_TK1_K20_ADCREG, 0x01);
+
+ apalis_tk1_k20_unlock(apalis_tk1_k20);
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = pdev->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &apalis_tk1_k20_adc_info;
+ indio_dev->channels = apalis_tk1_k20_adc_channels;
+ indio_dev->num_channels = ARRAY_SIZE(apalis_tk1_k20_adc_channels);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "iio dev register err: %d\n", ret);
+ goto err_iio_device;
+ }
+
+ return 0;
+
+err_iio_device:
+ iio_device_free(indio_dev);
+ return ret;
+}
+
+static int __exit apalis_tk1_k20_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20 = dev_get_drvdata(
+ pdev->dev.parent);
+
+ apalis_tk1_k20_lock(apalis_tk1_k20);
+
+ /* Disable ADC */
+ apalis_tk1_k20_reg_write(apalis_tk1_k20, APALIS_TK1_K20_ADCREG, 0x00);
+
+ apalis_tk1_k20_unlock(apalis_tk1_k20);
+
+ iio_device_unregister(indio_dev);
+ iio_device_free(indio_dev);
+
+ return 0;
+}
+
+static const struct platform_device_id apalis_tk1_k20_adc_idtable[] = {
+ {
+ .name = "apalis-tk1-k20-adc",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, apalis_tk1_k20_adc_idtable);
+
+static struct platform_driver apalis_tk1_k20_adc_driver = {
+ .id_table = apalis_tk1_k20_adc_idtable,
+ .remove = __exit_p(apalis_tk1_k20_adc_remove),
+ .driver = {
+ .name = "apalis-tk1-k20-adc",
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver_probe(apalis_tk1_k20_adc_driver,
+ &apalis_tk1_k20_adc_probe);
+
+MODULE_DESCRIPTION("K20 ADCs on Apalis TK1");
+MODULE_AUTHOR("Dominik Sliwa");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 36604667f2ae..f8d6506917ed 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -86,6 +86,16 @@ config TOUCHSCREEN_AD7879_SPI
To compile this driver as a module, choose M here: the
module will be called ad7879-spi.
+config TOUCHSCREEN_APALIS_TK1_K20
+ tristate "K20 based touchscreen controller on Apalis TK1"
+ depends on MFD_APALIS_TK1_K20
+ help
+ Say Y here if you want to support the touchscreen controller
+ implemented in the K20 MCU found on the Apalis TK1.
+
+ To compile this driver as a module, choose M here: the module will be
+ called apalis-tk1-k20_ts.
+
config TOUCHSCREEN_ATMEL_MXT
tristate "Atmel mXT I2C Touchscreen"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 3d7467601b5c..bc35df0ee270 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o
obj-$(CONFIG_TOUCHSCREEN_AD7879_I2C) += ad7879-i2c.o
obj-$(CONFIG_TOUCHSCREEN_AD7879_SPI) += ad7879-spi.o
obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
+obj-$(CONFIG_TOUCHSCREEN_APALIS_TK1_K20) += apalis-tk1-k20_ts.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR) += auo-pixcir-ts.o
diff --git a/drivers/input/touchscreen/apalis-tk1-k20_ts.c b/drivers/input/touchscreen/apalis-tk1-k20_ts.c
new file mode 100644
index 000000000000..55aaa6f07fb9
--- /dev/null
+++ b/drivers/input/touchscreen/apalis-tk1-k20_ts.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2016-2017 Toradex AG
+ * Dominik Sliwa <dominik.sliwa@toradex.com>
+ *
+ * Based on driver for the Freescale Semiconductor MC13783 touchscreen by:
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2009 Sascha Hauer, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/mfd/apalis-tk1-k20.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#define APALIS_TK1_K20_TS_NAME "apalis-tk1-k20-ts"
+
+struct apalis_tk1_k20_ts {
+ struct input_dev *idev;
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20;
+ struct delayed_work work;
+ struct workqueue_struct *workq;
+ uint16_t sample[4];
+};
+
+static irqreturn_t apalis_tk1_k20_ts_handler(int irq, void *data)
+{
+ struct apalis_tk1_k20_ts *priv = data;
+
+ /*
+ * Kick off reading coordinates. Note that if the work happens to
+ * already be queued for future execution (it rearms itself), it will
+ * not be rescheduled for immediate execution here. However, the rearm
+ * delay is HZ / 25, which is acceptable.
+ */
+ queue_delayed_work(priv->workq, &priv->work, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void apalis_tk1_k20_ts_report_sample(struct apalis_tk1_k20_ts *priv)
+{
+ struct input_dev *idev = priv->idev;
+ int xp, xm, yp, ym;
+ int x, y, press, btn;
+
+ xp = priv->sample[1];
+ xm = priv->sample[0];
+ yp = priv->sample[3];
+ ym = priv->sample[2];
+
+ x = (xp + xm) / 2;
+ y = (yp + ym) / 2;
+ press = (abs(yp - ym) + abs(xp - xm)) / 2;
+
+ if ((yp != 0) && (xp != 0)) {
+ btn = 1;
+ input_report_abs(idev, ABS_X, x);
+ input_report_abs(idev, ABS_Y, y);
+
+ dev_dbg(&idev->dev, "report (%d, %d, %d)\n",
+ x, y, press);
+ queue_delayed_work(priv->workq, &priv->work, HZ / 25);
+ } else {
+ dev_dbg(&idev->dev, "report release\n");
+ btn = 0;
+ }
+
+ input_report_abs(idev, ABS_PRESSURE, press);
+ input_report_key(idev, BTN_TOUCH, btn);
+ input_sync(idev);
+}
+
+static void apalis_tk1_k20_ts_work(struct work_struct *work)
+{
+ struct apalis_tk1_k20_ts *priv =
+ container_of(work, struct apalis_tk1_k20_ts, work.work);
+ uint8_t buf[8], i;
+
+ apalis_tk1_k20_lock(priv->apalis_tk1_k20);
+
+ if (apalis_tk1_k20_reg_read_bulk(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_TSC_XML, buf, 8) < 0) {
+ apalis_tk1_k20_unlock(priv->apalis_tk1_k20);
+ dev_err(&priv->idev->dev, "Error reading data\n");
+ return;
+ }
+
+ apalis_tk1_k20_unlock(priv->apalis_tk1_k20);
+
+ for (i = 0; i < 4; i++)
+ priv->sample[i] = (buf[(2 * i) + 1] << 8) + buf[2 * i];
+
+ apalis_tk1_k20_ts_report_sample(priv);
+}
+
+static int apalis_tk1_k20_ts_open(struct input_dev *dev)
+{
+ struct apalis_tk1_k20_ts *priv = input_get_drvdata(dev);
+ int ret;
+
+ apalis_tk1_k20_lock(priv->apalis_tk1_k20);
+
+ ret = apalis_tk1_k20_irq_request(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_TSC_IRQ, apalis_tk1_k20_ts_handler,
+ APALIS_TK1_K20_TS_NAME, priv);
+ if (ret)
+ goto out;
+
+ ret = apalis_tk1_k20_reg_rmw(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_TSCREG, APALIS_TK1_K20_TSC_ENA_MASK,
+ APALIS_TK1_K20_TSC_ENA);
+ if (ret)
+ apalis_tk1_k20_irq_free(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_TSC_IRQ, priv);
+
+out:
+ apalis_tk1_k20_unlock(priv->apalis_tk1_k20);
+
+ return ret;
+}
+
+static void apalis_tk1_k20_ts_close(struct input_dev *dev)
+{
+ struct apalis_tk1_k20_ts *priv = input_get_drvdata(dev);
+
+ apalis_tk1_k20_lock(priv->apalis_tk1_k20);
+
+ apalis_tk1_k20_reg_rmw(priv->apalis_tk1_k20, APALIS_TK1_K20_TSCREG,
+ APALIS_TK1_K20_TSC_ENA_MASK, 0);
+ apalis_tk1_k20_irq_free(priv->apalis_tk1_k20, APALIS_TK1_K20_TSC_IRQ,
+ priv);
+
+ apalis_tk1_k20_unlock(priv->apalis_tk1_k20);
+
+ cancel_delayed_work_sync(&priv->work);
+}
+
+static int __init apalis_tk1_k20_ts_probe(struct platform_device *pdev)
+{
+ struct apalis_tk1_k20_ts *priv;
+ struct input_dev *idev;
+ int ret = -ENOMEM;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ idev = input_allocate_device();
+ if (!priv || !idev)
+ goto err_free_mem;
+
+ INIT_DELAYED_WORK(&priv->work, apalis_tk1_k20_ts_work);
+
+ priv->apalis_tk1_k20 = dev_get_drvdata(pdev->dev.parent);
+ priv->idev = idev;
+
+ priv->workq = create_singlethread_workqueue("apalis_tk1_k20_ts");
+ if (!priv->workq)
+ goto err_free_mem;
+
+ idev->name = APALIS_TK1_K20_TS_NAME;
+ idev->dev.parent = &pdev->dev;
+
+ idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ input_set_abs_params(idev, ABS_X, 0, 0xfff, 0, 0);
+ input_set_abs_params(idev, ABS_Y, 0, 0xfff, 0, 0);
+ input_set_abs_params(idev, ABS_PRESSURE, 0, 0xfff, 0, 0);
+
+ idev->open = apalis_tk1_k20_ts_open;
+ idev->close = apalis_tk1_k20_ts_close;
+
+ input_set_drvdata(idev, priv);
+
+ ret = input_register_device(priv->idev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "register input device failed with %d\n", ret);
+ goto err_destroy_wq;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+
+err_destroy_wq:
+ destroy_workqueue(priv->workq);
+err_free_mem:
+ input_free_device(idev);
+ kfree(priv);
+
+ return ret;
+}
+
+static int __exit apalis_tk1_k20_ts_remove(struct platform_device *pdev)
+{
+ struct apalis_tk1_k20_ts *priv = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ destroy_workqueue(priv->workq);
+ input_unregister_device(priv->idev);
+ kfree(priv);
+
+ return 0;
+}
+
+static const struct platform_device_id apalis_tk1_k20_ts_idtable[] = {
+ {
+ .name = "apalis-tk1-k20-ts",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, apalis_tk1_k20_ts_idtable);
+
+static struct platform_driver apalis_tk1_k20_ts_driver = {
+ .id_table = apalis_tk1_k20_ts_idtable,
+ .remove = __exit_p(apalis_tk1_k20_ts_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = APALIS_TK1_K20_TS_NAME,
+ },
+};
+
+module_platform_driver_probe(apalis_tk1_k20_ts_driver,
+ &apalis_tk1_k20_ts_probe);
+
+MODULE_DESCRIPTION("K20 touchscreen controller on Apalis TK1");
+MODULE_AUTHOR("Dominik Sliwa <dominik.sliwa@toradex.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/soc_camera/Kconfig b/drivers/media/i2c/soc_camera/Kconfig
index 4e8b50bf64ec..085553dc2d00 100644
--- a/drivers/media/i2c/soc_camera/Kconfig
+++ b/drivers/media/i2c/soc_camera/Kconfig
@@ -12,6 +12,12 @@ config SOC_CAMERA_AR0330
help
This driver supports AR0330 camera from Aptina
+config SOC_CAMERA_AP1302
+ tristate "AP1302 ISP support"
+ depends on SOC_CAMERA && I2C
+ help
+ This driver supports AP1302 ISP from Aptina
+
config SOC_CAMERA_IMX074
tristate "imx074 support"
depends on SOC_CAMERA && I2C
@@ -115,3 +121,15 @@ config SOC_CAMERA_TW9910
depends on SOC_CAMERA && I2C
help
This is a tw9910 video driver
+
+config SOC_CAMERA_TC358743
+ tristate "tc358743 support"
+ depends on SOC_CAMERA && I2C
+ help
+ This is a tc358743 video driver
+
+config SOC_CAMERA_ADV7280
+ tristate "adv7280m support"
+ depends on SOC_CAMERA && I2C
+ help
+ This is an adv7280 video decoder driver
diff --git a/drivers/media/i2c/soc_camera/Makefile b/drivers/media/i2c/soc_camera/Makefile
index faea5137dc83..9bf568334d84 100644
--- a/drivers/media/i2c/soc_camera/Makefile
+++ b/drivers/media/i2c/soc_camera/Makefile
@@ -17,3 +17,6 @@ obj-$(CONFIG_SOC_CAMERA_OV9640) += ov9640.o
obj-$(CONFIG_SOC_CAMERA_OV9740) += ov9740.o
obj-$(CONFIG_SOC_CAMERA_RJ54N1) += rj54n1cb0c.o
obj-$(CONFIG_SOC_CAMERA_TW9910) += tw9910.o
+obj-$(CONFIG_SOC_CAMERA_AP1302) += ap1302.o
+obj-$(CONFIG_SOC_CAMERA_TC358743) += tc358743.o
+obj-$(CONFIG_SOC_CAMERA_ADV7280) += adv7280.o
diff --git a/drivers/media/i2c/soc_camera/adv7280.c b/drivers/media/i2c/soc_camera/adv7280.c
new file mode 100644
index 000000000000..d5e7391b6b75
--- /dev/null
+++ b/drivers/media/i2c/soc_camera/adv7280.c
@@ -0,0 +1,834 @@
+/*
+ * ADV7280 camera decoder driver, based on ADV7180 driver
+ *
+ * Copyright (c) 2014 Antmicro Ltd <www.antmicro.com>
+ * Based on ADV7180 video decoder driver,
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <media/v4l2-ioctl.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/soc_camera.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+
+#define DRIVER_NAME "adv7280"
+
+#define HW_DEINT /* Enable hardware deinterlacer */
+#define CSI
+#define VPP_SLAVE_ADDRESS 0x42
+#define CSI_SLAVE_ADDRESS 0x44
+
+/* User Sub Map Regs */
+#define ADV7280_INPUT_CONTROL 0x00
+#define ADV7280_VIDEO_SELECTION_1 0x01
+#define ADV7280_VIDEO_SELECTION_2 0x02
+#define ADV7280_OUTPUT_CONTROL 0x03
+#define ADV7280_EXTENDED_OUTPUT_CONTROL 0x04
+#define ADV7280_AUTODETECT_ENABLE 0x07
+#define ADV7280_ADI_CONTROL_1 0x0E
+#define ADV7280_POWER_MANAGEMENT 0x0F
+#define ADV7280_STATUS_1 0x10
+#define ADV7280_IDENT 0x11
+#define ADV7280_SHAPING_FILTER_CONTROL_1 0x17
+#define ADV7280_ADI_CONTROL_2 0x1D
+#define ADV7280_PIXEL_DELAY_CONTROL 0x27
+#define ADV7280_VPP_SLAVE_ADDRESS 0xFD
+#define ADV7280_CSI_TX_SLAVE_ADDRESS 0xFE
+#define ADV7280_OUTPUT_SYNC_SELECT_2 0x6B
+
+/* VPP regs (Video Postprocessor) */
+#define VPP_DEINT_RESET 0x41
+#define VPP_I2C_DEINT_ENABLE 0x55
+#define VPP_ADV_TIMING_MODE_EN 0x5B
+
+/* CSI regs (Camera Serial Interface, MIPI-CSI2) */
+#define CSI_CSITX_PWRDN 0x00
+#define CSI_DPHY_PWRDN_CTL 0xDE
+
+/* other */
+#define ADV7280_EXTENDED_OUTPUT_CONTROL_NTSCDIS 0xC5
+#define ADV7280_AUTODETECT_DEFAULT 0x7f
+
+#define ADV7280_INPUT_CONTROL_COMPOSITE_IN1 0x00
+#define ADV7280_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM 0x00
+
+#define ADV7280_INPUT_CONTROL_NTSC_M 0x50
+#define ADV7280_INPUT_CONTROL_PAL60 0x60
+#define ADV7280_INPUT_CONTROL_NTSC_443 0x70
+#define ADV7280_INPUT_CONTROL_PAL_BG 0x80
+#define ADV7280_INPUT_CONTROL_PAL_N 0x90
+#define ADV7280_INPUT_CONTROL_PAL_M 0xa0
+#define ADV7280_INPUT_CONTROL_PAL_M_PED 0xb0
+#define ADV7280_INPUT_CONTROL_PAL_COMB_N 0xc0
+#define ADV7280_INPUT_CONTROL_PAL_COMB_N_PED 0xd0
+#define ADV7280_INPUT_CONTROL_PAL_SECAM 0xe0
+
+#define ADV7280_ADI_CTRL_IRQ_SPACE 0x20
+
+#define ADV7280_STATUS1_IN_LOCK 0x01
+#define ADV7280_STATUS1_AUTOD_MASK 0x70
+#define ADV7280_STATUS1_AUTOD_NTSM_M_J 0x00
+#define ADV7280_STATUS1_AUTOD_NTSC_4_43 0x10
+#define ADV7280_STATUS1_AUTOD_PAL_M 0x20
+#define ADV7280_STATUS1_AUTOD_PAL_60 0x30
+#define ADV7280_STATUS1_AUTOD_PAL_B_G 0x40
+#define ADV7280_STATUS1_AUTOD_SECAM 0x50
+#define ADV7280_STATUS1_AUTOD_PAL_COMB 0x60
+#define ADV7280_STATUS1_AUTOD_SECAM_525 0x70
+
+#define ADV7280_ICONF1_ADI 0x40
+#define ADV7280_ICONF1_ACTIVE_LOW 0x01
+#define ADV7280_ICONF1_PSYNC_ONLY 0x10
+
+#define ADV7280_IMR1_ADI 0x44
+#define ADV7280_IMR2_ADI 0x48
+#define ADV7280_IRQ3_AD_CHANGE 0x08
+#define ADV7280_ISR3_ADI 0x4A
+#define ADV7280_ICR3_ADI 0x4B
+#define ADV7280_IMR3_ADI 0x4C
+#define ADV7280_IMR4_ADI 0x50
+
+struct adv7280_state {
+ struct v4l2_subdev sd;
+ struct work_struct work;
+ struct mutex mutex; /* mutual excl. when accessing chip */
+ int irq;
+ v4l2_std_id curr_norm;
+ bool autodetect;
+ int active_input;
+};
+
+static v4l2_std_id adv7280_std_to_v4l2(u8 status1)
+{
+ switch (status1 & ADV7280_STATUS1_AUTOD_MASK) {
+ case ADV7280_STATUS1_AUTOD_NTSM_M_J:
+ return V4L2_STD_NTSC;
+ case ADV7280_STATUS1_AUTOD_NTSC_4_43:
+ return V4L2_STD_NTSC_443;
+ case ADV7280_STATUS1_AUTOD_PAL_M:
+ return V4L2_STD_PAL_M;
+ case ADV7280_STATUS1_AUTOD_PAL_60:
+ return V4L2_STD_PAL_60;
+ case ADV7280_STATUS1_AUTOD_PAL_B_G:
+ return V4L2_STD_PAL;
+ case ADV7280_STATUS1_AUTOD_SECAM:
+ return V4L2_STD_SECAM;
+ case ADV7280_STATUS1_AUTOD_PAL_COMB:
+ return V4L2_STD_PAL_Nc | V4L2_STD_PAL_N;
+ case ADV7280_STATUS1_AUTOD_SECAM_525:
+ return V4L2_STD_SECAM;
+ default:
+ return V4L2_STD_UNKNOWN;
+ }
+}
+
+static int v4l2_std_to_adv7280(v4l2_std_id std)
+{
+ if (std == V4L2_STD_PAL_60)
+ return ADV7280_INPUT_CONTROL_PAL60;
+ if (std == V4L2_STD_NTSC_443)
+ return ADV7280_INPUT_CONTROL_NTSC_443;
+ if (std == V4L2_STD_PAL_N)
+ return ADV7280_INPUT_CONTROL_PAL_N;
+ if (std == V4L2_STD_PAL_M)
+ return ADV7280_INPUT_CONTROL_PAL_M;
+ if (std == V4L2_STD_PAL_Nc)
+ return ADV7280_INPUT_CONTROL_PAL_COMB_N;
+
+ if (std & V4L2_STD_PAL)
+ return ADV7280_INPUT_CONTROL_PAL_BG;
+ if (std & V4L2_STD_NTSC)
+ return ADV7280_INPUT_CONTROL_NTSC_M;
+ if (std & V4L2_STD_SECAM)
+ return ADV7280_INPUT_CONTROL_PAL_SECAM;
+
+ return -EINVAL;
+}
+
+static int adv7280_write_reg(struct i2c_client *client, u8 reg, u8 val)
+{
+ return i2c_smbus_write_byte_data(client, reg, val);
+}
+
+static u32 adv7280_status_to_v4l2(u8 status1)
+{
+ if (!(status1 & ADV7280_STATUS1_IN_LOCK))
+ return V4L2_IN_ST_NO_SIGNAL;
+
+ return 0;
+}
+
+static int __adv7280_status(struct i2c_client *client, u32 *status,
+ v4l2_std_id *std)
+{
+ int status1 = i2c_smbus_read_byte_data(client, ADV7280_STATUS_1);
+
+ if (status1 < 0)
+ return status1;
+
+ if (status)
+ *status = adv7280_status_to_v4l2(status1);
+ if (std)
+ *std = adv7280_std_to_v4l2(status1);
+
+ return 0;
+}
+
+static inline struct adv7280_state *to_state(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct adv7280_state, sd);
+}
+
+static int adv7280_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ struct adv7280_state *state = to_state(sd);
+ int err = mutex_lock_interruptible(&state->mutex);
+ if (err)
+ return err;
+
+ /* when we are interrupt driven we know the state */
+ if (!state->autodetect || state->irq > 0)
+ *std = state->curr_norm;
+ else
+ err = __adv7280_status(v4l2_get_subdevdata(sd), NULL, std);
+
+ mutex_unlock(&state->mutex);
+ return err;
+}
+
+static int adv7280_g_input_status(struct v4l2_subdev *sd, u32 *status)
+{
+ struct adv7280_state *state = to_state(sd);
+ int ret = mutex_lock_interruptible(&state->mutex);
+ if (ret)
+ return ret;
+
+ ret = __adv7280_status(v4l2_get_subdevdata(sd), status, NULL);
+ mutex_unlock(&state->mutex);
+ return ret;
+}
+
+/*
+static int adv7280_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *chip)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7280, 0);
+}
+*/
+
+static int adv7280_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
+{
+ struct adv7280_state *state = to_state(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = mutex_lock_interruptible(&state->mutex);
+ if (ret)
+ return ret;
+
+ /* all standards -> autodetect */
+ if (std == V4L2_STD_ALL) {
+ ret = adv7280_write_reg(client,
+ ADV7280_INPUT_CONTROL,
+ ADV7280_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM |
+ (ADV7280_INPUT_CONTROL_COMPOSITE_IN1 +
+ state->active_input));
+ if (ret < 0)
+ goto out;
+
+ __adv7280_status(client, NULL, &state->curr_norm);
+ state->autodetect = true;
+ } else {
+ ret = v4l2_std_to_adv7280(std) |
+ (ADV7280_INPUT_CONTROL_COMPOSITE_IN1 +
+ state->active_input);
+ if (ret < 0)
+ goto out;
+
+ ret = adv7280_write_reg(client,
+ ADV7280_INPUT_CONTROL, ret);
+ if (ret < 0)
+ goto out;
+
+ state->curr_norm = std;
+ state->autodetect = false;
+ }
+ ret = 0;
+out:
+ mutex_unlock(&state->mutex);
+ return ret;
+}
+
+/*
+static int adv7280_set_bus_param(struct soc_camera_device *icd,
+ unsigned long flags)
+{
+ return 0;
+}
+*/
+
+/* Request bus settings on camera side */
+/*
+static unsigned long adv7280_query_bus_param(struct soc_camera_device *icd)
+{
+ struct soc_camera_link *icl = to_soc_camera_link(icd);
+
+ unsigned long flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_MASTER |
+ SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_HIGH |
+ SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATAWIDTH_8;
+
+ return soc_camera_apply_sensor_flags(icl, flags);
+}
+*/
+
+static enum v4l2_mbus_pixelcode adv7280_codes[] = {
+ V4L2_MBUS_FMT_UYVY8_2X8,
+};
+
+static int adv7280_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ enum v4l2_colorspace cspace;
+ enum v4l2_mbus_pixelcode code = mf->code;
+
+ switch (code) {
+ case V4L2_MBUS_FMT_UYVY8_2X8:
+ cspace = V4L2_COLORSPACE_SMPTE170M;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mf->code = code;
+ mf->colorspace = cspace;
+
+ return adv7280_s_std(sd, V4L2_STD_ALL);
+}
+
+static int adv7280_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+#ifdef HW_DEINT
+ mf->field = V4L2_FIELD_NONE;
+#else
+ mf->field = V4L2_FIELD_INTERLACED_TB;
+#endif
+ mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
+
+ /* PAL, line aligned to 64B */
+ mf->width = 736;
+ mf->height = 576;
+
+ return 0;
+}
+
+static int adv7280_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_subdev_desc *scsd = soc_camera_i2c_to_desc(client);
+
+ return soc_camera_set_power(&client->dev, scsd, on);
+}
+
+static int adv7280_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (index >= ARRAY_SIZE(adv7280_codes))
+ return -EINVAL;
+
+ *code = adv7280_codes[index];
+
+ return 0;
+}
+
+/*
+static struct soc_camera_ops adv7280_ops = {
+ .set_bus_param = adv7280_set_bus_param,
+ .query_bus_param = adv7280_query_bus_param,
+};
+*/
+
+static const struct v4l2_subdev_video_ops adv7280_video_ops = {
+ .s_mbus_fmt = adv7280_s_fmt,
+ .try_mbus_fmt = adv7280_try_fmt,
+ .enum_mbus_fmt = adv7280_enum_fmt,
+ .querystd = adv7280_querystd,
+ .g_input_status = adv7280_g_input_status,
+};
+
+static const struct v4l2_subdev_core_ops adv7280_core_ops = {
+ //.g_chip_ident = adv7280_g_chip_ident,
+ .s_std = adv7280_s_std,
+ .s_power = adv7280_s_power,
+};
+
+static const struct v4l2_subdev_ops adv7280_subdev_ops = {
+ .core = &adv7280_core_ops,
+ .video = &adv7280_video_ops,
+};
+
+static void adv7280_work(struct work_struct *work)
+{
+ struct adv7280_state *state = container_of(work, struct adv7280_state,
+ work);
+ struct i2c_client *client = v4l2_get_subdevdata(&state->sd);
+ u8 isr3;
+
+ mutex_lock(&state->mutex);
+ adv7280_write_reg(client, ADV7280_ADI_CONTROL_1,
+ ADV7280_ADI_CTRL_IRQ_SPACE);
+ isr3 = i2c_smbus_read_byte_data(client, ADV7280_ISR3_ADI);
+ /* clear */
+ adv7280_write_reg(client, ADV7280_ICR3_ADI, isr3);
+ adv7280_write_reg(client, ADV7280_ADI_CONTROL_1, 0);
+
+ if (isr3 & ADV7280_IRQ3_AD_CHANGE && state->autodetect)
+ __adv7280_status(client, NULL, &state->curr_norm);
+ mutex_unlock(&state->mutex);
+
+ enable_irq(state->irq);
+}
+
+static irqreturn_t adv7280_irq(int irq, void *devid)
+{
+ struct adv7280_state *state = devid;
+
+ schedule_work(&state->work);
+
+ disable_irq_nosync(state->irq);
+
+ return IRQ_HANDLED;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct adv7280_state *state = to_state(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u8 val;
+
+ if (i < 6) {
+ state->active_input = i;
+ val = i2c_smbus_read_byte_data(client,
+ ADV7280_INPUT_CONTROL);
+ val &= 0xf0;
+ val |= (ADV7280_INPUT_CONTROL_COMPOSITE_IN1 +
+ state->active_input);
+ return adv7280_write_reg(client,
+ ADV7280_INPUT_CONTROL, val);
+ }
+ return -EINVAL;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct soc_camera_device *icd = file->private_data;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct adv7280_state *state = to_state(sd);
+
+ *i = state->active_input;
+
+ return 0;
+}
+
+/*
+ * Generic i2c probe
+ * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
+ */
+static int adv7280_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adv7280_state *state;
+ struct soc_camera_device *icd = client->dev.platform_data;
+ struct v4l2_subdev *sd;
+ u8 ident;
+ int ret;
+ struct v4l2_ioctl_ops *ops;
+ struct i2c_client vpp_client = {
+ .flags = client->flags,
+ .addr = VPP_SLAVE_ADDRESS,
+ .name = "ADV7180_VPP_SLAVE",
+ .adapter = client->adapter,
+ .dev = client->dev,
+ };
+ struct i2c_client csi_client = {
+ .flags = client->flags,
+ .addr = CSI_SLAVE_ADDRESS,
+ .name = "ADV7180_CSI_SLAVE",
+ .adapter = client->adapter,
+ .dev = client->dev,
+ };
+
+ printk("probe, id=%s\n", id->name);
+ /* Check if the adapter supports the needed features */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EIO;
+
+ v4l_info(client, "chip found @ 0x%02x (%s)\n",
+ client->addr << 1, client->adapter->name);
+
+ state = kzalloc(sizeof(struct adv7280_state), GFP_KERNEL);
+ if (state == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ident = i2c_smbus_read_byte_data(client, ADV7280_IDENT);
+ v4l_info(client, "ident reg is 0x%02x\n", ident);
+
+ state->irq = client->irq;
+ INIT_WORK(&state->work, adv7280_work);
+ mutex_init(&state->mutex);
+ state->autodetect = true;
+ state->active_input = 0; // input 1
+ sd = &state->sd;
+ v4l2_i2c_subdev_init(sd, client, &adv7280_subdev_ops);
+ //icd->ops = &adv7280_ops;
+
+ /* Reset */
+ ret = adv7280_write_reg(client,
+ ADV7280_POWER_MANAGEMENT, 0xA0);
+ if (ret < 0)
+ goto err_unreg_subdev;
+ msleep(10);
+
+ /* Initialize adv7280 */
+ /* Exit Power Down Mode */
+ ret = adv7280_write_reg(client,
+ ADV7280_POWER_MANAGEMENT, 0x00);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* analog devices recommends */
+ ret = adv7280_write_reg(client,
+ ADV7280_ADI_CONTROL_1, 0x80);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* analog devices recommends */
+ ret = adv7280_write_reg(client,
+ 0x9C, 0x00);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* analog devices recommends */
+ ret = adv7280_write_reg(client,
+ 0x9C, 0xFF);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* Enter User Sub Map */
+ ret = adv7280_write_reg(client,
+ ADV7280_ADI_CONTROL_1, 0x00);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* Enable Pixel & Sync output drivers */
+ ret = adv7280_write_reg(client,
+ ADV7280_OUTPUT_CONTROL, 0x0C);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* Power-up INTRQ, HS & VS pads */
+ ret = adv7280_write_reg(client,
+ ADV7280_EXTENDED_OUTPUT_CONTROL, 0x07);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* Enable SH1 */
+ /*
+ ret = adv7280_write_reg(client,
+ ADV7280_SHAPING_FILTER_CONTROL_1, 0x41);
+ if (ret < 0)
+ goto err_unreg_subdev;
+ */
+
+ /* Disable comb filtering */
+ /*
+ ret = adv7280_write_reg(client,
+ 0x39, 0x24);
+ if (ret < 0)
+ goto err_unreg_subdev;
+ */
+
+ /* Enable LLC output driver */
+ ret = adv7280_write_reg(client,
+ ADV7280_ADI_CONTROL_2, 0x40);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* VSYNC on VS/FIELD/SFL pin */
+ ret = adv7280_write_reg(client,
+ ADV7280_OUTPUT_SYNC_SELECT_2, 0x01);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* Enable autodetection */
+ ret = adv7280_write_reg(client, ADV7280_INPUT_CONTROL,
+ ADV7280_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM |
+ (ADV7280_INPUT_CONTROL_COMPOSITE_IN1 +
+ state->active_input));
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ ret = adv7280_write_reg(client, ADV7280_AUTODETECT_ENABLE,
+ ADV7280_AUTODETECT_DEFAULT);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* ITU-R BT.656-4 compatible
+ ret = adv7280_write_reg(client,
+ ADV7280_EXTENDED_OUTPUT_CONTROL,
+ ADV7280_EXTENDED_OUTPUT_CONTROL_NTSCDIS);
+ if (ret < 0)
+ goto err_unreg_subdev;
+ */
+
+ /* analog devices recommends */
+ ret = adv7280_write_reg(client,
+ 0x52, 0xCD);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* analog devices recommends */
+ ret = adv7280_write_reg(client,
+ 0x80, 0x51);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* analog devices recommends */
+ ret = adv7280_write_reg(client,
+ 0x81, 0x51);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* analog devices recommends */
+ ret = adv7280_write_reg(client,
+ 0x82, 0x68);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+#ifdef HW_DEINT
+ /* Set VPP Map */
+ ret = adv7280_write_reg(client,
+ ADV7280_VPP_SLAVE_ADDRESS, (VPP_SLAVE_ADDRESS << 1));
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* VPP - not documented */
+ ret = adv7280_write_reg(&vpp_client,
+ 0xA3, 0x00);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* VPP - Enable Advanced Timing Mode */
+ ret = adv7280_write_reg(&vpp_client,
+ VPP_ADV_TIMING_MODE_EN, 0x00);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* VPP - Enable Deinterlacer */
+ ret = adv7280_write_reg(&vpp_client,
+ VPP_I2C_DEINT_ENABLE, 0x80);
+ if (ret < 0)
+ goto err_unreg_subdev;
+#endif
+
+#ifdef CSI
+ /* Set CSI Map */
+ ret = adv7280_write_reg(client,
+ ADV7280_CSI_TX_SLAVE_ADDRESS, (CSI_SLAVE_ADDRESS << 1));
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* Power up MIPI D-PHY */
+ ret = adv7280_write_reg(&csi_client,
+ CSI_DPHY_PWRDN_CTL, 0x02);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+
+ ret = adv7280_write_reg(&csi_client,
+ 0x02, 0x18);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* analog devices recommends */
+ ret = adv7280_write_reg(&csi_client,
+ 0xD2, 0xF7);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* analog devices recommends */
+ ret = adv7280_write_reg(&csi_client,
+ 0xD8, 0x65);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* analog devices recommends */
+ ret = adv7280_write_reg(&csi_client,
+ 0xE0, 0x09);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* analog devices recommends */
+ ret = adv7280_write_reg(&csi_client,
+ 0x2C, 0x00);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* MIPI-CSI2 power up */
+ ret = adv7280_write_reg(&csi_client,
+ CSI_CSITX_PWRDN, 0x00);
+ if (ret < 0)
+ goto err_unreg_subdev;
+#endif
+
+ /* read current norm */
+ __adv7280_status(client, NULL, &state->curr_norm);
+
+ /* register for interrupts */
+ if (state->irq > 0) {
+ ret = request_irq(state->irq, adv7280_irq, 0, DRIVER_NAME,
+ state);
+ if (ret)
+ goto err_unreg_subdev;
+
+ ret = adv7280_write_reg(client, ADV7280_ADI_CONTROL_1,
+ ADV7280_ADI_CTRL_IRQ_SPACE);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* config the Interrupt pin to be active low */
+ ret = adv7280_write_reg(client, ADV7280_ICONF1_ADI,
+ ADV7280_ICONF1_ACTIVE_LOW | ADV7280_ICONF1_PSYNC_ONLY);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ ret = adv7280_write_reg(client, ADV7280_IMR1_ADI, 0);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ ret = adv7280_write_reg(client, ADV7280_IMR2_ADI, 0);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ /* Enable AD change interrupts */
+ ret = adv7280_write_reg(client, ADV7280_IMR3_ADI,
+ ADV7280_IRQ3_AD_CHANGE);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ ret = adv7280_write_reg(client, ADV7280_IMR4_ADI, 0);
+ if (ret < 0)
+ goto err_unreg_subdev;
+
+ ret = adv7280_write_reg(client, ADV7280_ADI_CONTROL_1,
+ 0);
+ if (ret < 0)
+ goto err_unreg_subdev;
+ }
+
+ /*
+ * This is the only way to support more than one input: since soc_camera
+ * assumes in its own vidioc_s(g)_input implementation that only one
+ * input is present, we have to override that with our own handlers.
+ */
+ /*
+ ops = (struct v4l2_ioctl_ops*)icd->vdev->ioctl_ops;
+ ops->vidioc_s_input = &vidioc_s_input;
+ ops->vidioc_g_input = &vidioc_g_input;
+ */
+
+ return 0;
+
+err_unreg_subdev:
+ mutex_destroy(&state->mutex);
+ v4l2_device_unregister_subdev(sd);
+ kfree(state);
+err:
+ printk(KERN_ERR DRIVER_NAME ": Failed to probe: %d\n", ret);
+ return ret;
+}
+
+static int adv7280_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct adv7280_state *state = to_state(sd);
+
+ if (state->irq > 0) {
+ free_irq(client->irq, state);
+ if (cancel_work_sync(&state->work)) {
+ /*
+ * Work was pending, therefore we need to enable
+ * IRQ here to balance the disable_irq() done in the
+ * interrupt handler.
+ */
+ enable_irq(state->irq);
+ }
+ }
+
+ mutex_destroy(&state->mutex);
+ v4l2_device_unregister_subdev(sd);
+ kfree(to_state(sd));
+ return 0;
+}
+
+static const struct i2c_device_id adv7280_id[] = {
+ {"adv7280", 0},
+ {"adv7280-M", 1},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, adv7280_id);
+
+static struct i2c_driver adv7280_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+ .probe = adv7280_probe,
+ .remove = adv7280_remove,
+ .id_table = adv7280_id,
+};
+
+static __init int adv7280_init(void)
+{
+ return i2c_add_driver(&adv7280_driver);
+}
+
+static __exit void adv7280_exit(void)
+{
+ i2c_del_driver(&adv7280_driver);
+}
+
+module_init(adv7280_init);
+module_exit(adv7280_exit);
+
+MODULE_DESCRIPTION("Analog Devices ADV7280/ADV7280-M video decoder driver");
+MODULE_AUTHOR("Wojciech Bieganski <wbieganski@antmicro.com>");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/media/i2c/soc_camera/ap1302.c b/drivers/media/i2c/soc_camera/ap1302.c
new file mode 100644
index 000000000000..00a4315bcc39
--- /dev/null
+++ b/drivers/media/i2c/soc_camera/ap1302.c
@@ -0,0 +1,558 @@
+/*
+ * Copyright (c) Antmicro Ltd. All rights reserved.
+ * based on ov5640 driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/soc_camera.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+
+#define AP1302_NAME "ap1302"
+#define AP1302_CHIP_ID 0x0265
+#define AP1302_FW_WINDOW_OFFSET 0x8000
+#define AP1302_FW_WINDOW_SIZE 0x2000
+
+#define REG_CHIP_VERSION 0x0000
+#define REG_CHIP_REV 0x0050
+
+#define REG_ERROR 0x0006
+#define REG_CTRL 0x1000
+#define REG_ENABLE 0x1008
+#define REG_PREVIEW_WIDTH 0x2000
+#define REG_PREVIEW_HEIGHT 0x2002
+#define REG_PREVIEW_ROI_X0 0x2004
+#define REG_PREVIEW_ROI_Y0 0x2006
+#define REG_PREVIEW_ROI_X1 0x2008
+#define REG_PREVIEW_ENABLE 0x2010
+#define REG_PREVIEW_OUT_FMT 0x2012
+#define REG_PREVIEW_SENSOR_MODE 0x2014
+#define REG_PREVIEW_MAX_FPS 0x2020
+#define REG_PREVIEW_AE_USG 0x2022
+#define REG_PREVIEW_HINF_CTRL 0x2030
+#define REG_PREVIEW_HINF_SPOOF_W 0x2032
+#define REG_PREVIEW_HINF_SPOOF_H 0x2034
+#define REG_AE_CTRL 0x5002
+#define REG_AE_MANUAL_GAIN 0x5006
+#define REG_AE_MANUAL_EXP_TIME 0x500C
+#define REG_AE_BV_MIN 0x5010
+#define REG_AF_CTRL 0x5058
+#define REG_AWB_MANUAL_TEMP 0x510A
+#define REG_BOOTDATA_STAGE 0x6002
+#define REG_GAMMA 0x700A
+#define REG_CLS_LOCK_CONN 0x54C2
+#define REG_PREVIEW_DIV_HINF_MIPI 0x2064
+#define REG_SIPS_CRC 0xF052
+
+#define FW_OFFSET 0x8000
+#define FW_PLL_SIZE 832
+#define FW_CHUNK_SIZE 0x600
+
+#define REG_16B 2
+#define REG_32B 4
+
+static int ap1302_write_reg(struct i2c_client*, u16, u32, int);
+
+enum {
+ AP1302_MODE_640x480,
+ AP1302_MODE_1280x720,
+ AP1302_MODE_1920x1080,
+ AP1302_MODE_4224x3156,
+ AP1302_SIZE_LAST,
+};
+
+#define to_ap1302(sd) container_of(sd, struct ap1302_priv, subdev)
+
+struct ap1302_priv {
+ struct v4l2_subdev subdev;
+ struct v4l2_mbus_framefmt mf;
+
+ int ident;
+ u16 chip_id;
+ u8 revision;
+
+ int mode;
+
+ struct i2c_client *client;
+ const struct firmware *fw;
+};
+
+static enum v4l2_mbus_pixelcode ap1302_codes[] = {
+ V4L2_MBUS_FMT_UYVY8_2X8,
+};
+
+static const struct v4l2_frmsize_discrete ap1302_frmsizes[AP1302_SIZE_LAST] = {
+ {640, 480},
+ {1280, 720},
+ {1920, 1080},
+ {4224, 3156},
+};
+
+static int ap1302_find_mode(u32 width, u32 height)
+{
+ int i;
+
+ for (i = 0; i < AP1302_SIZE_LAST; i++) {
+ if ((ap1302_frmsizes[i].width >= width) &&
+ (ap1302_frmsizes[i].height >= height))
+ break;
+ }
+
+ /* If not found, select biggest */
+ if (i >= AP1302_SIZE_LAST)
+ i = AP1302_SIZE_LAST - 1;
+
+ return i;
+}
+
+static void ap1302_set_default_fmt(struct ap1302_priv *priv)
+{
+ struct v4l2_mbus_framefmt *mf = &priv->mf;
+
+ mf->width = ap1302_frmsizes[AP1302_MODE_640x480].width;
+ mf->height = ap1302_frmsizes[AP1302_MODE_640x480].height;
+ mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ mf->field = V4L2_FIELD_NONE;
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+/* Start/Stop streaming from the device */
+static int ap1302_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ap1302_priv *priv = to_ap1302(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (!enable) {
+ ap1302_set_default_fmt(priv);
+ return 0;
+ }
+
+ ap1302_write_reg(client, REG_PREVIEW_OUT_FMT, 0x0030, REG_16B);
+ ap1302_write_reg(client, REG_PREVIEW_WIDTH, priv->mf.width, REG_16B);
+ ap1302_write_reg(client, REG_PREVIEW_HEIGHT, priv->mf.height, REG_16B);
+ ap1302_write_reg(client, REG_PREVIEW_SENSOR_MODE, (priv->mf.height > 1080) ? 0x0040 : 0x0041, REG_16B);
+ ap1302_write_reg(client, REG_PREVIEW_HINF_SPOOF_W, 0x0000, REG_16B);
+ ap1302_write_reg(client, REG_PREVIEW_HINF_SPOOF_H, 0x0000, REG_16B);
+
+ return ret;
+}
+
+static int ap1302_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ int mode;
+
+ mode = ap1302_find_mode(mf->width, mf->height);
+ mf->width = ap1302_frmsizes[mode].width;
+ mf->height = ap1302_frmsizes[mode].height;
+
+ mf->field = V4L2_FIELD_NONE;
+ mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+
+ return 0;
+}
+
+/* set the format we will capture in */
+static int ap1302_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct ap1302_priv *priv = to_ap1302(sd);
+ int ret;
+
+ ret = ap1302_try_fmt(sd, mf);
+ if (ret < 0)
+ return ret;
+
+ priv->mode = ap1302_find_mode(mf->width, mf->height);
+
+ memcpy(&priv->mf, mf, sizeof(struct v4l2_mbus_framefmt));
+
+ return 0;
+}
+
+static int ap1302_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct ap1302_priv *priv = to_ap1302(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_subdev_desc *scsd = soc_camera_i2c_to_desc(client);
+
+ if (on)
+ ap1302_s_fmt(sd, &priv->mf);
+
+ return soc_camera_set_power(&client->dev, scsd, on);
+}
+
+static int ap1302_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (index >= ARRAY_SIZE(ap1302_codes))
+ return -EINVAL;
+
+ *code = ap1302_codes[index];
+
+ return 0;
+}
+
+static int ap1302_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
+{
+ return 0;
+}
+
+static int ap1302_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
+{
+ return 0;
+}
+
+static int ap1302_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ return 0;
+}
+
+/* Get chip identification */
+static int ap1302_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+{
+ struct ap1302_priv *priv = to_ap1302(sd);
+
+ id->ident = priv->ident;
+ id->revision = priv->revision;
+
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops ap1302_video_ops = {
+ .s_stream = ap1302_s_stream,
+ .s_mbus_fmt = ap1302_s_fmt,
+ .try_mbus_fmt = ap1302_try_fmt,
+ .enum_mbus_fmt = ap1302_enum_fmt,
+ .cropcap = ap1302_cropcap,
+ .g_crop = ap1302_g_crop,
+ .querystd = ap1302_querystd,
+};
+
+static struct v4l2_subdev_core_ops ap1302_core_ops = {
+ .g_chip_ident = ap1302_g_chip_ident,
+ .s_power = ap1302_s_power,
+};
+
+static struct v4l2_subdev_ops ap1302_subdev_ops = {
+ .core = &ap1302_core_ops,
+ .video = &ap1302_video_ops,
+};
+
+static int ap1302_read_reg(struct i2c_client *client, u16 addr, u16 *val)
+{
+ int err;
+ struct i2c_msg msg[2];
+ unsigned char data[4];
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 2;
+ msg[0].buf = data;
+
+ /* high byte goes out first */
+ data[0] = (u8) (addr >> 8);
+ data[1] = (u8) (addr & 0xff);
+
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = 2;
+ msg[1].buf = data + 2;
+
+ err = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+
+ if (err != 2)
+ return -EINVAL;
+
+ *val = (data[2] << 8) | data[3];
+
+ return 0;
+}
+
+static int ap1302_write_reg(struct i2c_client *client, u16 addr, u32 value, int size)
+{
+ int count;
+ struct i2c_msg msg[1];
+ unsigned char data[6];
+
+ if (!client->adapter)
+ return -ENODEV;
+
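+ /* 16-bit register address, high byte first */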
+ data[0] = (u8) (addr >> 8);
+ data[1] = (u8) (addr & 0xff);
+
+ if (size == 1) {
+ data[2] = (u8) (value & 0xff);
+ } else if (size == 2) {
+ data[2] = (u8) (value >> 8);
+ data[3] = (u8) (value & 0xff);
+ } else if (size == 4) {
+ data[2] = (u8) (value >> 24);
+ data[3] = (u8) (value >> 16);
+ data[4] = (u8) (value >> 8);
+ data[5] = (u8) (value & 0xff);
+ } else {
+ return -EINVAL;
+ }
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = (size + 2);
+ msg[0].buf = data;
+
+ count = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (count == ARRAY_SIZE(msg)) {
+ return 0;
+ }
+ dev_err(&client->dev,
+ "ap1302: i2c transfer failed, addr: %x, value: %02x\n",
+ addr, (u32)value);
+ return -EIO;
+}
+
+static int ap1302_write_bulk_reg(struct i2c_client *client, u16 reg_addr, u8 *data, int len)
+{
+ int err;
+ struct i2c_msg msg;
+ u8 data_with_addr[len+2];
+
+ if (!client->adapter)
+ return -ENODEV;
+
+ data_with_addr[0] = (u8)(reg_addr >> 8);
+ data_with_addr[1] = (u8)(reg_addr & 0xFF);
+ memcpy((u8*)&data_with_addr[2], data, len);
+
+ len = (len + 2);
+ msg.addr = client->addr;
+ msg.flags = 0;
+ msg.len = len;
+ msg.buf = data_with_addr;
+
+ err = i2c_transfer(client->adapter, &msg, 1);
+ if (err == 1)
+ return 0;
+
+ dev_err(&client->dev, "ap1302: i2c transfer failed at %x\n",
+ reg_addr);
+
+ return err;
+}
+
+static int ap1302_request_firmware(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ap1302_priv *priv = to_ap1302(sd);
+ int ret;
+ ret = request_firmware(&priv->fw, "ap1302_fw.bin", &client->dev);
+ if (ret)
+ dev_err(&client->dev,
+ "ap1302_request_firmware failed. ret=%d\n", ret);
+ return ret;
+}
+
+static int ap1302_write_firmware(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct ap1302_priv *priv = to_ap1302(sd);
+ int err = 0;
+ u32 pos = 0, ppos = 0, smpos = 0;
+ int fw_size = priv->fw->size;
+ u8 *fw_data = (u8 *)(priv->fw->data);
+ u32 packet_size = 0, sm_packet_size = 0;
+
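+ /* Seed the CRC register before starting the firmware download */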
+ err += ap1302_write_reg(client, REG_SIPS_CRC, 0xFFFF, REG_16B);
+
+ /* write pll firmware */
+ err += ap1302_write_bulk_reg(client, 0x8000, fw_data, FW_PLL_SIZE);
+
+ /* write the rest of the firmware */
+ err += ap1302_write_reg(client, REG_BOOTDATA_STAGE, 0x0002, REG_16B);
+ pos = FW_PLL_SIZE;
+ ppos = FW_PLL_SIZE;
+
+ /* split fw into packets (8192B) */
+ for (pos = FW_PLL_SIZE; pos < fw_size; pos += packet_size) {
+ if (fw_size - pos < AP1302_FW_WINDOW_SIZE - ppos)
+ packet_size = fw_size - pos;
+ else
+ packet_size = AP1302_FW_WINDOW_SIZE - ppos;
+
+ /* split each packet into chunks (each having FW_CHUNK_SIZE) */
+ sm_packet_size = FW_CHUNK_SIZE;
+ for (smpos = 0; smpos < packet_size; smpos += sm_packet_size) {
+ if ((smpos + sm_packet_size) > packet_size)
+ sm_packet_size = packet_size - smpos;
+ err += ap1302_write_bulk_reg(client, (FW_OFFSET + ppos + smpos),
+ (u8 *)(fw_data + pos + smpos), sm_packet_size);
+ }
+
+ msleep(10);
+
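+ /* Advance the write-window offset and wrap once a full window has been sent */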
+ ppos += packet_size;
+ if (ppos >= AP1302_FW_WINDOW_SIZE)
+ ppos = 0;
+ }
+
+ /* TODO: check CRC and ERROR regs */
+ ap1302_write_reg(client, REG_BOOTDATA_STAGE, 0xFFFF, REG_16B);
+
+ return err;
+}
+
+static int ap1302_probe(struct i2c_client *client,
+ const struct i2c_device_id *did)
+{
+ struct ap1302_priv *priv;
+ struct soc_camera_subdev_desc *scsd;
+ u16 chip_id = 0;
+ int ret = 0;
+
+ /* Checking soc-camera interface */
+ scsd = soc_camera_i2c_to_desc(client);
+ if (!scsd) {
+ dev_err(&client->dev, "Missing soc_camera_link for driver\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(&client->dev, sizeof(struct ap1302_priv),
+ GFP_KERNEL);
+ if (!priv) {
+ dev_err(&client->dev, "Failed to allocate private data!\n");
+ return -ENOMEM;
+ }
+
+ v4l2_i2c_subdev_init(&priv->subdev, client, &ap1302_subdev_ops);
+
+ priv->client = client;
+
+ soc_camera_power_on(&client->dev, scsd);
+
+ /* TODO: move this to the board file; as written, the
+ * driver only works on the Jetson TK1 */
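+ /* Toggle GPIO 219 low, then high, with a 100 ms settle time after each edge */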
+ gpio_set_value(219, 0);
+ msleep(100);
+ gpio_set_value(219, 1);
+ msleep(100);
+
+ ap1302_set_default_fmt(priv);
+
+ ap1302_read_reg(client, REG_CHIP_VERSION, &chip_id);
+ dev_info(&client->dev, "AP1302 chip id = 0x%04x\n", chip_id);
+ if (chip_id != 0x265) {
+ soc_camera_power_off(&client->dev, scsd);
+ return -ENODEV;
+ }
+
+ ret = ap1302_request_firmware(&(priv->subdev));
+ if (ret) {
+ dev_err(&client->dev, "Cannot request ap1302 firmware.\n");
+ return ret;
+ }
+
+ /* loading firmware */
+ ret += ap1302_write_firmware(&(priv->subdev));
+
+ /* configuring isp */
+ ret += ap1302_write_reg(client, REG_PREVIEW_WIDTH, 0x0280, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_HEIGHT, 0x01E0, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_OUT_FMT, 0x0030, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_MAX_FPS, 0xF000, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_HINF_CTRL, 0x0034, REG_16B);
+ ret += ap1302_write_reg(client, REG_AF_CTRL, 0x0000, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_ENABLE, 0x0086, REG_16B);
+ ret += ap1302_write_reg(client, REG_AE_BV_MIN, 0x0100, REG_16B);
+ ret += ap1302_write_reg(client, REG_AE_MANUAL_GAIN, 0x0000, REG_16B);
+ ret += ap1302_write_reg(client, REG_AE_CTRL, 0x002A, REG_16B);
+ ret += ap1302_write_reg(client, REG_ENABLE, 0x0001, REG_16B);
+ ret += ap1302_write_reg(client, REG_AE_MANUAL_EXP_TIME, 0x00003CF0, REG_32B);
+ ret += ap1302_write_reg(client, REG_AWB_MANUAL_TEMP, 0x11F8, REG_16B);
+ ret += ap1302_write_reg(client, REG_GAMMA, 0x2000, REG_16B);
+ ret += ap1302_write_reg(client, REG_CLS_LOCK_CONN, 0x0024, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_ROI_X0, 0x8000, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_ROI_X1, 0x7FFF, REG_16B);
+ ret += ap1302_write_reg(client, REG_CLS_LOCK_CONN, 0x0024, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_DIV_HINF_MIPI, 0x00030000, REG_32B);
+ ret += ap1302_write_reg(client, REG_CTRL, 0x0001, REG_16B);
+ ret += ap1302_write_reg(client, REG_CTRL, 0x0000, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_AE_USG, 0x0000, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_WIDTH, 0x0280, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_HEIGHT, 0x01E0, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_SENSOR_MODE, 0x0041, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_HINF_SPOOF_W, 0x0000, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_HINF_SPOOF_H, 0x0000, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_OUT_FMT, 0x0030, REG_16B);
+
+ /* setting default mode: 640x480 */
+ ret += ap1302_write_reg(client, REG_PREVIEW_WIDTH, 0x0280, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_HEIGHT, 0x01E0, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_SENSOR_MODE, 0x0041, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_HINF_SPOOF_W, 0x0000, REG_16B);
+ ret += ap1302_write_reg(client, REG_PREVIEW_HINF_SPOOF_H, 0x0000, REG_16B);
+
+ return ret;
+}
+
+static int ap1302_remove(struct i2c_client *client)
+{
+ return 0;
+}
+
+static const struct i2c_device_id ap1302_id[] = {
+ { "ap1302", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ap1302_id);
+
+static struct i2c_driver ap1302_i2c_driver = {
+ .driver = {
+ .name = "ap1302",
+ },
+ .probe = ap1302_probe,
+ .remove = ap1302_remove,
+ .id_table = ap1302_id,
+};
+
+static int __init ap1302_module_init(void)
+{
+ return i2c_add_driver(&ap1302_i2c_driver);
+}
+
+static void __exit ap1302_module_exit(void)
+{
+ i2c_del_driver(&ap1302_i2c_driver);
+}
+
+module_init(ap1302_module_init);
+module_exit(ap1302_module_exit);
+
+MODULE_DESCRIPTION("SoC Camera driver for AP1302 ISP from ON Semiconductor");
+MODULE_AUTHOR("Wojciech Bieganski <wbieganski@antmicro.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/soc_camera/ov5640.c b/drivers/media/i2c/soc_camera/ov5640.c
index 38315b514407..acd532b8b7d3 100644
--- a/drivers/media/i2c/soc_camera/ov5640.c
+++ b/drivers/media/i2c/soc_camera/ov5640.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2013 NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2017 Antmicro Ltd. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -30,1073 +31,853 @@ struct ov5640_reg {
#define OV5640_TABLE_WAIT_MS 0
#define OV5640_TABLE_END 1
+/* System and IO Pad Control [0x3000 ~ 0x3052] */
+#define OV5640_SYSTEM_RESET_00 0x3000 /* default 0x30 -> all 0x00 */
+#define OV5640_SYSTEM_RESET_01 0x3001 /* default 0x08 */
+#define OV5640_SYSTEM_RESET_02 0x3002 /* default 0x1c */
+#define OV5640_SYSTEM_RESET_03 0x3003 /* default 0x00 */
+#define OV5640_SYSTEM_CLK_EN_00 0x3004 /* default 0xcf -> all 0xff */
+#define OV5640_SYSTEM_CLK_EN_01 0x3005 /* default 0xf7 */
+#define OV5640_SYSTEM_CLK_EN_02 0x3006 /* default 0xe3 -> all 0xc3 */
+#define OV5640_SYSTEM_CLK_EN_03 0x3007 /* default 0xff */
+#define OV5640_SYSTEM_CTRL 0x3008 /* default 0x02 all -> (0x82, 0x42, 0x02) */
+#define OV5640_CHIP_ID_H 0x300a /* default 0x56 */
+#define OV5640_CHIP_ID_L 0x300b /* default 0x40 */
+#define OV5640_SYSTEM_MIPI_CTRL 0x300e /* default 0x58 -> all 0x45 */
+#define OV5640_IO_CTRL_01 0x3017 /* default 0x00 */
+#define OV5640_IO_CTRL_02 0x3018 /* default 0x00 */
+#define OV5640_SYSTEM_CTRL_2D 0x302d /* default ---- -> all 0x60 !! */
+#define OV5640_SYSTEM_CTRL_2E 0x302e /* default ---- -> all 0x08 !! */
+#define OV5640_SC_PLL_CTRL0 0x3034 /* default 0x1a -> all 0x18 MIPI_MODE */
+#define OV5640_SC_PLL_CTRL1 0x3035 /* default 0x11 -> 0x11 0x11 0x21 0x14 */
+#define OV5640_SC_PLL_CTRL2 0x3036 /* default 0x69 -> 0x7d 0x54 0x70 0x70 */
+#define OV5640_SC_PLL_CTRL3 0x3037 /* default 0x03 -> all 0x13 */
+#define OV5640_SC_PLL_CTRL5 0x3039 /* not use */
+
+#define SYSTEM_CTRL_SRST 0x80 /* Bit[7]: Software reset */
+#define SYSTEM_CTRL_PDOWN 0x40 /* Bit[6]: Software power down */
+#define MIPI_MODE_8BIT 0x08 /* Bit[3:0]: MIPI bit mode - 8-bit mode */
+#define MIPI_MODE_10BIT 0x0a /* Bit[3:0]: MIPI bit mode - 10-bit mode */
+
+/* SCCB Control [0x3100 ~ 0x3108] */
+#define OV5640_SCCB_SYSTEM_CTRL 0x3103 /* default 0x00 -> all 0x03 */
+#define OV5640_SCCB_SCLK 0x3108 /* default 0x16 -> all 0x01 */
+
+/* VCM Control [0x3600 ~ 0x3606] */
+#define OV5640_VCM_DEBUG_MODE_0 0x3600 /* default ---- -> all 0x08 */
+#define OV5640_VCM_DEBUG_MODE_1 0x3601 /* default ---- -> all 0x33 */
+
+/* Timing Control [0x3800 ~ 0x3821] */
+#define OV5640_TIMING_HS_H 0x3800 /* default 0x00 -> 0x00 0x01 0x00 0x00 X_ADDR_ST */
+#define OV5640_TIMING_HS_L 0x3801 /* default 0x00 -> 0x00 0x50 0x00 0x00 */
+#define OV5640_TIMING_VS_H 0x3802 /* default 0x00 -> 0x00 0x01 0x00 0x00 Y_ADDR_ST */
+#define OV5640_TIMING_VS_L 0x3803 /* default 0x00 -> 0x00 0xaa 0x00 0x04 */
+#define OV5640_TIMING_HW_H 0x3804 /* default 0x0a -> 0x0a 0x08 0x0a 0x0a X_ADDR_END */
+#define OV5640_TIMING_HW_L 0x3805 /* default 0x3f -> 0x3f 0xef 0x3f 0x3f 2623 */
+#define OV5640_TIMING_VH_H 0x3806 /* default 0x07 -> 0x07 0x05 0x07 0x07 Y_ADDR_END */
+#define OV5640_TIMING_VH_L 0x3807 /* default 0x9f -> 0x9f 0xf9 0x9f 0x9b */
+#define OV5640_TIMING_DVPHO_H 0x3808 /* default 0x0a -> 0x0a 0x07 0x05 0x02 width[11:8] */
+#define OV5640_TIMING_DVPHO_L 0x3809 /* default 0x20 -> 0x30 0x90 0x18 0x80 width[7:0] */
+#define OV5640_TIMING_DVPVO_H 0x380a /* default 0x07 -> 0x07 0x04 0x03 0x01 height[11:8] */
+#define OV5640_TIMING_DVPVO_L 0x380b /* default 0x98 -> 0x9c 0x48 0xcc 0xe0 height[7:0] */
+#define OV5640_TIMING_HTS_H 0x380c /* default 0x0b -> 0x0b 0x09 0x07 0x07 */
+#define OV5640_TIMING_HTS_L 0x380d /* default 0x1c -> 0x1c 0xc4 0x5e 0x68 */
+#define OV5640_TIMING_VTS_H 0x380e /* default 0x07 -> 0x07 0x04 0x03 0x03 */
+#define OV5640_TIMING_VTS_L 0x380f /* default 0xb0 -> 0xb0 0x60 0xde 0xd8 */
+#define OV5640_TIMING_HOFFSET_H 0x3810 /* default 0x00 */
+#define OV5640_TIMING_HOFFSET_L 0x3811 /* default 0x10 -> 0x08 0x08 0x06 0x10 */
+#define OV5640_TIMING_VOFFSET_H 0x3812 /* default 0x00 */
+#define OV5640_TIMING_VOFFSET_L 0x3813 /* default 0x04 -> 0x02 0x04 0x02 0x06 */
+#define OV5640_TIMING_X_INC 0x3814 /* default 0x11 -> 0x11 0x11 0x31 0x31 */
+#define OV5640_TIMING_Y_INC 0x3815 /* default 0x11 -> 0x11 0x11 0x31 0x31 */
+#define OV5640_TIMING_REG20 0x3820 /* default 0x40 */
+#define OV5640_TIMING_REG21 0x3821 /* default 0x00 -> 0x06 0x06 0x07 0x07 */
+#define REG20_SENSOR_VFLIP 0x02
+#define REG20_SENSOR_ISP_FLIP 0x04
+#define REG21_BINNING_EN 0x01
+#define REG21_SENSOR_MIRROR 0x02
+#define REG21_ISP_MIRROR 0x04
+
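+/* Split 16-bit timing values into high/low register byte pairs */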
+#define OV5640_ADDR_H(x) (((x) >> 8) & 0x0f)
+#define OV5640_ADDR_L(x) ((x) & 0xff)
+#define OV5640_WIDTH_H(x) (((x) >> 8) & 0x0f)
+#define OV5640_WIDTH_L(x) ((x) & 0xff)
+#define OV5640_HEIGHT_H(x) (((x) >> 8) & 0x0f)
+#define OV5640_HEIGHT_L(x) ((x) & 0xff)
+#define OV5640_HTS_H(x) (((x) >> 8) & 0x1f)
+#define OV5640_HTS_L(x) ((x) & 0xff)
+#define OV5640_VTS_H(x) (((x) >> 8) & 0xff)
+#define OV5640_VTS_L(x) ((x) & 0xff)
+#define OV5640_H_OFFSET_H(x) (((x) >> 8) & 0x0f)
+#define OV5640_H_OFFSET_L(x) ((x) & 0xff)
+#define OV5640_V_OFFSET_H(x) (((x) >> 8) & 0x03)
+#define OV5640_V_OFFSET_L(x) ((x) & 0xff)
+
+/* AEC/AGC power down domain control [0x3A00 ~ 0x3A25] */
+#define OV5640_AEC_CTRL_0F 0x3a0f /* default 0x78 -> all 0x30 */
+#define OV5640_AEC_CTRL_10 0x3a10 /* default 0x68 -> all 0x28 */
+#define OV5640_AEC_CTRL_11 0x3a11 /* default 0xd0 -> all 0x60 */
+#define OV5640_AEC_CTRL_1B 0x3a1b /* default 0x78 -> all 0x30 */
+#define OV5640_AEC_CTRL_1E 0x3a1e /* default 0x68 -> all 0x26 */
+#define OV5640_AEC_CTRL_1F 0x3a1f /* default 0x40 -> all 0x14 */
+#define OV5640_AEC_MAX_EXPO60_H 0x3a02 /* default 0x3d -> 0x07 0x04 0x03 0x03 */
+#define OV5640_AEC_MAX_EXPO60_L 0x3a03 /* default 0x80 -> 0xb6 0x60 0xd8 0xd8 */
+#define OV5640_AEC_B50_STEP_H 0x3a08 /* default 0x01 */
+#define OV5640_AEC_B50_STEP_L 0x3a09 /* default 0x27 -> 0x27 0x50 0x27 0x27 */
+#define OV5640_AEC_B60_STEP_H 0x3a0a /* default 0x00 -> 0x00 0x01 0x00 0x00 */
+#define OV5640_AEC_B60_STEP_L 0x3a0b /* default 0xf6 -> 0xf6 0x18 0xf6 0xf6 */
+#define OV5640_AEC_CTRL_0D 0x3a0d /* default 0x08 -> 0x06 0x03 0x03 0x03 */
+#define OV5640_AEC_CTRL_0E 0x3a0e /* default 0x06 -> 0x06 0x04 0x04 0x04 */
+#define OV5640_AEC_CTRL_13 0x3a13 /* default 0x40 -> all 0x43 */
+#define OV5640_AEC_MAX_EXPO_H 0x3a14 /* default 0x0e -> 0x07 0x04 0x03 0x03 */
+#define OV5640_AEC_MAX_EXPO_L 0x3a15 /* default 0x40 -> 0xb0 0xd0 0xd8 0xd8 */
+#define OV5640_AEC_GAIN_CEILING_0 0x3a18 /* default 0x03 -> all 0x00 */
+#define OV5640_AEC_GAIN_CEILING_1 0x3a19 /* default 0xE0 -> all 0xf8 */
+
+/* 50/60Hz detector control [0x3C00 ~ 0x3C1E] */
+#define OV5640_5060_CTRL_01 0x3c01 /* default 0x00 -> all 0x34 */
+#define OV5640_5060_CTRL_04 0x3c04 /* default 0x20 -> all 0x28 */
+#define OV5640_5060_CTRL_05 0x3c05 /* default 0x70 -> all 0x98 */
+
+#define OV5640_5060_THRESHOLD_1_H 0x3c06 /* default 0x00 -> all 0x00 */
+#define OV5640_5060_THRESHOLD_1_L 0x3c07 /* default 0x00 -> all 0x07 0x07 0x07 0x07 0x08 */
+#define OV5640_5060_THRESHOLD_2_H 0x3c08 /* default 0x01 -> all 0x00 */
+#define OV5640_5060_THRESHOLD_2_L 0x3c09 /* default 0x2c -> all 0x1c */
+#define OV5640_5060_SAMPLE_NUM_H 0x3c0a /* default 0x4e -> all 0x9c */
+#define OV5640_5060_SAMPLE_NUM_L 0x3c0b /* default 0x1f -> all 0x40 */
+
+/* BLC Control [0x4000 ~ 0x4033] */
+#define OV5640_BLC_CTRL_01 0x4001 /* default 0x00 -> 0x02 */
+#define OV5640_BLC_CTRL_04 0x4004 /* default 0x08 -> 0x06 0x06 0x02 0x02 */
+
+/* MIPI control [0x4800 ~ 0x4837] */
+#define OV5640_MIPI_CTRL_00 0x4800 /* default 0x04 */
+#define OV5640_MIPI_CTRL_01 0x4801 /* not use */
+#define OV5640_MIPI_CTRL_05 0x4805 /* not use */
+#define OV5640_MIPI_PCLK_PERIOD 0x4837 /* default 0x10 -> 0x0a 0x0a 0x10 0x44 */
+
+/* Format Control [0x4300 ~ 0x430D] */
+#define FMT_CTRL_00 0x4300 /* default 0xF8 -> all 0x32 */
+
+/* Format Control */
+#define FMT_YUV422 0x30
+/* Output sequence */
+#define OFMT_YUYV 0x00
+#define OFMT_YVYU 0x01
+#define OFMT_UYVY 0x02
+#define OFMT_VYUY 0x03
+
+/* ISP Control [0x5000 ~ 0x5063] */
+#define OV5640_ISP_CTRL_00 0x5000 /* default 0x06 -> all 0xa7 */
+#define OV5640_ISP_CTRL_01 0x5001 /* default 0x01 -> 0x83 0x83 0x83 0xa3 */
+#define OV5640_ISP_MUX_CTRL 0x501F /* default 0x00 -> all 0x00 (000: ISP YUV422) */
+#define OV5640_ISP_DEBUG_25 0x5025 /* default ---- -> all 0x00 */
+#define OV5640_ISP_TEST 0x503D /* default 0x00 */
+#define OV5640_ISP_DEBUG_3E 0x503E /* default ---- */
+#define OV5640_ISP_DEBUG_46 0x5046 /* default ---- */
+
+#define ISP_SCALE_DIGITAL 0x80 /* Special digital effect */
+#define ISP_SCALE_EN 0x20 /* Scale enable */
+#define ISP_AUTO_BALANCE_EN 0x01 /* Auto white balance enable */
+#define ISP_COLOR_MATRIX_EN 0x02 /* Color matrix enable */
+#define ISP_TEST_EN 0x80
+#define ISP_TEST_00 0x00 /* 00: Standard eight color bar */
+#define ISP_TEST_01 0x01 /* 01: Gradual change at vertical mode 1 */
+#define ISP_TEST_10 0x02 /* 10: Gradual change at horizontal */
+#define ISP_TEST_11 0x03 /* 11: Gradual change at vertical mode 2 */
+#define ISP_TEST_TRANSPARENT 0x20 /* Transparent */
+#define ISP_TEST_ROLLING 0x40 /* Rolling */
+
+/* AWB Control [0x5180 ~ 0x51D0] */
+#define OV5640_AWB_CTRL_00 0x5180 /* default 0xff */
+#define OV5640_AWB_CTRL_01 0x5181 /* default 0x58 -> all 0xf2 */
+#define OV5640_AWB_CTRL_02 0x5182 /* default 0x11 -> all 0x00 */
+#define OV5640_AWB_CTRL_03 0x5183 /* default 0x90 -> all 0x14 */
+#define OV5640_AWB_CTRL_04 0x5184 /* default 0x25 */
+#define OV5640_AWB_CTRL_05 0x5185 /* default 0x24 */
+#define OV5640_AWB_CTRL_06 0x5186 /* default ---- -> all 0x09 */
+#define OV5640_AWB_CTRL_07 0x5187 /* default ---- -> all 0x09 */
+#define OV5640_AWB_CTRL_08 0x5188 /* default ---- -> all 0x09 */
+#define OV5640_AWB_CTRL_09 0x5189 /* default ---- -> all 0x75 */
+#define OV5640_AWB_CTRL_10 0x518a /* default ---- -> all 0x54 */
+#define OV5640_AWB_CTRL_11 0x518b /* default ---- -> all 0xe0 */
+#define OV5640_AWB_CTRL_12 0x518c /* default ---- -> all 0xb2 */
+#define OV5640_AWB_CTRL_13 0x518d /* default ---- -> all 0x42 */
+#define OV5640_AWB_CTRL_14 0x518e /* default ---- -> all 0x3d */
+#define OV5640_AWB_CTRL_15 0x518f /* default ---- -> all 0x56 */
+#define OV5640_AWB_CTRL_16 0x5190 /* default ---- -> all 0x46 */
+#define OV5640_AWB_CTRL_17 0x5191 /* default 0xff -> all 0xf8 */
+#define OV5640_AWB_CTRL_18 0x5192 /* default 0x00 -> all 0x04 */
+#define OV5640_AWB_CTRL_19 0x5193 /* default 0xf0 -> all 0x70 */
+#define OV5640_AWB_CTRL_20 0x5194 /* default 0xf0 */
+#define OV5640_AWB_CTRL_21 0x5195 /* default 0xf0 */
+#define OV5640_AWB_CTRL_22 0x5196 /* default 0x03 */
+#define OV5640_AWB_CTRL_23 0x5197 /* default 0x02 -> all 0x01 */
+#define OV5640_AWB_CTRL_24 0x5198 /* default ---- -> all 0x04 */
+#define OV5640_AWB_CTRL_25 0x5199 /* default ---- -> all 0x12 */
+#define OV5640_AWB_CTRL_26 0x519a /* default ---- -> all 0x04 */
+#define OV5640_AWB_CTRL_27 0x519b /* default ---- -> all 0x00 */
+#define OV5640_AWB_CTRL_28 0x519c /* default ---- -> all 0x06 */
+#define OV5640_AWB_CTRL_29 0x519d /* default ---- -> all 0x82 */
+#define OV5640_AWB_CTRL_30 0x519e /* default 0x02 -> all 0x38 */
+
+/* CIP Control [0x5300 ~ 0x530F] */
+#define OV5640_CIP_SH_MT_THRES_1 0x5300 /* default 0x08 */
+#define OV5640_CIP_SH_MT_THRES_2 0x5301 /* default 0x48 -> all 0x30 */
+#define OV5640_CIP_SH_MT_OFFSET_1 0x5302 /* default 0x18 -> all 0x10 */
+#define OV5640_CIP_SH_MT_OFFSET_2 0x5303 /* default 0x0e -> all 0x00 */
+#define OV5640_CIP_DNS_THRES_1 0x5304 /* default 0x08 */
+#define OV5640_CIP_DNS_THRES_2 0x5305 /* default 0x48 -> all 0x30 */
+#define OV5640_CIP_DNS_OFFSET_1 0x5306 /* default 0x09 -> all 0x08 */
+#define OV5640_CIP_DNS_OFFSET_2 0x5307 /* default 0x16 */
+#define OV5640_CIP_SH_TH_THRES_1 0x5309 /* default 0x08 */
+#define OV5640_CIP_SH_TH_THRES_2 0x530a /* default 0x48 -> all 0x30 */
+#define OV5640_CIP_SH_TH_OFFSET_1 0x530b /* default 0x04 */
+#define OV5640_CIP_SH_TH_OFFSET_2 0x530c /* default 0x06 */
+
+/* CMX Control [0x5380 ~ 0x538B] */
+#define OV5640_CMX1 0x5381 /* default 0x20 -> all 0x1e */
+#define OV5640_CMX2 0x5382 /* default 0x64 -> all 0x5b */
+#define OV5640_CMX3 0x5383 /* default 0x08 */
+#define OV5640_CMX4 0x5384 /* default 0x30 -> all 0x0a */
+#define OV5640_CMX5 0x5385 /* default 0x80 -> all 0x7e */
+#define OV5640_CMX6 0x5386 /* default 0xc0 -> all 0x88 */
+#define OV5640_CMX7 0x5387 /* default 0xa0 -> all 0x7c */
+#define OV5640_CMX8 0x5388 /* default 0x98 -> all 0x6c */
+#define OV5640_CMX9 0x5389 /* default 0x08 -> all 0x10 */
+#define OV5640_CMX_SIGN_0 0x538A /* default 0x01 */
+#define OV5640_CMX_SIGN_1 0x538B /* default 0x98 */
+
+/* Gamma Control [0x5480 ~ 0x5490] */
+#define OV5640_GAMMA_CTRL_00 0x5480 /* default 0x00 -> all 0x01 */
+#define OV5640_GAMMA_YST_00 0x5481 /* default 0x26 -> all 0x08 */
+#define OV5640_GAMMA_YST_01 0x5482 /* default 0x35 -> all 0x14 */
+
+#define OV5640_GAMMA_YST_02 0x5483 /* default 0x48 -> all 0x28 */
+#define OV5640_GAMMA_YST_03 0x5484 /* default 0x57 -> all 0x51 */
+#define OV5640_GAMMA_YST_04 0x5485 /* default 0x63 -> all 0x65 */
+#define OV5640_GAMMA_YST_05 0x5486 /* default 0x6e -> all 0x71 */
+
+#define OV5640_GAMMA_YST_06 0x5487 /* default 0x77 -> all 0x7d */
+#define OV5640_GAMMA_YST_07 0x5488 /* default 0x80 -> all 0x87 */
+#define OV5640_GAMMA_YST_08 0x5489 /* default 0x88 -> all 0x91 */
+#define OV5640_GAMMA_YST_09 0x548a /* default 0x96 -> all 0x9a */
+#define OV5640_GAMMA_YST_0A 0x548b /* default 0xa3 -> all 0xaa */
+#define OV5640_GAMMA_YST_0B 0x548c /* default 0xaf -> all 0xb8 */
+#define OV5640_GAMMA_YST_0C 0x548d /* default 0xc5 -> all 0xcd */
+#define OV5640_GAMMA_YST_0D 0x548e /* default 0xd7 -> all 0xdd */
+#define OV5640_GAMMA_YST_0E 0x548f /* default 0xe8 -> all 0xea */
+#define OV5640_GAMMA_YST_0F 0x5490 /* default 0x0f -> all 0x1d */
+
+/* SDE Control [0x5580 ~ 0x558C] */
+#define OV5640_SDE_CTRL_0 0x5580 /* default 0x00 -> all 0x02 */
+#define OV5640_SDE_CTRL_3 0x5583 /* default 0x40 */
+#define OV5640_SDE_CTRL_4 0x5584 /* default 0x40 -> all 0x10 */
+#define OV5640_SDE_CTRL_9 0x5589 /* default 0x01 -> all 0x10 */
+#define OV5640_SDE_CTRL_10 0x558a /* default 0x01 -> all 0x00 */
+#define OV5640_SDE_CTRL_11 0x558b /* default 0xff -> all 0xf8 */
+
+/* LENC Control [0x5800 ~ 0x5849] */
+#define OV5640_LENC_GMTRX_00 0x5800 /* default 0x10 -> all 0x23 */
+#define OV5640_LENC_GMTRX_01 0x5801 /* default 0x10 -> all 0x14 */
+#define OV5640_LENC_GMTRX_02 0x5802 /* default 0x10 -> all 0x0f */
+#define OV5640_LENC_GMTRX_03 0x5803 /* default 0x10 -> all 0x0f */
+#define OV5640_LENC_GMTRX_04 0x5804 /* default 0x10 -> all 0x12 */
+#define OV5640_LENC_GMTRX_05 0x5805 /* default 0x10 -> all 0x26 */
+#define OV5640_LENC_GMTRX_10 0x5806 /* default 0x10 -> all 0x0c */
+#define OV5640_LENC_GMTRX_11 0x5807 /* default 0x08 */
+#define OV5640_LENC_GMTRX_12 0x5808 /* default 0x08 -> all 0x05 */
+#define OV5640_LENC_GMTRX_13 0x5809 /* default 0x08 -> all 0x05 */
+#define OV5640_LENC_GMTRX_14 0x580a /* default 0x08 -> all */
+#define OV5640_LENC_GMTRX_15 0x580b /* default 0x10 -> all 0x0d */
+#define OV5640_LENC_GMTRX_20 0x580c /* default 0x10 -> all 0x08 */
+#define OV5640_LENC_GMTRX_21 0x580d /* default 0x08 -> all 0x03 */
+#define OV5640_LENC_GMTRX_22 0x580e /* default 0x00 */
+#define OV5640_LENC_GMTRX_23 0x580f /* default 0x00 */
+#define OV5640_LENC_GMTRX_24 0x5810 /* default 0x08 -> all 0x03 */
+#define OV5640_LENC_GMTRX_25 0x5811 /* default 0x10 -> all 0x09 */
+#define OV5640_LENC_GMTRX_30 0x5812 /* default 0x10 -> all 0x07 */
+#define OV5640_LENC_GMTRX_31 0x5813 /* default 0x08 -> all 0x03 */
+#define OV5640_LENC_GMTRX_32 0x5814 /* default 0x00 */
+#define OV5640_LENC_GMTRX_33 0x5815 /* default 0x00 -> all 0x01 */
+#define OV5640_LENC_GMTRX_34 0x5816 /* default 0x08 -> all 0x03 */
+#define OV5640_LENC_GMTRX_35 0x5817 /* default 0x10 -> all 0x08 */
+#define OV5640_LENC_GMTRX_40 0x5818 /* default 0x10 -> all 0x0d */
+#define OV5640_LENC_GMTRX_41 0x5819 /* default 0x08 */
+#define OV5640_LENC_GMTRX_42 0x581a /* default 0x08 -> all 0x05 */
+#define OV5640_LENC_GMTRX_43 0x581b /* default 0x08 -> all 0x06 */
+#define OV5640_LENC_GMTRX_44 0x581c /* default 0x08 */
+#define OV5640_LENC_GMTRX_45 0x581d /* default 0x10 -> all 0x0e */
+#define OV5640_LENC_GMTRX_50 0x581e /* default 0x10 -> all 0x29 */
+#define OV5640_LENC_GMTRX_51 0x581f /* default 0x10 -> all 0x17 */
+#define OV5640_LENC_GMTRX_52 0x5820 /* default 0x10 -> all 0x11 */
+#define OV5640_LENC_GMTRX_53 0x5821 /* default 0x10 -> all 0x11 */
+#define OV5640_LENC_GMTRX_54 0x5822 /* default 0x10 -> all 0x15 */
+#define OV5640_LENC_GMTRX_55 0x5823 /* default 0x10 -> all 0x28 */
+#define OV5640_LENC_BRMATRX_00 0x5824 /* default 0xaa -> all 0x46 */
+#define OV5640_LENC_BRMATRX_01 0x5825 /* default 0xaa -> all 0x26 */
+#define OV5640_LENC_BRMATRX_02 0x5826 /* default 0xaa -> all 0x08 */
+#define OV5640_LENC_BRMATRX_03 0x5827 /* default 0xaa -> all 0x26 */
+#define OV5640_LENC_BRMATRX_04 0x5828 /* default 0xaa -> all 0x64 */
+#define OV5640_LENC_BRMATRX_05 0x5829 /* default 0xaa -> all 0x26 */
+#define OV5640_LENC_BRMATRX_06 0x582a /* default 0x99 -> all 0x24 */
+#define OV5640_LENC_BRMATRX_07 0x582b /* default 0x99 -> all 0x22 */
+#define OV5640_LENC_BRMATRX_08 0x582c /* default 0x99 -> all 0x24 */
+#define OV5640_LENC_BRMATRX_09 0x582d /* default 0xaa -> all 0x24 */
+#define OV5640_LENC_BRMATRX_20 0x582e /* default 0xaa -> all 0x06 */
+#define OV5640_LENC_BRMATRX_21 0x582f /* default 0x99 -> all 0x22 */
+#define OV5640_LENC_BRMATRX_22 0x5830 /* default 0x88 -> all 0x40 */
+#define OV5640_LENC_BRMATRX_23 0x5831 /* default 0x99 -> all 0x42 */
+#define OV5640_LENC_BRMATRX_24 0x5832 /* default 0xaa -> all 0x24 */
+#define OV5640_LENC_BRMATRX_30 0x5833 /* default 0xaa -> all 0x26 */
+#define OV5640_LENC_BRMATRX_31 0x5834 /* default 0x99 -> all 0x24 */
+#define OV5640_LENC_BRMATRX_32 0x5835 /* default 0x99 -> all 0x22 */
+#define OV5640_LENC_BRMATRX_33 0x5836 /* default 0x99 -> all 0x22 */
+#define OV5640_LENC_BRMATRX_34 0x5837 /* default 0xaa -> all 0x26 */
+#define OV5640_LENC_BRMATRX_40 0x5838 /* default 0xaa -> all 0x44 */
+#define OV5640_LENC_BRMATRX_41 0x5839 /* default 0xaa -> all 0x24 */
+#define OV5640_LENC_BRMATRX_42 0x583a /* default 0xaa -> all 0x26 */
+#define OV5640_LENC_BRMATRX_43 0x583b /* default 0xaa -> all 0x28 */
+#define OV5640_LENC_BRMATRX_44 0x583c /* default 0xaa -> all 0x42 */
+#define OV5640_LENC_BR_OFFSET 0x583d /* default 0x88 -> all 0xce */
+
+
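+/* Per-mode register tables; each list is terminated by OV5640_TABLE_END */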
static struct ov5640_reg mode_2592x1944[] = {
/* PLL Control MIPI bit rate/lane = 672MHz, 16-bit mode.
- * Output size: 2608x1948 (0, 0) - (2623, 1951),
+ * Output size: 2592x1944 (0, 0) - (2623, 1951),
* Line Length = 2844, Frame Length = 1968
+ * Scaling method: full resolution (dummy 16 pixel
+ * horizontal, 8 lines) 2608x1952 with dummy
*/
- {0x3103, 0x11},
- {0x3008, 0x82},
- {OV5640_TABLE_WAIT_MS, 5},
- {0x3008, 0x42},
- {0x3103, 0x03},
- {0x3017, 0x00},
- {0x3018, 0x00},
- {0x3034, 0x18},
- {0x3035, 0x11},
- {0x3036, 0x7d},
- {0x3037, 0x13},
- {0x3108, 0x01},
- {0x3630, 0x36},
- {0x3631, 0x0e},
- {0x3632, 0xe2},
- {0x3633, 0x12},
- {0x3621, 0xe0},
- {0x3704, 0xa0},
- {0x3703, 0x5a},
- {0x3715, 0x78},
- {0x3717, 0x01},
- {0x370b, 0x60},
- {0x3705, 0x1a},
- {0x3905, 0x02},
- {0x3906, 0x10},
- {0x3901, 0x0a},
- {0x3731, 0x12},
- {0x3600, 0x08},
- {0x3601, 0x33},
- {0x302d, 0x60},
- {0x3620, 0x52},
- {0x371b, 0x20},
- {0x471c, 0x50},
- {0x3a13, 0x43},
- {0x3a18, 0x00},
- {0x3a19, 0xf8},
- {0x3635, 0x13},
- {0x3636, 0x03},
- {0x3634, 0x40},
- {0x3622, 0x01},
- {0x3c01, 0x34},
- {0x3c04, 0x28},
- {0x3c05, 0x98},
- {0x3c06, 0x00},
- {0x3c07, 0x07},
- {0x3c08, 0x00},
- {0x3c09, 0x1c},
- {0x3c0a, 0x9c},
- {0x3c0b, 0x40},
- {0x3820, 0x40},
- {0x3821, 0x06},
- {0x3814, 0x11},
- {0x3815, 0x11},
- {0x3800, 0x00},
- {0x3801, 0x00},
- {0x3802, 0x00},
- {0x3803, 0x00},
- {0x3804, 0x0a},
- {0x3805, 0x3f},
- {0x3806, 0x07},
- {0x3807, 0x9f},
- {0x3808, 0x0a},
- {0x3809, 0x30},
- {0x380a, 0x07},
- {0x380b, 0x9c},
- {0x380c, 0x0b},
- {0x380d, 0x1c},
- {0x380e, 0x07},
- {0x380f, 0xb0},
- {0x3810, 0x00},
- {0x3811, 0x08},
- {0x3812, 0x00},
- {0x3813, 0x02},
- {0x3618, 0x04},
+
+ {OV5640_SC_PLL_CTRL0, 0x10 | MIPI_MODE_8BIT },
+ {OV5640_SC_PLL_CTRL1, 0x11},
+ {OV5640_SC_PLL_CTRL2, 0x7d}, /* default 0x69 PLL multiplier */
+ {OV5640_SC_PLL_CTRL3, 0x03 | (1<<4) }, /* default 0x03 PLL divided by 2 */
+
+ {OV5640_MIPI_CTRL_00, 0x04},
+ {OV5640_MIPI_PCLK_PERIOD, 0x0a},
+
+ {OV5640_ISP_CTRL_00, 0xa7},
+ {OV5640_ISP_CTRL_01, 0x83},
+ {OV5640_ISP_MUX_CTRL, 0x00},
+
+ /* Timing Control */
+ {OV5640_TIMING_HS_H, OV5640_ADDR_H(0) },
+ {OV5640_TIMING_HS_L, OV5640_ADDR_L(0) },
+ {OV5640_TIMING_VS_H, OV5640_ADDR_H(0) },
+ {OV5640_TIMING_VS_L, OV5640_ADDR_L(0) },
+ {OV5640_TIMING_HW_H, OV5640_ADDR_H(2623) },
+ {OV5640_TIMING_HW_L, OV5640_ADDR_L(2623) },
+ {OV5640_TIMING_VH_H, OV5640_ADDR_H(1951) },
+ {OV5640_TIMING_VH_L, OV5640_ADDR_L(1951) },
+ {OV5640_TIMING_DVPHO_H, OV5640_WIDTH_H(2592) },
+ {OV5640_TIMING_DVPHO_L, OV5640_WIDTH_L(2592) },
+ {OV5640_TIMING_DVPVO_H, OV5640_HEIGHT_H(1944) },
+ {OV5640_TIMING_DVPVO_L, OV5640_HEIGHT_L(1944) },
+ {OV5640_TIMING_HTS_H, OV5640_HTS_H(2844) },
+ {OV5640_TIMING_HTS_L, OV5640_HTS_L(2844) },
+ {OV5640_TIMING_VTS_H, OV5640_VTS_H(1968) },
+ {OV5640_TIMING_VTS_L, OV5640_VTS_L(1968) },
+ {OV5640_TIMING_HOFFSET_H, OV5640_H_OFFSET_H(16) }, // (2623-2592-1)/2
+ {OV5640_TIMING_HOFFSET_L, OV5640_H_OFFSET_L(16) },
+ {OV5640_TIMING_VOFFSET_H, OV5640_V_OFFSET_H(4) }, // (1951-1944-1)/2
+ {OV5640_TIMING_VOFFSET_L, OV5640_V_OFFSET_L(4) },
+ {OV5640_TIMING_X_INC, 0x11},
+ {OV5640_TIMING_Y_INC, 0x11},
+ {OV5640_TIMING_REG20, 0x40},
+ {OV5640_TIMING_REG21, 0},
+
+ /* magic registers */
{0x3612, 0x2b},
+ {0x3618, 0x04},
{0x3708, 0x64},
{0x3709, 0x12},
{0x370c, 0x00},
- {0x3a02, 0x07},
- {0x3a03, 0xb0},
- {0x3a08, 0x01},
- {0x3a09, 0x27},
- {0x3a0a, 0x00},
- {0x3a0b, 0xf6},
- {0x3a0e, 0x06},
- {0x3a0d, 0x08},
- {0x3a14, 0x07},
- {0x3a15, 0xb0},
- {0x4001, 0x02},
- {0x4004, 0x06},
- {0x3000, 0x00},
- {0x3002, 0x1c},
- {0x3004, 0xff},
- {0x3006, 0xc3},
- {0x300e, 0x45},
- {0x302e, 0x08},
- {0x4300, 0x32},
- {0x4800, 0x24},
- {0x4837, 0x0a},
- {0x501f, 0x00},
- {0x440e, 0x00},
- {0x5000, 0xa7},
- {0x5001, 0x83},
- {0x5180, 0xff},
- {0x5181, 0xf2},
- {0x5182, 0x00},
- {0x5183, 0x14},
- {0x5184, 0x25},
- {0x5185, 0x24},
- {0x5186, 0x09},
- {0x5187, 0x09},
- {0x5188, 0x09},
- {0x5189, 0x75},
- {0x518a, 0x54},
- {0x518b, 0xe0},
- {0x518c, 0xb2},
- {0x518d, 0x42},
- {0x518e, 0x3d},
- {0x518f, 0x56},
- {0x5190, 0x46},
- {0x5191, 0xf8},
- {0x5192, 0x04},
- {0x5193, 0x70},
- {0x5194, 0xf0},
- {0x5195, 0xf0},
- {0x5196, 0x03},
- {0x5197, 0x01},
- {0x5198, 0x04},
- {0x5199, 0x12},
- {0x519a, 0x04},
- {0x519b, 0x00},
- {0x519c, 0x06},
- {0x519d, 0x82},
- {0x519e, 0x38},
- {0x5381, 0x1e},
- {0x5382, 0x5b},
- {0x5383, 0x08},
- {0x5384, 0x0a},
- {0x5385, 0x7e},
- {0x5386, 0x88},
- {0x5387, 0x7c},
- {0x5388, 0x6c},
- {0x5389, 0x10},
- {0x538a, 0x01},
- {0x538b, 0x98},
- {0x5300, 0x08},
- {0x5301, 0x30},
- {0x5302, 0x10},
- {0x5303, 0x00},
- {0x5304, 0x08},
- {0x5305, 0x30},
- {0x5306, 0x08},
- {0x5307, 0x16},
- {0x5309, 0x08},
- {0x530a, 0x30},
- {0x530b, 0x04},
- {0x530c, 0x06},
- {0x5480, 0x01},
- {0x5481, 0x08},
- {0x5482, 0x14},
- {0x5483, 0x28},
- {0x5484, 0x51},
- {0x5485, 0x65},
- {0x5486, 0x71},
- {0x5487, 0x7d},
- {0x5488, 0x87},
- {0x5489, 0x91},
- {0x548a, 0x9a},
- {0x548b, 0xaa},
- {0x548c, 0xb8},
- {0x548d, 0xcd},
- {0x548e, 0xdd},
- {0x548f, 0xea},
- {0x5490, 0x1d},
- {0x5580, 0x02},
- {0x5583, 0x40},
- {0x5584, 0x10},
- {0x5589, 0x10},
- {0x558a, 0x00},
- {0x558b, 0xf8},
- {0x5800, 0x23},
- {0x5801, 0x14},
- {0x5802, 0x0f},
- {0x5803, 0x0f},
- {0x5804, 0x12},
- {0x5805, 0x26},
- {0x5806, 0x0c},
- {0x5807, 0x08},
- {0x5808, 0x05},
- {0x5809, 0x05},
- {0x580a, 0x08},
- {0x580b, 0x0d},
- {0x580c, 0x08},
- {0x580d, 0x03},
- {0x580e, 0x00},
- {0x580f, 0x00},
- {0x5810, 0x03},
- {0x5811, 0x09},
- {0x5812, 0x07},
- {0x5813, 0x03},
- {0x5814, 0x00},
- {0x5815, 0x01},
- {0x5816, 0x03},
- {0x5817, 0x08},
- {0x5818, 0x0d},
- {0x5819, 0x08},
- {0x581a, 0x05},
- {0x581b, 0x06},
- {0x581c, 0x08},
- {0x581d, 0x0e},
- {0x581e, 0x29},
- {0x581f, 0x17},
- {0x5820, 0x11},
- {0x5821, 0x11},
- {0x5822, 0x15},
- {0x5823, 0x28},
- {0x5824, 0x46},
- {0x5825, 0x26},
- {0x5826, 0x08},
- {0x5827, 0x26},
- {0x5828, 0x64},
- {0x5829, 0x26},
- {0x582a, 0x24},
- {0x582b, 0x22},
- {0x582c, 0x24},
- {0x582d, 0x24},
- {0x582e, 0x06},
- {0x582f, 0x22},
- {0x5830, 0x40},
- {0x5831, 0x42},
- {0x5832, 0x24},
- {0x5833, 0x26},
- {0x5834, 0x24},
- {0x5835, 0x22},
- {0x5836, 0x22},
- {0x5837, 0x26},
- {0x5838, 0x44},
- {0x5839, 0x24},
- {0x583a, 0x26},
- {0x583b, 0x28},
- {0x583c, 0x42},
- {0x583d, 0xce},
- {0x5025, 0x00},
- {0x3a0f, 0x30},
- {0x3a10, 0x28},
- {0x3a1b, 0x30},
- {0x3a1e, 0x26},
- {0x3a11, 0x60},
- {0x3a1f, 0x14},
- {0x3008, 0x02},
+
+ /* AEC/AGC Power Down Domain Control */
+ {OV5640_AEC_MAX_EXPO60_H, 0x07},
+ {OV5640_AEC_MAX_EXPO60_L, 0xb0},
+ {OV5640_AEC_B50_STEP_H, 0x01},
+ {OV5640_AEC_B50_STEP_L, 0x27},
+ {OV5640_AEC_B60_STEP_H, 0x00},
+ {OV5640_AEC_B60_STEP_L, 0xf6},
+ {OV5640_AEC_CTRL_0E, 0x06},
+ {OV5640_AEC_CTRL_0D, 0x08},
+ {OV5640_AEC_MAX_EXPO_H, 0x07},
+ {OV5640_AEC_MAX_EXPO_L, 0xb0},
+ {OV5640_AEC_CTRL_13, 0x43},
+ {OV5640_AEC_GAIN_CEILING_0, 0x00},
+ {OV5640_AEC_GAIN_CEILING_1, 0xf8},
+ {OV5640_AEC_CTRL_0F, 0x30},
+ {OV5640_AEC_CTRL_10, 0x28},
+ {OV5640_AEC_CTRL_1B, 0x30},
+ {OV5640_AEC_CTRL_1E, 0x26},
+ {OV5640_AEC_CTRL_11, 0x60},
+ {OV5640_AEC_CTRL_1F, 0x14},
+
+ {OV5640_SYSTEM_CTRL, 0x02},
{OV5640_TABLE_END, 0x0000}
};
static struct ov5640_reg mode_1920x1080[] = {
/* PLL Control MIPI bit rate/lane = 672MHz, 16-bit mode.
- * Output size: 1936x1096 (336, 426) - (2287, 1529),
+ * Output size: 1936x1088 (336, 426) - (2287, 1529),
* Line Length = 2500, Frame Length = 1120.
+ * Scaling method: cropping from full resolution
+ * 1936x1088 with dummy pixels
*/
- {0x3103, 0x11},
- {0x3008, 0x82},
- {OV5640_TABLE_WAIT_MS, 5},
- {0x3008, 0x42},
- {0x3103, 0x03},
- {0x3017, 0x00},
- {0x3018, 0x00},
- {0x3034, 0x18},
- {0x3035, 0x11},
- {0x3036, 0x54},
- {0x3037, 0x13},
- {0x3108, 0x01},
- {0x3630, 0x36},
- {0x3631, 0x0e},
- {0x3632, 0xe2},
- {0x3633, 0x12},
- {0x3621, 0xe0},
- {0x3704, 0xa0},
- {0x3703, 0x5a},
- {0x3715, 0x78},
- {0x3717, 0x01},
- {0x370b, 0x60},
- {0x3705, 0x1a},
- {0x3905, 0x02},
- {0x3906, 0x10},
- {0x3901, 0x0a},
- {0x3731, 0x12},
- {0x3600, 0x08},
- {0x3601, 0x33},
- {0x302d, 0x60},
- {0x3620, 0x52},
- {0x371b, 0x20},
- {0x471c, 0x50},
- {0x3a13, 0x43},
- {0x3a18, 0x00},
- {0x3a19, 0xf8},
- {0x3635, 0x13},
- {0x3636, 0x03},
- {0x3634, 0x40},
- {0x3622, 0x01},
- {0x3c01, 0x34},
- {0x3c04, 0x28},
- {0x3c05, 0x98},
- {0x3c06, 0x00},
- {0x3c07, 0x07},
- {0x3c08, 0x00},
- {0x3c09, 0x1c},
- {0x3c0a, 0x9c},
- {0x3c0b, 0x40},
- {0x3820, 0x40},
- {0x3821, 0x06},
- {0x3814, 0x11},
- {0x3815, 0x11},
- {0x3800, 0x01},
- {0x3801, 0x50},
- {0x3802, 0x01},
- {0x3803, 0xaa},
- {0x3804, 0x08},
- {0x3805, 0xef},
- {0x3806, 0x05},
- {0x3807, 0xf9},
- {0x3808, 0x07},
- {0x3809, 0x90},
- {0x380a, 0x04},
- {0x380b, 0x48},
- {0x380c, 0x09},
- {0x380d, 0xc4},
- {0x380e, 0x04},
- {0x380f, 0x60},
- {0x3810, 0x00},
- {0x3811, 0x08},
- {0x3812, 0x00},
- {0x3813, 0x04},
- {0x3618, 0x04},
+
+ {OV5640_SC_PLL_CTRL0, 0x10 | MIPI_MODE_8BIT },
+ {OV5640_SC_PLL_CTRL1, 0x11},
+ {OV5640_SC_PLL_CTRL2, 0x54}, /* default 0x69 PLL multiplier */
+ {OV5640_SC_PLL_CTRL3, 0x03 | (1<<4) }, /* default 0x03 PLL divided by 2 */
+
+ {OV5640_MIPI_CTRL_00, 0x04},
+ {OV5640_MIPI_PCLK_PERIOD, 0x0a},
+
+ {OV5640_ISP_CTRL_00, 0xa7},
+ {OV5640_ISP_CTRL_01, 0x83},
+ {OV5640_ISP_MUX_CTRL, 0x00},
+
+ /* Timing Control */
+ {OV5640_TIMING_HS_H, OV5640_ADDR_H(336) },
+ {OV5640_TIMING_HS_L, OV5640_ADDR_L(336) },
+ {OV5640_TIMING_VS_H, OV5640_ADDR_H(426) },
+ {OV5640_TIMING_VS_L, OV5640_ADDR_L(426) },
+ {OV5640_TIMING_HW_H, OV5640_ADDR_H(2287) },
+ {OV5640_TIMING_HW_L, OV5640_ADDR_L(2287) },
+ {OV5640_TIMING_VH_H, OV5640_ADDR_H(1529) },
+ {OV5640_TIMING_VH_L, OV5640_ADDR_L(1529) },
+ {OV5640_TIMING_DVPHO_H, OV5640_WIDTH_H(1920) }, //1936
+ {OV5640_TIMING_DVPHO_L, OV5640_WIDTH_L(1920) },
+ {OV5640_TIMING_DVPVO_H, OV5640_HEIGHT_H(1080) }, //1088
+ {OV5640_TIMING_DVPVO_L, OV5640_HEIGHT_L(1080) },
+ {OV5640_TIMING_HTS_H, OV5640_HTS_H(2500) },
+ {OV5640_TIMING_HTS_L, OV5640_HTS_L(2500) },
+ {OV5640_TIMING_VTS_H, OV5640_VTS_H(1120) },
+ {OV5640_TIMING_VTS_L, OV5640_VTS_L(1120) },
+ {OV5640_TIMING_HOFFSET_H, OV5640_H_OFFSET_H(15) }, // (2287-336-1920-1)/2
+ {OV5640_TIMING_HOFFSET_L, OV5640_H_OFFSET_L(15) },
+ {OV5640_TIMING_VOFFSET_H, OV5640_V_OFFSET_H(11) }, // (1529-426-1080-1)/2
+ {OV5640_TIMING_VOFFSET_L, OV5640_V_OFFSET_L(11) },
+ {OV5640_TIMING_X_INC, 0x11},
+ {OV5640_TIMING_Y_INC, 0x11},
+ {OV5640_TIMING_REG20, 0x40},
+ {OV5640_TIMING_REG21, 0},
+
+ /* magic registers */
{0x3612, 0x2b},
+ {0x3618, 0x04},
{0x3708, 0x64},
{0x3709, 0x12},
{0x370c, 0x00},
- {0x3a02, 0x04},
- {0x3a03, 0x60},
- {0x3a08, 0x01},
- {0x3a09, 0x50},
- {0x3a0a, 0x01},
- {0x3a0b, 0x18},
- {0x3a0e, 0x03},
- {0x3a0d, 0x04},
- {0x3a14, 0x04},
- {0x3a15, 0x60},
- {0x4001, 0x02},
- {0x4004, 0x06},
- {0x3000, 0x00},
- {0x3002, 0x1c},
- {0x3004, 0xff},
- {0x3006, 0xc3},
- {0x300e, 0x45},
- {0x302e, 0x08},
- {0x4300, 0x32},
- {0x501f, 0x00},
- {0x4713, 0x02},
- {0x4407, 0x04},
- {0x440e, 0x00},
- {0x460b, 0x37},
- {0x460c, 0x20},
- {0x4800, 0x24},
- {0x4837, 0x0a},
- {0x3824, 0x04},
- {0x5000, 0xa7},
- {0x5001, 0x83},
- {0x5180, 0xff},
- {0x5181, 0xf2},
- {0x5182, 0x00},
- {0x5183, 0x14},
- {0x5184, 0x25},
- {0x5185, 0x24},
- {0x5186, 0x09},
- {0x5187, 0x09},
- {0x5188, 0x09},
- {0x5189, 0x75},
- {0x518a, 0x54},
- {0x518b, 0xe0},
- {0x518c, 0xb2},
- {0x518d, 0x42},
- {0x518e, 0x3d},
- {0x518f, 0x56},
- {0x5190, 0x46},
- {0x5191, 0xf8},
- {0x5192, 0x04},
- {0x5193, 0x70},
- {0x5194, 0xf0},
- {0x5195, 0xf0},
- {0x5196, 0x03},
- {0x5197, 0x01},
- {0x5198, 0x04},
- {0x5199, 0x12},
- {0x519a, 0x04},
- {0x519b, 0x00},
- {0x519c, 0x06},
- {0x519d, 0x82},
- {0x519e, 0x38},
- {0x5381, 0x1e},
- {0x5382, 0x5b},
- {0x5383, 0x08},
- {0x5384, 0x0a},
- {0x5385, 0x7e},
- {0x5386, 0x88},
- {0x5387, 0x7c},
- {0x5388, 0x6c},
- {0x5389, 0x10},
- {0x538a, 0x01},
- {0x538b, 0x98},
- {0x5300, 0x08},
- {0x5301, 0x30},
- {0x5302, 0x10},
- {0x5303, 0x00},
- {0x5304, 0x08},
- {0x5305, 0x30},
- {0x5306, 0x08},
- {0x5307, 0x16},
- {0x5309, 0x08},
- {0x530a, 0x30},
- {0x530b, 0x04},
- {0x530c, 0x06},
- {0x5480, 0x01},
- {0x5481, 0x08},
- {0x5482, 0x14},
- {0x5483, 0x28},
- {0x5484, 0x51},
- {0x5485, 0x65},
- {0x5486, 0x71},
- {0x5487, 0x7d},
-
- {0x5488, 0x87},
- {0x5489, 0x91},
- {0x548a, 0x9a},
- {0x548b, 0xaa},
- {0x548c, 0xb8},
- {0x548d, 0xcd},
- {0x548e, 0xdd},
- {0x548f, 0xea},
- {0x5490, 0x1d},
- {0x5580, 0x02},
- {0x5583, 0x40},
- {0x5584, 0x10},
- {0x5589, 0x10},
- {0x558a, 0x00},
- {0x558b, 0xf8},
- {0x5800, 0x23},
- {0x5801, 0x14},
- {0x5802, 0x0f},
- {0x5803, 0x0f},
- {0x5804, 0x12},
- {0x5805, 0x26},
- {0x5806, 0x0c},
- {0x5807, 0x08},
- {0x5808, 0x05},
- {0x5809, 0x05},
- {0x580a, 0x08},
- {0x580b, 0x0d},
- {0x580c, 0x08},
- {0x580d, 0x03},
- {0x580e, 0x00},
- {0x580f, 0x00},
- {0x5810, 0x03},
- {0x5811, 0x09},
- {0x5812, 0x07},
- {0x5813, 0x03},
- {0x5814, 0x00},
- {0x5815, 0x01},
- {0x5816, 0x03},
- {0x5817, 0x08},
- {0x5818, 0x0d},
- {0x5819, 0x08},
- {0x581a, 0x05},
- {0x581b, 0x06},
- {0x581c, 0x08},
- {0x581d, 0x0e},
- {0x581e, 0x29},
- {0x581f, 0x17},
- {0x5820, 0x11},
- {0x5821, 0x11},
- {0x5822, 0x15},
- {0x5823, 0x28},
- {0x5824, 0x46},
- {0x5825, 0x26},
- {0x5826, 0x08},
- {0x5827, 0x26},
- {0x5828, 0x64},
- {0x5829, 0x26},
- {0x582a, 0x24},
- {0x582b, 0x22},
- {0x582c, 0x24},
- {0x582d, 0x24},
- {0x582e, 0x06},
- {0x582f, 0x22},
- {0x5830, 0x40},
- {0x5831, 0x42},
- {0x5832, 0x24},
- {0x5833, 0x26},
- {0x5834, 0x24},
- {0x5835, 0x22},
- {0x5836, 0x22},
- {0x5837, 0x26},
- {0x5838, 0x44},
- {0x5839, 0x24},
- {0x583a, 0x26},
- {0x583b, 0x28},
- {0x583c, 0x42},
- {0x583d, 0xce},
- {0x5025, 0x00},
- {0x3a0f, 0x30},
- {0x3a10, 0x28},
- {0x3a1b, 0x30},
- {0x3a1e, 0x26},
- {0x3a11, 0x60},
- {0x3a1f, 0x14},
- {0x3008, 0x02},
+
+ /* AEC/AGC Power Down Domain Control */
+ {OV5640_AEC_MAX_EXPO60_H, 0x04},
+ {OV5640_AEC_MAX_EXPO60_L, 0x60},
+ {OV5640_AEC_B50_STEP_H, 0x01},
+ {OV5640_AEC_B50_STEP_L, 0x50},
+ {OV5640_AEC_B60_STEP_H, 0x01},
+ {OV5640_AEC_B60_STEP_L, 0x18},
+ {OV5640_AEC_CTRL_0E, 0x03},
+ {OV5640_AEC_CTRL_0D, 0x04},
+ {OV5640_AEC_MAX_EXPO_H, 0x04},
+ {OV5640_AEC_MAX_EXPO_L, 0x60},
+ {OV5640_AEC_CTRL_13, 0x43},
+ {OV5640_AEC_GAIN_CEILING_0, 0x00},
+ {OV5640_AEC_GAIN_CEILING_1, 0xf8},
+ {OV5640_AEC_CTRL_0F, 0x30},
+ {OV5640_AEC_CTRL_10, 0x28},
+ {OV5640_AEC_CTRL_1B, 0x30},
+ {OV5640_AEC_CTRL_1E, 0x26},
+ {OV5640_AEC_CTRL_11, 0x60},
+ {OV5640_AEC_CTRL_1F, 0x14},
+
+ {OV5640_SYSTEM_CTRL, 0x02},
{OV5640_TABLE_END, 0x0000}
};
-static struct ov5640_reg mode_1296x972[] = {
+static struct ov5640_reg mode_1280x960[] = {
/* PLL Control MIPI bit rate/lane = 448MHz, 16-bit mode.
- * Output size: 1304x972 (0, 0) - (2623, 1951),
+ * Output size: 1280x960 (0, 0) - (2623, 1951),
* Line Length = 1886, Frame Length = 990.
+ * Scaling method: subsampling in vertical and horizontal
+ * 1296x968 supports 2x2 binning
*/
- {0x3103, 0x11},
- {0x3008, 0x82},
- {OV5640_TABLE_WAIT_MS, 5},
- {0x3008, 0x42},
- {0x3103, 0x03},
- {0x3017, 0x00},
- {0x3018, 0x00},
- {0x3034, 0x18},
- {0x3035, 0x21},
- {0x3036, 0x70},
- {0x3037, 0x13},
- {0x3108, 0x01},
- {0x3630, 0x36},
- {0x3631, 0x0e},
- {0x3632, 0xe2},
- {0x3633, 0x12},
- {0x3621, 0xe0},
- {0x3704, 0xa0},
- {0x3703, 0x5a},
- {0x3715, 0x78},
- {0x3717, 0x01},
- {0x370b, 0x60},
- {0x3705, 0x1a},
- {0x3905, 0x02},
- {0x3906, 0x10},
- {0x3901, 0x0a},
- {0x3731, 0x12},
- {0x3600, 0x08},
- {0x3601, 0x33},
- {0x302d, 0x60},
- {0x3620, 0x52},
- {0x371b, 0x20},
- {0x471c, 0x50},
- {0x3a13, 0x43},
- {0x3a18, 0x00},
- {0x3a19, 0xf8},
- {0x3635, 0x13},
- {0x3636, 0x03},
- {0x3634, 0x40},
- {0x3622, 0x01},
- {0x3c01, 0x34},
- {0x3c04, 0x28},
- {0x3c05, 0x98},
- {0x3c06, 0x00},
- {0x3c07, 0x07},
- {0x3c08, 0x00},
- {0x3c09, 0x1c},
- {0x3c0a, 0x9c},
- {0x3c0b, 0x40},
- {0x3820, 0x41},
- {0x3821, 0x07},
- {0x3814, 0x31},
- {0x3815, 0x31},
- {0x3800, 0x00},
- {0x3801, 0x00},
- {0x3802, 0x00},
- {0x3803, 0x00},
- {0x3804, 0x0a},
- {0x3805, 0x3f},
- {0x3806, 0x07},
- {0x3807, 0x9f},
- {0x3808, 0x05},
- {0x3809, 0x18},
- {0x380a, 0x03},
- {0x380b, 0xcc},
- {0x380c, 0x07},
- {0x380d, 0x5e},
- {0x380e, 0x03},
- {0x380f, 0xde},
- {0x3810, 0x00},
- {0x3811, 0x06},
- {0x3812, 0x00},
- {0x3813, 0x02},
- {0x3618, 0x00},
+
+ {OV5640_SC_PLL_CTRL0, 0x10 | MIPI_MODE_8BIT },
+ {OV5640_SC_PLL_CTRL1, 0x21},
+ {OV5640_SC_PLL_CTRL2, 0x70}, /* default 0x69 PLL multiplier */
+ {OV5640_SC_PLL_CTRL3, 0x03 | (1<<4) }, /* default 0x03 PLL divided by 2 */
+
+ {OV5640_MIPI_CTRL_00, 0x04},
+ {OV5640_MIPI_PCLK_PERIOD, 0x10},
+
+ {OV5640_ISP_CTRL_00, 0xa7},
+ {OV5640_ISP_CTRL_01, 0x83},
+ {OV5640_ISP_MUX_CTRL, 0x00},
+
+ /* Timing Control */
+ {OV5640_TIMING_HS_H, OV5640_ADDR_H(0) },
+ {OV5640_TIMING_HS_L, OV5640_ADDR_L(0) },
+ {OV5640_TIMING_VS_H, OV5640_ADDR_H(0) },
+ {OV5640_TIMING_VS_L, OV5640_ADDR_L(0) },
+ {OV5640_TIMING_HW_H, OV5640_ADDR_H(2623) },
+ {OV5640_TIMING_HW_L, OV5640_ADDR_L(2623) },
+ {OV5640_TIMING_VH_H, OV5640_ADDR_H(1951) },
+ {OV5640_TIMING_VH_L, OV5640_ADDR_L(1951) },
+ {OV5640_TIMING_DVPHO_H, OV5640_WIDTH_H(1280) },
+ {OV5640_TIMING_DVPHO_L, OV5640_WIDTH_L(1280) },
+ {OV5640_TIMING_DVPVO_H, OV5640_HEIGHT_H(960) },
+ {OV5640_TIMING_DVPVO_L, OV5640_HEIGHT_L(960) },
+ {OV5640_TIMING_HTS_H, OV5640_HTS_H(1886) },
+ {OV5640_TIMING_HTS_L, OV5640_HTS_L(1886) },
+ {OV5640_TIMING_VTS_H, OV5640_VTS_H(990) },
+ {OV5640_TIMING_VTS_L, OV5640_VTS_L(990) },
+ {OV5640_TIMING_HOFFSET_H, OV5640_H_OFFSET_H(31) }, // (2623-0-(1280*2)-1)/2
+ {OV5640_TIMING_HOFFSET_L, OV5640_H_OFFSET_L(31) },
+ {OV5640_TIMING_VOFFSET_H, OV5640_V_OFFSET_H(15) }, // (1951-0-(960*2)-1)/2
+ {OV5640_TIMING_VOFFSET_L, OV5640_V_OFFSET_L(15) },
+ {OV5640_TIMING_X_INC, 0x11 | (3<<4) },
+ {OV5640_TIMING_Y_INC, 0x11 | (3<<4) },
+ {OV5640_TIMING_REG20, 0x40},
+ {OV5640_TIMING_REG21, REG21_BINNING_EN},
+
+ /* magic registers */
{0x3612, 0x29},
+ {0x3618, 0x00},
{0x3708, 0x62},
{0x3709, 0x52},
{0x370c, 0x03},
- {0x3a02, 0x03},
- {0x3a03, 0xd8},
- {0x3a08, 0x01},
- {0x3a09, 0x27},
- {0x3a0a, 0x00},
- {0x3a0b, 0xf6},
- {0x3a0e, 0x03},
- {0x3a0d, 0x04},
- {0x3a14, 0x03},
- {0x3a15, 0xd8},
- {0x4001, 0x02},
- {0x4004, 0x02},
- {0x3000, 0x00},
- {0x3002, 0x1c},
- {0x3004, 0xff},
- {0x3006, 0xc3},
- {0x300e, 0x45},
- {0x302e, 0x08},
- {0x4300, 0x32},
- {0x501f, 0x00},
- {0x4713, 0x02},
- {0x4407, 0x04},
- {0x440e, 0x00},
- {0x460b, 0x37},
- {0x460c, 0x20},
- {0x4800, 0x24},
- {0x4837, 0x10},
- {0x3824, 0x04},
- {0x5000, 0xa7},
- {0x5001, 0x83},
- {0x5180, 0xff},
- {0x5181, 0xf2},
- {0x5182, 0x00},
- {0x5183, 0x14},
- {0x5184, 0x25},
- {0x5185, 0x24},
- {0x5186, 0x09},
- {0x5187, 0x09},
- {0x5188, 0x09},
- {0x5189, 0x75},
- {0x518a, 0x54},
- {0x518b, 0xe0},
- {0x518c, 0xb2},
- {0x518d, 0x42},
- {0x518e, 0x3d},
- {0x518f, 0x56},
- {0x5190, 0x46},
- {0x5191, 0xf8},
- {0x5192, 0x04},
- {0x5193, 0x70},
- {0x5194, 0xf0},
- {0x5195, 0xf0},
- {0x5196, 0x03},
- {0x5197, 0x01},
- {0x5198, 0x04},
- {0x5199, 0x12},
- {0x519a, 0x04},
- {0x519b, 0x00},
- {0x519c, 0x06},
- {0x519d, 0x82},
- {0x519e, 0x38},
- {0x5381, 0x1e},
- {0x5382, 0x5b},
- {0x5383, 0x08},
- {0x5384, 0x0a},
- {0x5385, 0x7e},
- {0x5386, 0x88},
- {0x5387, 0x7c},
- {0x5388, 0x6c},
- {0x5389, 0x10},
- {0x538a, 0x01},
- {0x538b, 0x98},
- {0x5300, 0x08},
- {0x5301, 0x30},
- {0x5302, 0x10},
- {0x5303, 0x00},
- {0x5304, 0x08},
- {0x5305, 0x30},
- {0x5306, 0x08},
- {0x5307, 0x16},
- {0x5309, 0x08},
- {0x530a, 0x30},
- {0x530b, 0x04},
- {0x530c, 0x06},
- {0x5480, 0x01},
- {0x5481, 0x08},
- {0x5482, 0x14},
- {0x5483, 0x28},
- {0x5484, 0x51},
- {0x5485, 0x65},
- {0x5486, 0x71},
- {0x5487, 0x7d},
- {0x5488, 0x87},
- {0x5489, 0x91},
- {0x548a, 0x9a},
- {0x548b, 0xaa},
- {0x548c, 0xb8},
- {0x548d, 0xcd},
- {0x548e, 0xdd},
- {0x548f, 0xea},
- {0x5490, 0x1d},
- {0x5580, 0x02},
- {0x5583, 0x40},
- {0x5584, 0x10},
- {0x5589, 0x10},
- {0x558a, 0x00},
- {0x558b, 0xf8},
- {0x5800, 0x23},
- {0x5801, 0x14},
- {0x5802, 0x0f},
- {0x5803, 0x0f},
- {0x5804, 0x12},
- {0x5805, 0x26},
- {0x5806, 0x0c},
- {0x5807, 0x08},
- {0x5808, 0x05},
- {0x5809, 0x05},
- {0x580a, 0x08},
- {0x580b, 0x0d},
- {0x580c, 0x08},
- {0x580d, 0x03},
- {0x580e, 0x00},
- {0x580f, 0x00},
- {0x5810, 0x03},
- {0x5811, 0x09},
- {0x5812, 0x07},
- {0x5813, 0x03},
- {0x5814, 0x00},
- {0x5815, 0x01},
- {0x5816, 0x03},
- {0x5817, 0x08},
- {0x5818, 0x0d},
- {0x5819, 0x08},
- {0x581a, 0x05},
- {0x581b, 0x06},
- {0x581c, 0x08},
- {0x581d, 0x0e},
- {0x581e, 0x29},
- {0x581f, 0x17},
- {0x5820, 0x11},
- {0x5821, 0x11},
- {0x5822, 0x15},
- {0x5823, 0x28},
- {0x5824, 0x46},
- {0x5825, 0x26},
- {0x5826, 0x08},
- {0x5827, 0x26},
- {0x5828, 0x64},
- {0x5829, 0x26},
- {0x582a, 0x24},
- {0x582b, 0x22},
- {0x582c, 0x24},
- {0x582d, 0x24},
- {0x582e, 0x06},
- {0x582f, 0x22},
- {0x5830, 0x40},
- {0x5831, 0x42},
- {0x5832, 0x24},
- {0x5833, 0x26},
- {0x5834, 0x24},
- {0x5835, 0x22},
- {0x5836, 0x22},
- {0x5837, 0x26},
- {0x5838, 0x44},
- {0x5839, 0x24},
- {0x583a, 0x26},
- {0x583b, 0x28},
- {0x583c, 0x42},
- {0x583d, 0xce},
- {0x5025, 0x00},
- {0x3a0f, 0x30},
- {0x3a10, 0x28},
- {0x3a1b, 0x30},
- {0x3a1e, 0x26},
- {0x3a11, 0x60},
- {0x3a1f, 0x14},
- {0x3008, 0x02},
+
+ /* AEC/AGC Power Down Domain Control */
+ {OV5640_AEC_MAX_EXPO60_H, 0x03},
+ {OV5640_AEC_MAX_EXPO60_L, 0xd8},
+ {OV5640_AEC_B50_STEP_H, 0x01},
+ {OV5640_AEC_B50_STEP_L, 0x27},
+ {OV5640_AEC_B60_STEP_H, 0x00},
+ {OV5640_AEC_B60_STEP_L, 0xf6},
+ {OV5640_AEC_CTRL_0E, 0x03},
+ {OV5640_AEC_CTRL_0D, 0x04},
+ {OV5640_AEC_MAX_EXPO_H, 0x03},
+ {OV5640_AEC_MAX_EXPO_L, 0xd8},
+ {OV5640_AEC_CTRL_13, 0x43},
+ {OV5640_AEC_GAIN_CEILING_0, 0x00},
+ {OV5640_AEC_GAIN_CEILING_1, 0xf8},
+ {OV5640_AEC_CTRL_0F, 0x30},
+ {OV5640_AEC_CTRL_10, 0x28},
+ {OV5640_AEC_CTRL_1B, 0x30},
+ {OV5640_AEC_CTRL_1E, 0x26},
+ {OV5640_AEC_CTRL_11, 0x60},
+ {OV5640_AEC_CTRL_1F, 0x14},
+
+ {OV5640_SYSTEM_CTRL, 0x02},
{OV5640_TABLE_END, 0x0000}
};
static struct ov5640_reg mode_640x480[] = {
- {0x3103, 0x11},
- {0x3008, 0x82},
- {OV5640_TABLE_WAIT_MS, 5},
- {0x3008, 0x42},
- {0x3103, 0x03},
- {0x3017, 0x00},
- {0x3018, 0x00},
- {0x3034, 0x18},
- {0x3035, 0x14},
- {0x3036, 0x70},
- {0x3037, 0x13},
- {0x4800, 0x24}, /* noncontinuous clock */
- {0x3108, 0x01},
- {0x3630, 0x36},
- {0x3631, 0x0e},
- {0x3632, 0xe2},
- {0x3633, 0x12},
- {0x3621, 0xe0},
- {0x3704, 0xa0},
- {0x3703, 0x5a},
- {0x3715, 0x78},
- {0x3717, 0x01},
- {0x370b, 0x60},
- {0x3705, 0x1a},
- {0x3905, 0x02},
- {0x3906, 0x10},
- {0x3901, 0x0a},
- {0x3731, 0x12},
- {0x3600, 0x08},
- {0x3601, 0x33},
- {0x302d, 0x60},
- {0x3620, 0x52},
- {0x371b, 0x20},
- {0x471c, 0x50},
- {0x3a13, 0x43},
- {0x3a18, 0x00},
- {0x3a19, 0xf8},
- {0x3635, 0x13},
- {0x3636, 0x03},
- {0x3634, 0x40},
- {0x3622, 0x01},
- {0x3c01, 0x34},
- {0x3c04, 0x28},
- {0x3c05, 0x98},
- {0x3c06, 0x00},
- {0x3c07, 0x08},
- {0x3c08, 0x00},
- {0x3c09, 0x1c},
- {0x3c0a, 0x9c},
- {0x3c0b, 0x40},
- {0x3820, 0x41},
- {0x3821, 0x07},
- {0x3814, 0x31},
- {0x3815, 0x31},
- {0x3800, 0x00},
- {0x3801, 0x00},
- {0x3802, 0x00},
- {0x3803, 0x04},
- {0x3804, 0x0a},
- {0x3805, 0x3f},
- {0x3806, 0x07},
- {0x3807, 0x9b},
- {0x3808, 0x02},
- {0x3809, 0x80},
- {0x380a, 0x01},
- {0x380b, 0xe0},
- {0x380c, 0x07},
- {0x380d, 0x68},
- {0x380e, 0x03},
- {0x380f, 0xd8},
- {0x3810, 0x00},
- {0x3811, 0x10},
- {0x3812, 0x00},
- {0x3813, 0x06},
- {0x3618, 0x00},
+ /* PLL Control MIPI bit rate/lane = ?MHz, 16-bit mode.
+ * Output size: 640x480 (0, 4) - (2623, 1947),
+ * Line Length = 1896, Frame Length = 984.
+ * Scaling method: subsampling from 1280x960
+ * 648x484 with dummy
+ * supports 2x2 binning
+ */
+
+ {OV5640_SC_PLL_CTRL0, 0x10 | MIPI_MODE_8BIT },
+ {OV5640_SC_PLL_CTRL1, 0x14},
+ {OV5640_SC_PLL_CTRL2, 0x70}, /* default 0x69 PLL multiplier */
+ {OV5640_SC_PLL_CTRL3, 0x03 | (1<<4) }, /* default 0x03 PLL divided by 2 */
+
+ {OV5640_MIPI_CTRL_00, 0x04},
+ {OV5640_MIPI_PCLK_PERIOD, 0x44},
+
+ {OV5640_ISP_CTRL_00, 0xa7},
+ {OV5640_ISP_CTRL_01, 0x83 | ISP_SCALE_EN},
+ {OV5640_ISP_MUX_CTRL, 0x00},
+
+ /* Timing Control */
+ {OV5640_TIMING_HS_H, OV5640_ADDR_H(0) },
+ {OV5640_TIMING_HS_L, OV5640_ADDR_L(0) },
+ {OV5640_TIMING_VS_H, OV5640_ADDR_H(0) },
+ {OV5640_TIMING_VS_L, OV5640_ADDR_L(4) },
+ {OV5640_TIMING_HW_H, OV5640_ADDR_H(2623) },
+ {OV5640_TIMING_HW_L, OV5640_ADDR_L(2623) },
+ {OV5640_TIMING_VH_H, OV5640_ADDR_H(1947) },
+ {OV5640_TIMING_VH_L, OV5640_ADDR_L(1947) },
+ {OV5640_TIMING_DVPHO_H, OV5640_WIDTH_H(640) },
+ {OV5640_TIMING_DVPHO_L, OV5640_WIDTH_L(640) },
+ {OV5640_TIMING_DVPVO_H, OV5640_HEIGHT_H(480) },
+ {OV5640_TIMING_DVPVO_L, OV5640_HEIGHT_L(480) },
+ {OV5640_TIMING_HTS_H, OV5640_HTS_H(1896) },
+ {OV5640_TIMING_HTS_L, OV5640_HTS_L(1896) },
+ {OV5640_TIMING_VTS_H, OV5640_VTS_H(984) },
+ {OV5640_TIMING_VTS_L, OV5640_VTS_L(984) },
+ {OV5640_TIMING_HOFFSET_H, OV5640_H_OFFSET_H(31) }, // (2623-0-(640*2*2)-1)/2
+ {OV5640_TIMING_HOFFSET_L, OV5640_H_OFFSET_L(31) },
+ {OV5640_TIMING_VOFFSET_H, OV5640_V_OFFSET_H(11) }, // (1947-4-(480*2*2)-1)/2
+ {OV5640_TIMING_VOFFSET_L, OV5640_V_OFFSET_L(11) },
+ {OV5640_TIMING_X_INC, 0x11 | (3<<4) },
+ {OV5640_TIMING_Y_INC, 0x11 | (3<<4) },
+ {OV5640_TIMING_REG20, 0x40},
+ {OV5640_TIMING_REG21, REG21_BINNING_EN},
+
+ /* magic registers */
{0x3612, 0x29},
+ {0x3618, 0x00},
{0x3708, 0x64},
{0x3709, 0x52},
{0x370c, 0x03},
/* AEC/AGC Power Down Domain Control */
- {0x3a02, 0x03},
- {0x3a03, 0xd8},
- {0x3a08, 0x01},
- {0x3a09, 0x27},
- {0x3a0a, 0x00},
- {0x3a0b, 0xf6},
- {0x3a0e, 0x03},
- {0x3a0d, 0x04},
- {0x3a14, 0x03},
- {0x3a15, 0xd8},
-
- {0x4001, 0x02},
- {0x4004, 0x02},
- {0x3000, 0x00},
- {0x3002, 0x1c},
- {0x3004, 0xff},
- {0x3006, 0xc3},
- {0x300e, 0x45},
- {0x302e, 0x08},
- /* org:30 bit[3:0]
- 0x0:YUYV 0x1:YVYU 0x2:UYVY
- 0x3:VYUY 0xF:UYVY 0x4~0xE:Not-allowed
- */
- {0x4300, 0x32},
- {0x501f, 0x00},
- {0x4713, 0x03},
- {0x4407, 0x04},
- {0x440e, 0x00},
- {0x460b, 0x35},
- {0x460c, 0x22},
- {0x4837, 0x44},
- {0x3824, 0x02},
- {0x5000, 0xa7},
- {0x5001, 0xa3},
+ {OV5640_AEC_MAX_EXPO60_H, 0x03},
+ {OV5640_AEC_MAX_EXPO60_L, 0xd8},
+ {OV5640_AEC_B50_STEP_H, 0x01},
+ {OV5640_AEC_B50_STEP_L, 0x27},
+ {OV5640_AEC_B60_STEP_H, 0x00},
+ {OV5640_AEC_B60_STEP_L, 0xf6},
+ {OV5640_AEC_CTRL_0E, 0x03},
+ {OV5640_AEC_CTRL_0D, 0x04},
+ {OV5640_AEC_MAX_EXPO_H, 0x03},
+ {OV5640_AEC_MAX_EXPO_L, 0xd8},
+ {OV5640_AEC_CTRL_13, 0x43},
+ {OV5640_AEC_GAIN_CEILING_0, 0x00},
+ {OV5640_AEC_GAIN_CEILING_1, 0xf8},
+ {OV5640_AEC_CTRL_0F, 0x30},
+ {OV5640_AEC_CTRL_10, 0x28},
+ {OV5640_AEC_CTRL_1B, 0x30},
+ {OV5640_AEC_CTRL_1E, 0x26},
+ {OV5640_AEC_CTRL_11, 0x60},
+ {OV5640_AEC_CTRL_1F, 0x14},
+
+ {OV5640_SYSTEM_CTRL, 0x02},
+ {OV5640_TABLE_END, 0x0000},
+};
+
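+/* Register settings shared by all resolution modes */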
+static struct ov5640_reg mode_common_registers[] = {
+
+ /* SCCB Control */
+ {OV5640_SCCB_SYSTEM_CTRL, 0x03}, /* Clock from PLL */
+ {OV5640_SCCB_SCLK, 0x01}, /* PCLK = pll_clki, SCLK = pll_clki/2 */
+
+ /* System Control */
+ {OV5640_SYSTEM_CTRL, 0x02 | SYSTEM_CTRL_SRST }, /* Software reset */
+ {OV5640_TABLE_WAIT_MS, 5},
+ {OV5640_SYSTEM_CTRL, 0x02 | SYSTEM_CTRL_PDOWN }, /* Software power down */
+ {OV5640_SYSTEM_RESET_00, 0x00},
+ {OV5640_SYSTEM_RESET_01, 0x00},
+ {OV5640_SYSTEM_RESET_02, 0x1c},
+ {OV5640_SYSTEM_CLK_EN_00, 0xff},
+ {OV5640_SYSTEM_CLK_EN_02, 0xc3},
+ {OV5640_SYSTEM_MIPI_CTRL, 0x45},
+
+ {OV5640_SYSTEM_CTRL_2D, 0x60}, /* magic values */
+ {OV5640_SYSTEM_CTRL_2E, 0x08}, /* magic values */
+
+ /* Format Control */
+ {FMT_CTRL_00, FMT_YUV422 | OFMT_UYVY },
+
+ /* VCM Control */
+/* {OV5640_VCM_DEBUG_MODE_0, 0x08},*/
+/* {OV5640_VCM_DEBUG_MODE_1, 0x33},*/
+
+ /* 50/60Hz detector control */
+ {OV5640_5060_CTRL_01, 0x34},
+ {OV5640_5060_CTRL_04, 0x28},
+ {OV5640_5060_CTRL_05, 0x98},
+ {OV5640_5060_THRESHOLD_1_H, 0x00},
+ {OV5640_5060_THRESHOLD_1_L, 0x07},
+ {OV5640_5060_THRESHOLD_2_H, 0x00},
+ {OV5640_5060_THRESHOLD_2_L, 0x1c},
+ {OV5640_5060_SAMPLE_NUM_H, 0x9c},
+ {OV5640_5060_SAMPLE_NUM_L, 0x40},
+
+ /* BLC Control */
+ {OV5640_BLC_CTRL_01, 0x02},
+ {OV5640_BLC_CTRL_04, 0x06},
/* AWB Control */
- {0x5180, 0xff},
- {0x5181, 0xf2},
- {0x5182, 0x00},
- {0x5183, 0x14},
- {0x5184, 0x25},
- {0x5185, 0x24},
- {0x5186, 0x09},
- {0x5187, 0x09},
- {0x5188, 0x09},
- {0x5189, 0x75},
- {0x518a, 0x54},
- {0x518b, 0xe0},
- {0x518c, 0xb2},
- {0x518d, 0x42},
- {0x518e, 0x3d},
- {0x518d, 0x56},
- {0x5190, 0x46},
- {0x5191, 0xf8},
- {0x5192, 0x04},
- {0x5193, 0x70},
- {0x5194, 0xf0},
- {0x5195, 0xf0},
- {0x5196, 0x03},
- {0x5197, 0x01},
- {0x5198, 0x04},
- {0x5199, 0x12},
- {0x519a, 0x04},
- {0x519b, 0x00},
- {0x519c, 0x06},
- {0x519d, 0x82},
- {0x519e, 0x38},
+ {OV5640_AWB_CTRL_00, 0xff},
+ {OV5640_AWB_CTRL_01, 0xf2},
+ {OV5640_AWB_CTRL_02, 0x00},
+ {OV5640_AWB_CTRL_03, 0x14},
+ {OV5640_AWB_CTRL_04, 0x25},
+ {OV5640_AWB_CTRL_05, 0x24},
+ {OV5640_AWB_CTRL_06, 0x09},
+ {OV5640_AWB_CTRL_07, 0x09},
+ {OV5640_AWB_CTRL_08, 0x09},
+ {OV5640_AWB_CTRL_09, 0x75},
+ {OV5640_AWB_CTRL_10, 0x54},
+ {OV5640_AWB_CTRL_11, 0xe0},
+ {OV5640_AWB_CTRL_12, 0xb2},
+ {OV5640_AWB_CTRL_13, 0x42},
+ {OV5640_AWB_CTRL_14, 0x3d},
+ {OV5640_AWB_CTRL_15, 0x56},
+ {OV5640_AWB_CTRL_16, 0x46},
+ {OV5640_AWB_CTRL_17, 0xf8},
+ {OV5640_AWB_CTRL_18, 0x04},
+ {OV5640_AWB_CTRL_19, 0x70},
+ {OV5640_AWB_CTRL_20, 0xf0},
+ {OV5640_AWB_CTRL_21, 0xf0},
+ {OV5640_AWB_CTRL_22, 0x03},
+ {OV5640_AWB_CTRL_23, 0x01},
+ {OV5640_AWB_CTRL_24, 0x04},
+ {OV5640_AWB_CTRL_25, 0x12},
+ {OV5640_AWB_CTRL_26, 0x04},
+ {OV5640_AWB_CTRL_27, 0x00},
+ {OV5640_AWB_CTRL_28, 0x06},
+ {OV5640_AWB_CTRL_29, 0x82},
+ {OV5640_AWB_CTRL_30, 0x38},
/* CMX Control */
- {0x5381, 0x1e},
- {0x5382, 0x5b},
- {0x5383, 0x08},
- {0x5384, 0x0a},
- {0x5385, 0x7e},
- {0x5386, 0x88},
- {0x5387, 0x7c},
- {0x5388, 0x6c},
- {0x5389, 0x10},
- {0x538a, 0x01},
- {0x538b, 0x98},
+ {OV5640_CMX1, 0x1e},
+ {OV5640_CMX2, 0x5b},
+ {OV5640_CMX3, 0x08},
+ {OV5640_CMX4, 0x0a},
+ {OV5640_CMX5, 0x7e},
+ {OV5640_CMX6, 0x88},
+ {OV5640_CMX7, 0x7c},
+ {OV5640_CMX8, 0x6c},
+ {OV5640_CMX9, 0x10},
+ {OV5640_CMX_SIGN_0, 0x01},
+ {OV5640_CMX_SIGN_1, 0x98},
/* CIP Control */
- {0x5300, 0x08},
- {0x5301, 0x30},
- {0x5302, 0x10},
- {0x5303, 0x00},
- {0x5304, 0x08},
- {0x5305, 0x30},
- {0x5306, 0x08},
- {0x5307, 0x16},
- {0x5309, 0x08},
- {0x530a, 0x30},
- {0x530b, 0x04},
- {0x530c, 0x06},
+ {OV5640_CIP_SH_MT_THRES_1, 0x08},
+ {OV5640_CIP_SH_MT_THRES_2, 0x30},
+ {OV5640_CIP_SH_MT_OFFSET_1, 0x10},
+ {OV5640_CIP_SH_MT_OFFSET_2, 0x00},
+ {OV5640_CIP_DNS_THRES_1, 0x08},
+ {OV5640_CIP_DNS_THRES_2, 0x30},
+ {OV5640_CIP_DNS_OFFSET_1, 0x08},
+ {OV5640_CIP_DNS_OFFSET_2, 0x16},
+ {OV5640_CIP_SH_TH_THRES_1, 0x08},
+ {OV5640_CIP_SH_TH_THRES_2, 0x30},
+ {OV5640_CIP_SH_TH_OFFSET_1, 0x04},
+ {OV5640_CIP_SH_TH_OFFSET_2, 0x06},
/* Gamma Control */
- {0x5480, 0x01},
- {0x5481, 0x08},
- {0x5482, 0x14},
- {0x5483, 0x28},
- {0x5484, 0x51},
- {0x5485, 0x65},
- {0x5486, 0x71},
- {0x5487, 0x7d},
- {0x5488, 0x87},
- {0x5489, 0x91},
- {0x548a, 0x9a},
- {0x548b, 0xaa},
- {0x548c, 0xb8},
- {0x548d, 0xcd},
- {0x548e, 0xdd},
- {0x548f, 0xea},
- {0x5490, 0x1d},
+ {OV5640_GAMMA_CTRL_00, 0x01},
+ {OV5640_GAMMA_YST_00, 0x08},
+ {OV5640_GAMMA_YST_01, 0x14},
+ {OV5640_GAMMA_YST_02, 0x28},
+ {OV5640_GAMMA_YST_03, 0x51},
+ {OV5640_GAMMA_YST_04, 0x65},
+ {OV5640_GAMMA_YST_05, 0x71},
+ {OV5640_GAMMA_YST_06, 0x7d},
+ {OV5640_GAMMA_YST_07, 0x87},
+ {OV5640_GAMMA_YST_08, 0x91},
+ {OV5640_GAMMA_YST_09, 0x9a},
+ {OV5640_GAMMA_YST_0A, 0xaa},
+ {OV5640_GAMMA_YST_0B, 0xb8},
+ {OV5640_GAMMA_YST_0C, 0xcd},
+ {OV5640_GAMMA_YST_0D, 0xdd},
+ {OV5640_GAMMA_YST_0E, 0xea},
+ {OV5640_GAMMA_YST_0F, 0x1d},
/* SDE Control */
- {0x5580, 0x02},
- {0x5583, 0x40},
- {0x5584, 0x10},
- {0x5589, 0x10},
- {0x558a, 0x00},
- {0x558b, 0xf8},
+ {OV5640_SDE_CTRL_0, 0x02},
+ {OV5640_SDE_CTRL_3, 0x40},
+ {OV5640_SDE_CTRL_4, 0x10},
+ {OV5640_SDE_CTRL_9, 0x10},
+ {OV5640_SDE_CTRL_10, 0x00},
+ {OV5640_SDE_CTRL_11, 0xf8},
/* LENC Control */
- {0x5800, 0x23},
- {0x5801, 0x14},
- {0x5802, 0x0f},
- {0x5803, 0x0f},
- {0x5804, 0x12},
- {0x5805, 0x26},
- {0x5806, 0x0c},
- {0x5807, 0x08},
- {0x5808, 0x05},
- {0x5809, 0x05},
- {0x580a, 0x08},
- {0x580b, 0x0d},
- {0x580c, 0x08},
- {0x580d, 0x03},
- {0x580e, 0x00},
- {0x580f, 0x00},
- {0x5810, 0x03},
- {0x5811, 0x09},
- {0x5812, 0x07},
- {0x5813, 0x03},
- {0x5814, 0x00},
- {0x5815, 0x01},
- {0x5816, 0x03},
- {0x5817, 0x08},
- {0x5818, 0x0d},
- {0x5819, 0x08},
- {0x581a, 0x05},
- {0x581b, 0x06},
- {0x581c, 0x08},
- {0x581d, 0x0e},
- {0x581e, 0x29},
- {0x581f, 0x17},
- {0x5820, 0x11},
- {0x5821, 0x11},
- {0x5822, 0x15},
- {0x5823, 0x28},
- {0x5824, 0x46},
- {0x5825, 0x26},
- {0x5826, 0x08},
- {0x5827, 0x26},
- {0x5828, 0x64},
- {0x5829, 0x26},
- {0x582a, 0x24},
- {0x582b, 0x22},
- {0x582c, 0x24},
- {0x582d, 0x24},
- {0x582e, 0x06},
- {0x582f, 0x22},
- {0x5830, 0x40},
- {0x5831, 0x42},
- {0x5832, 0x24},
- {0x5833, 0x26},
- {0x5834, 0x24},
- {0x5835, 0x22},
- {0x5836, 0x22},
- {0x5837, 0x26},
- {0x5838, 0x44},
- {0x5839, 0x24},
- {0x583a, 0x26},
- {0x583b, 0x28},
- {0x583c, 0x42},
- {0x583d, 0xce},
-
- {0x5025, 0x00},
- {0x3a0f, 0x30},
- {0x3a10, 0x28},
- {0x3a1b, 0x30},
- {0x3a1e, 0x26},
- {0x3a11, 0x60},
- {0x3a1f, 0x14},
- {0x3008, 0x02},
+ {OV5640_LENC_GMTRX_00, 0x23},
+ {OV5640_LENC_GMTRX_01, 0x14},
+ {OV5640_LENC_GMTRX_02, 0x0f},
+ {OV5640_LENC_GMTRX_03, 0x0f},
+ {OV5640_LENC_GMTRX_04, 0x12},
+ {OV5640_LENC_GMTRX_05, 0x26},
+ {OV5640_LENC_GMTRX_10, 0x0c},
+ {OV5640_LENC_GMTRX_11, 0x08},
+ {OV5640_LENC_GMTRX_12, 0x05},
+ {OV5640_LENC_GMTRX_13, 0x05},
+ {OV5640_LENC_GMTRX_14, 0x08},
+ {OV5640_LENC_GMTRX_15, 0x0d},
+ {OV5640_LENC_GMTRX_20, 0x08},
+ {OV5640_LENC_GMTRX_21, 0x03},
+ {OV5640_LENC_GMTRX_22, 0x00},
+ {OV5640_LENC_GMTRX_23, 0x00},
+ {OV5640_LENC_GMTRX_24, 0x03},
+ {OV5640_LENC_GMTRX_25, 0x09},
+ {OV5640_LENC_GMTRX_30, 0x07},
+ {OV5640_LENC_GMTRX_31, 0x03},
+ {OV5640_LENC_GMTRX_32, 0x00},
+ {OV5640_LENC_GMTRX_33, 0x01},
+ {OV5640_LENC_GMTRX_34, 0x03},
+ {OV5640_LENC_GMTRX_35, 0x08},
+ {OV5640_LENC_GMTRX_40, 0x0d},
+ {OV5640_LENC_GMTRX_41, 0x08},
+ {OV5640_LENC_GMTRX_42, 0x05},
+ {OV5640_LENC_GMTRX_43, 0x06},
+ {OV5640_LENC_GMTRX_44, 0x08},
+ {OV5640_LENC_GMTRX_45, 0x0e},
+ {OV5640_LENC_GMTRX_50, 0x29},
+ {OV5640_LENC_GMTRX_51, 0x17},
+ {OV5640_LENC_GMTRX_52, 0x11},
+ {OV5640_LENC_GMTRX_53, 0x11},
+ {OV5640_LENC_GMTRX_54, 0x15},
+ {OV5640_LENC_GMTRX_55, 0x28},
+ {OV5640_LENC_BRMATRX_00, 0x46},
+ {OV5640_LENC_BRMATRX_01, 0x26},
+ {OV5640_LENC_BRMATRX_02, 0x08},
+ {OV5640_LENC_BRMATRX_03, 0x26},
+ {OV5640_LENC_BRMATRX_04, 0x64},
+ {OV5640_LENC_BRMATRX_05, 0x26},
+ {OV5640_LENC_BRMATRX_06, 0x24},
+ {OV5640_LENC_BRMATRX_07, 0x22},
+ {OV5640_LENC_BRMATRX_08, 0x24},
+ {OV5640_LENC_BRMATRX_09, 0x24},
+ {OV5640_LENC_BRMATRX_20, 0x06},
+ {OV5640_LENC_BRMATRX_21, 0x22},
+ {OV5640_LENC_BRMATRX_22, 0x40},
+ {OV5640_LENC_BRMATRX_23, 0x42},
+ {OV5640_LENC_BRMATRX_24, 0x24},
+ {OV5640_LENC_BRMATRX_30, 0x26},
+ {OV5640_LENC_BRMATRX_31, 0x24},
+ {OV5640_LENC_BRMATRX_32, 0x22},
+ {OV5640_LENC_BRMATRX_33, 0x22},
+ {OV5640_LENC_BRMATRX_34, 0x26},
+ {OV5640_LENC_BRMATRX_40, 0x44},
+ {OV5640_LENC_BRMATRX_41, 0x24},
+ {OV5640_LENC_BRMATRX_42, 0x26},
+ {OV5640_LENC_BRMATRX_43, 0x28},
+ {OV5640_LENC_BRMATRX_44, 0x42},
+ {OV5640_LENC_BR_OFFSET, 0xce},
+
+ /* magic registers */
+ {0x3620, 0x52},
+ {0x3621, 0xe0},
+ {0x3622, 0x01},
+ {0x3630, 0x36},
+ {0x3631, 0x0e},
+ {0x3632, 0xe2},
+ {0x3633, 0x12},
+ {0x3634, 0x40},
+ {0x3635, 0x13},
+ {0x3636, 0x03},
+ {0x3703, 0x5a},
+ {0x3704, 0xa0},
+ {0x3705, 0x1a},
+ {0x370b, 0x60},
+ {0x3715, 0x78},
+ {0x3717, 0x01},
+ {0x371b, 0x20},
+ {0x3731, 0x12},
+ {0x3901, 0x0a},
+ {0x3905, 0x02},
+ {0x3906, 0x10},
+ {0x471c, 0x50},
+
{OV5640_TABLE_END, 0x0000},
};
enum {
OV5640_MODE_640x480,
- OV5640_MODE_1296x972,
+ OV5640_MODE_1280x960,
OV5640_MODE_1920x1080,
OV5640_MODE_2592x1944,
OV5640_SIZE_LAST,
@@ -1104,7 +885,7 @@ enum {
static struct ov5640_reg *mode_table[] = {
[OV5640_MODE_640x480] = mode_640x480,
- [OV5640_MODE_1296x972] = mode_1296x972,
+ [OV5640_MODE_1280x960] = mode_1280x960,
[OV5640_MODE_1920x1080] = mode_1920x1080,
[OV5640_MODE_2592x1944] = mode_2592x1944,
};
@@ -1113,9 +894,9 @@ static int test_pattern;
module_param(test_pattern, int, 0644);
static struct ov5640_reg tp_cbars[] = {
- {0x503D, 0x80},
- {0x503E, 0x00},
- {0x5046, 0x01},
+ {OV5640_ISP_TEST, ISP_TEST_EN | ISP_TEST_00 | ISP_TEST_TRANSPARENT | ISP_TEST_ROLLING },
+ {OV5640_ISP_DEBUG_3E, 0x00},
+ {OV5640_ISP_DEBUG_46, 0x01},
{OV5640_TABLE_END, 0x0000}
};
@@ -1143,7 +924,7 @@ static enum v4l2_mbus_pixelcode ov5640_codes[] = {
static const struct v4l2_frmsize_discrete ov5640_frmsizes[OV5640_SIZE_LAST] = {
{640, 480},
- {1296, 972},
+ {1280, 960},
{1920, 1080},
{2592, 1944},
};
@@ -1217,8 +998,9 @@ static int ov5640_write_reg(struct i2c_client *client, u16 addr, u8 value)
msg[0].buf = data;
count = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
- if (count == ARRAY_SIZE(msg))
+ if (count == ARRAY_SIZE(msg)) {
return 0;
+ }
dev_err(&client->dev,
"ov5840: i2c transfer failed, addr: %x, value: %02x\n",
addr, (u32)value);
@@ -1314,6 +1096,10 @@ static int ov5640_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
+ ret = ov5640_write_table(priv, mode_common_registers);
+ if (ret)
+ return ret;
+
ret = ov5640_write_table(priv, mode_table[priv->mode]);
if (ret)
return ret;
@@ -1365,12 +1151,12 @@ static int ov5640_s_power(struct v4l2_subdev *sd, int on)
{
struct ov5640_priv *priv = to_ov5640(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct soc_camera_link *icl = soc_camera_i2c_to_link(client);
+ struct soc_camera_subdev_desc *scsd = soc_camera_i2c_to_desc(client);
if (on)
ov5640_s_fmt(sd, &priv->mf);
- return soc_camera_set_power(&client->dev, icl, on);
+ return soc_camera_set_power(&client->dev, scsd, on);
}
static int ov5640_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
@@ -1409,6 +1195,11 @@ static int ov5640_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a)
return 0;
}
+static int ov5640_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ return 0;
+}
+
/* Get chip identification */
static int ov5640_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *id)
@@ -1462,6 +1253,7 @@ static struct v4l2_subdev_video_ops ov5640_video_ops = {
.enum_mbus_fmt = ov5640_enum_fmt,
.cropcap = ov5640_cropcap,
.g_crop = ov5640_g_crop,
+ .querystd = ov5640_querystd,
};
@@ -1486,13 +1278,13 @@ static int ov5640_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
struct ov5640_priv *priv;
- struct soc_camera_link *icl;
+ struct soc_camera_subdev_desc *scsd;
u8 chip_id_hi, chip_id_lo;
int ret;
/* Checking soc-camera interface */
- icl = soc_camera_i2c_to_link(client);
- if (!icl) {
+ scsd = soc_camera_i2c_to_desc(client);
+ if (!scsd) {
dev_err(&client->dev, "Missing soc_camera_link for driver\n");
return -EINVAL;
}
@@ -1513,14 +1305,14 @@ static int ov5640_probe(struct i2c_client *client,
/*
* check and show product ID and manufacturer ID
*/
- soc_camera_power_on(&client->dev, icl);
- ret = ov5640_read_reg(client, 0x300A, &chip_id_hi);
+ soc_camera_power_on(&client->dev, scsd);
+ ret = ov5640_read_reg(client, OV5640_CHIP_ID_H, &chip_id_hi);
if (ret < 0) {
dev_err(&client->dev, "Failure to read Chip ID (high byte)\n");
return ret;
}
- ret = ov5640_read_reg(client, 0x300B, &chip_id_lo);
+ ret = ov5640_read_reg(client, OV5640_CHIP_ID_L, &chip_id_lo);
if (ret < 0) {
dev_err(&client->dev, "Failure to read Chip ID (low byte)\n");
return ret;
@@ -1534,7 +1326,7 @@ static int ov5640_probe(struct i2c_client *client,
ret = -ENODEV;
return ret;
}
- soc_camera_power_off(&client->dev, icl);
+ soc_camera_power_off(&client->dev, scsd);
ov5640_set_default_fmt(priv);
diff --git a/drivers/media/i2c/soc_camera/tc358743.c b/drivers/media/i2c/soc_camera/tc358743.c
new file mode 100644
index 000000000000..7a63b1ec019a
--- /dev/null
+++ b/drivers/media/i2c/soc_camera/tc358743.c
@@ -0,0 +1,1104 @@
+/*
+ * based on OV5640 driver and TC358743 driver for i.MX6
+ *
+ * author: Antmicro Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/soc_camera.h>
+
+#define PLLCTL0 0x0020
+#define PLLCTL1 0x0022
+
+#define MASK_PLL_PRD 0xf000
+#define SET_PLL_PRD(prd) ((((prd) - 1) << 12) & MASK_PLL_PRD)
+
+#define MASK_PLL_FBD 0x01ff
+#define SET_PLL_FBD(fbd) (((fbd) - 1) & MASK_PLL_FBD)
+
+#define MASK_CKEN 0x0010
+#define MASK_RESETB 0x0002
+#define MASK_PLL_EN 0x0001
+
+#define MASK_NOL_1 0
+#define MASK_NOL_2 2
+#define MASK_NOL_3 4
+#define MASK_NOL_4 6
+
+/* this is true for 27 MHz refclk */
+#define REFCLK_PRE_DIVIDER 9
+#define SET_PLL_FREQ(x) (SET_PLL_PRD(REFCLK_PRE_DIVIDER) | SET_PLL_FBD(DIV_ROUND_UP(x,3)))
+
+#define FRS_BW_25 0x0
+#define FRS_BW_33 0x1
+#define FRS_BW_50 0x2
+#define FRS_BW_MAX 0x3
+
+#define MASK_PLL_FRS 0x0f00
+#define SET_PLL_FRS(frs, bw)	((((frs) | (bw)) << 8) & MASK_PLL_FRS)
+#define GET_FRS(x)		((x) < 125 ? 0xC : (x) < 250 ? 0x8 : (x) < 500 ? 0x4 : 0x0)
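+/*
+ * Illustrative example (not from the datasheet), assuming the 27 MHz
+ * reference clock above: SET_PLL_FREQ(1075) expands to
+ * SET_PLL_PRD(9) | SET_PLL_FBD(DIV_ROUND_UP(1075, 3)), i.e. PRD = 9 and
+ * FBD = 359, giving roughly 27 MHz / 9 * 359 ~= 1077 MHz before the FRS
+ * post-divider, the closest the integer dividers get to the requested rate.
+ */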
+
+#define HCNT_MASK 0x3f
+#define CALC_HDR_CNT(prep, zero) ((((zero) & HCNT_MASK) << 8) | ((prep) & HCNT_MASK))
+
+#define SYS_STATUS 0x8520
+#define CSI_STATUS 0x0410
+#define MASK_S_WSYNC 0x0400
+#define VI_STATUS0 0x8521
+#define VI_STATUS1 0x8522
+#define AU_STATUS0 0x8523
+#define VI_STATUS2 0x8525
+#define VI_STATUS3 0x8528
+
+#define EDID_MODE 0x85C7
+#define MASK_EDID_SPEED 0x40
+#define MASK_EDID_MODE 0x03
+#define MASK_EDID_MODE_DISABLE 0x00
+#define MASK_EDID_MODE_DDC2B 0x01
+#define MASK_EDID_MODE_E_DDC 0x02
+#define EDID_LEN1 0x85CA
+#define EDID_LEN2 0x85CB
+
+
+#define SYSCTL 0x0002
+#define CONFCTL 0x0004
+#define MASK_AUTOINDEX 0x0004
+#define FIFOCTL 0x0006
+#define INTSTATUS 0x0014
+#define INTMASK 0x0016
+#define PHY_RST 0x8535
+#define MASK_RESET_CTRL 0x01 /* Reset active low */
+
+#define HPD_CTL 0x8544
+#define MASK_HPD_CTL0 0x10
+#define MASK_HPD_OUT0 0x01
+
+#define BKSV 0x8800
+
+#define CSI_CONFW 0x0500
+#define MASK_MODE_SET 0xa0000000
+#define MASK_ADDRESS_CSI_CONTROL 0x03000000
+#define MASK_CSI_MODE 0x8000
+#define MASK_HTXTOEN 0x0400
+#define MASK_TXHSMD 0x0080
+#define MASK_HSCKMD 0x0020
+#define HSTXVREGEN 0x0234
+#define TXOPTIONCNTRL 0x0238
+#define MASK_CONTCLKMODE 0x00000001
+#define STARTCNTRL 0x0204
+#define MASK_START 0x00000001
+#define CLW_CNTRL 0x0140
+#define MASK_CLW_LANEDISABLE 0x0001
+
+#define D0W_CNTRL 0x0144
+#define MASK_D0W_LANEDISABLE 0x0001
+
+#define D1W_CNTRL 0x0148
+#define MASK_D1W_LANEDISABLE 0x0001
+
+#define D2W_CNTRL 0x014C
+#define MASK_D2W_LANEDISABLE 0x0001
+
+#define D3W_CNTRL 0x0150
+#define MASK_D3W_LANEDISABLE 0x0001
+
+#define LINEINITCNT 0x0210
+#define LPTXTIMECNT 0x0214
+#define TCLK_HEADERCNT 0x0218
+#define TCLK_TRAILCNT 0x021C
+#define THS_HEADERCNT 0x0220
+#define TWAKEUP 0x0224
+#define TCLK_POSTCNT 0x0228
+#define THS_TRAILCNT 0x022C
+
+#define CSI_START 0x0518
+#define MASK_STRT 0x00000001
+
+#define PHY_CTL1 0x8532
+#define PHY_CTL2 0x8533 /* Not in REF_01 */
+#define PHY_BIAS 0x8536 /* Not in REF_01 */
+#define PHY_CSQ 0x853F /* Not in REF_01 */
+#define DDC_CTL 0x8543
+
+#define SYS_FREQ0 0x8540
+#define SYS_FREQ1 0x8541
+
+#define NCO_F0_MOD 0x8670
+#define MASK_NCO_F0_MOD_27MHZ 0x01
+
+
+#define LOCKDET_REF0 0x8630
+#define LOCKDET_REF1 0x8631
+#define LOCKDET_REF2 0x8632
+
+#define SYS_INT 0x8502
+#define MASK_I_DDC 0x01
+#define SYS_INTM 0x8512
+#define PACKET_INTM 0x8514
+#define CBIT_INTM 0x8515
+#define AUDIO_INTM 0x8516
+#define PHY_CTL0 0x8531
+#define MASK_PHY_CTL 0x01
+#define ANA_CTL 0x8545
+#define MASK_APPL_PCSX 0x30
+#define MASK_APPL_PCSX_NORMAL 0x30
+
+#define MASK_ANALOG_ON 0x01
+
+
+#define AVM_CTL 0x8546
+
+
+#define MASK_PHY_AUTO_RST4 0x04
+#define MASK_PHY_AUTO_RST3 0x02
+#define MASK_PHY_AUTO_RST2 0x01
+
+#define VOUT_SET2 0x8573
+#define MASK_VOUT_422FIL_100 0x40
+#define MASK_SEL422 0x80
+#define MASK_VOUTCOLORMODE_AUTO 0x01
+#define MASK_VOUTCOLORMODE_THROUGH 0x00
+#define MASK_VOUTCOLORMODE_MANUAL 0x03
+
+#define VOUT_SET3 0x8574
+#define MASK_VOUT_EXTCNT 0x08
+
+#define PK_INT_MODE 0x8709
+#define NO_PKT_LIMIT 0x870B
+#define NO_PKT_CLR 0x870C
+#define ERR_PK_LIMIT 0x870D
+#define NO_GDB_LIMIT 0x9007
+#define NO_PKT_LIMIT2 0x870E
+
+#define INIT_END 0x854A
+#define MASK_INIT_END 0x01
+
+
+#define HDCP_MODE 0x8560
+#define HDCP_REG1 0x8563
+#define HDCP_REG2 0x8564
+#define HDCP_REG3 0x85D1
+
+#define EDID_LEN2 0x85CB
+#define EDID_MODE 0x85C7
+
+#define FH_MIN0 0x85AA
+#define FH_MIN1 0x85AB
+#define FH_MAX0 0x85AC
+#define FH_MAX1 0x85AD
+
+#define DE_WIDTH_H_LO 0x8582
+#define DE_WIDTH_H_HI 0x8583
+#define DE_WIDTH_V_LO 0x8588
+#define DE_WIDTH_V_HI 0x8589
+#define H_SIZE_LO 0x858A
+#define H_SIZE_HI 0x858B
+#define V_SIZE_LO 0x858C
+#define V_SIZE_HI 0x858D
+#define FV_CNT_LO 0x85A1
+#define FV_CNT_HI 0x85A2
+
+
+#define MASK_IRRST 0x0800
+#define MASK_CECRST 0x0400
+#define MASK_CTXRST 0x0200
+#define MASK_HDMIRST 0x0100
+
+#define MASK_AUDCHNUM_2 0x0c00
+#define MASK_AUTOINDEX 0x0004
+#define MASK_ABUFEN 0x0002
+#define MASK_VBUFEN 0x0001
+#define MASK_AUDOUTSEL_I2S 0x0010
+
+#define MASK_YCBCRFMT 0x00c0
+
+#define MASK_YCBCRFMT_422_12_BIT 0x0040
+#define MASK_YCBCRFMT_COLORBAR 0x0080
+#define MASK_YCBCRFMT_422_8_BIT 0x00c0
+
+#define MASK_YCBCRFMT_444 0x0000
+
+#define MASK_INFRMEN 0x0020
+#define PHY_EN 0x8534
+#define MASK_ENABLE_PHY 0x01
+
+#define MASK_PWRISO 0x8000
+
+#define VI_MODE 0x8570
+#define MASK_RGB_DVI 0x8
+
+#define VI_REP 0x8576
+#define MASK_VOUT_COLOR_SEL 0xe0
+#define MASK_VOUT_COLOR_RGB_FULL 0x00
+#define MASK_VOUT_COLOR_RGB_LIMITED 0x20
+#define MASK_VOUT_COLOR_601_YCBCR_FULL 0x40
+#define MASK_VOUT_COLOR_601_YCBCR_LIMITED 0x60
+#define MASK_VOUT_COLOR_709_YCBCR_FULL 0x80
+#define MASK_VOUT_COLOR_709_YCBCR_LIMITED 0xa0
+#define MASK_VOUT_COLOR_FULL_TO_LIMITED 0xc0
+#define MASK_VOUT_COLOR_LIMITED_TO_FULL 0xe0
+#define MASK_IN_REP_HEN 0x10
+#define MASK_IN_REP 0x0f
+
+/* status regs */
+#define SYS_STATUS 0x8520
+#define MASK_S_SYNC 0x80
+#define MASK_S_AVMUTE 0x40
+#define MASK_S_HDCP 0x20
+#define MASK_S_HDMI 0x10
+#define MASK_S_PHY_SCDT 0x08
+#define MASK_S_PHY_PLL 0x04
+#define MASK_S_TMDS 0x02
+#define MASK_S_DDC5V 0x01
+#define CSI_STATUS 0x0410
+#define MASK_S_WSYNC 0x0400
+#define MASK_S_TXACT 0x0200
+#define MASK_S_RXACT 0x0100
+#define MASK_S_HLT 0x0001
+#define VI_STATUS1 0x8522
+#define MASK_S_V_GBD 0x08
+#define MASK_S_DEEPCOLOR 0x0c
+#define MASK_S_V_422 0x02
+#define MASK_S_V_INTERLACE 0x01
+#define AU_STATUS0 0x8523
+#define MASK_S_A_SAMPLE 0x01
+#define VI_STATUS3 0x8528
+#define MASK_S_V_COLOR 0x1e
+#define MASK_LIMITED 0x01
+
+#define HDMI_DET 0x8552 /* Not in REF_01 */
+#define MASK_HDMI_DET_MOD1 0x80
+#define MASK_HDMI_DET_MOD0 0x40
+#define MASK_HDMI_DET_V 0x30
+#define MASK_HDMI_DET_V_SYNC 0x00
+#define MASK_HDMI_DET_V_ASYNC_25MS 0x10
+#define MASK_HDMI_DET_V_ASYNC_50MS 0x20
+#define MASK_HDMI_DET_V_ASYNC_100MS 0x30
+#define MASK_HDMI_DET_NUM 0x0f
+
+#define HV_RST 0x85AF /* Not in REF_01 */
+#define MASK_H_PI_RST 0x20
+#define MASK_V_PI_RST 0x10
+
+#define MASK_DDC5V_MODE_100MS 2
+
+#define VI_MUTE 0x857F
+#define MASK_AUTO_MUTE 0xc0
+#define MASK_VI_MUTE 0x10
+
+#define FORCE_MUTE 0x8600
+#define MASK_FORCE_AMUTE 0x10
+
+#define BCAPS 0x8840
+#define BSTATUS1 0x8842
+
+#define MASK_MAX_EXCED 0x08
+#define MASK_REPEATER 0x40
+#define MASK_READY 0x20
+
+#define MASK_HDMI_RSVD 0x80
+
+#define VOUT_SET0 0x8571
+#define VOUT_SET1 0x8572
+
+#define CMD_AUD 0x8601
+#define MASK_CMD_BUFINIT 0x04
+#define MASK_CMD_LOCKDET 0x02
+#define MASK_CMD_MUTE 0x01
+
+#define MASK_FORCE_DMUTE 0x01
+
+#define ERR_PK_LIMIT 0x870D
+#define NO_PKT_LIMIT2 0x870E
+#define PK_AVI_0HEAD 0x8710
+#define PK_AVI_1HEAD 0x8711
+#define PK_AVI_2HEAD 0x8712
+#define PK_AVI_0BYTE 0x8713
+#define PK_AVI_1BYTE 0x8714
+#define PK_AVI_2BYTE 0x8715
+#define PK_AVI_3BYTE 0x8716
+#define PK_AVI_4BYTE 0x8717
+#define PK_AVI_5BYTE 0x8718
+#define PK_AVI_6BYTE 0x8719
+#define PK_AVI_7BYTE 0x871A
+#define PK_AVI_8BYTE 0x871B
+#define PK_AVI_9BYTE 0x871C
+#define PK_AVI_10BYTE 0x871D
+#define PK_AVI_11BYTE 0x871E
+#define PK_AVI_12BYTE 0x871F
+#define PK_AVI_13BYTE 0x8720
+#define PK_AVI_14BYTE 0x8721
+#define PK_AVI_15BYTE 0x8722
+#define PK_AVI_16BYTE 0x8723
+
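+/*
+ * Register/value pair consumed by tc358743_write_table(): the low 16 bits
+ * of 'flags' hold an optional post-write delay in milliseconds, bits 16-23
+ * an optional repeat count and bits 24-31 how many preceding table entries
+ * to replay for each repetition.
+ */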
+struct reg_value {
+ u16 addr;
+ u32 val;
+ u32 flags;
+};
+
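+/*
+ * Access width in bytes for each TC358743 register range; i2c_rd() and
+ * i2c_wr() look the width up here before issuing a transfer.
+ */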
+struct _reg_size {
+	u16 startaddr, endaddr;
+	int size;
+} tc358743_read_reg_size[] =
+{
+ {0x0000, 0x005a, 2},
+ {0x0140, 0x0150, 4},
+ {0x0204, 0x0238, 4},
+ {0x040c, 0x0418, 4},
+ {0x044c, 0x0454, 4},
+ {0x0500, 0x0518, 4},
+ {0x0600, 0x06cc, 4},
+ {0x7000, 0x7100, 2},
+ {0x8500, 0x8bff, 1},
+ {0x8c00, 0x8fff, 4},
+ {0x9000, 0x90ff, 1},
+ {0x9100, 0x92ff, 1},
+ {0, 0, 0},
+};
+
+typedef struct timings_regs {
+ uint32_t frequency;
+ uint32_t line_init_cnt;
+ uint32_t tlpx_time_cnt;
+
+ uint32_t tclk_prepare_cnt;
+ uint32_t tclk_zero_cnt;
+
+	uint32_t tclk_trail_cnt; /* Don't care in continuous clk mode */
+
+ uint32_t ths_prepare_cnt;
+ uint32_t ths_zero_cnt;
+
+ uint32_t twakeup_cnt;
+	uint32_t tclk_post_cnt; /* Don't care in continuous clk mode */
+
+ uint32_t ths_trail_cnt;
+
+ uint32_t fifo_delay;
+} timings_regs;
+
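+/*
+ * MIPI D-PHY timing presets, ordered by increasing CSI link frequency (MHz).
+ * tc358743_get_best_timings() returns the first entry whose frequency is at
+ * least the requested one, falling back to the fastest preset.
+ */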
+timings_regs timings[] = {
+ /* 640x480 @ 75 */
+ { 500, 3328, 1, 1, 7, 0, 1, 0, 16384, 5, 0, 256},
+ /* 800x600 @ 75 */
+ { 594, 3712, 3, 3, 20, 0, 3, 1, 18562, 8, 2, 180},
+ /* 1024x768 @ 75 */
+ { 600, 7689, 3, 2, 20, 0, 3, 0, 18944, 8, 2, 128},
+ /* 1280x720 @ 60 */
+ { 825, 5160, 5, 4, 29, 0, 5, 5, 18000, 0, 4, 180},
+ /* 1280x1024 @ 75 and 1920x1080 @ 60 */
+ { 1075, 6000, 5, 4, 29, 0, 5, 80, 18000, 0, 4, 256},
+ {},
+};
+
+static struct reg_value tc358743_reset[] = {
+ {CONFCTL, 0, 100},
+
+ {PHY_RST, 0x0, 100},
+ {PHY_RST, 0x1, 100},
+ {PHY_EN, 0, 100},
+ {PHY_EN, 1, 100},
+
+ /* reset */
+ {SYSCTL, MASK_IRRST | MASK_CECRST | MASK_CTXRST | MASK_HDMIRST, 100},
+ {SYSCTL, 0x00000000, 1000},
+
+ /* set ref frequency to 27 MHz */
+ {SYS_FREQ0, (2700) & 0xFF, 0},
+ {SYS_FREQ1, (2700 >> 8) & 0xFF, 0},
+
+ {LOCKDET_REF0, (270000) & 0xFF, 0},
+ {LOCKDET_REF1, (270000 >> 8) & 0xFF, 0},
+ {LOCKDET_REF2, (270000 >> 16) & 0xFF, 0},
+ {FH_MIN0, (270) & 0xFF, 0},
+ {FH_MIN1, (270 >> 8) & 0xFF, 0},
+ {FH_MAX0, (27 * 66) & 0xFF, 0},
+ {FH_MAX1, ((27 * 66) >> 8) & 0xFF, 0},
+ {NCO_F0_MOD, MASK_NCO_F0_MOD_27MHZ, 0},
+};
+
+static timings_regs tc358743_get_best_timings(uint32_t frequency)
+{
+	int count = 0;
+	timings_regs best_timings = {};
+
+	while (timings[count].frequency != 0) {
+		best_timings = timings[count];
+		if (timings[count].frequency >= frequency)
+			break;
+		count++;
+	}
+	return best_timings;
+}
+
+static struct reg_value tc358743_2lanes_start[] = {
+ {CLW_CNTRL, 0x0000, 0},
+
+ /* Make all lanes active */
+ {D0W_CNTRL, 0x0000, 0},
+ {D1W_CNTRL, 0x0000, 0},
+
+ {D2W_CNTRL, 0x0000, 0},
+ {D3W_CNTRL, 0x0000, 0},
+ {HSTXVREGEN, 0x001f, 0},
+
+ /* Continuous clock mode */
+ {TXOPTIONCNTRL, MASK_CONTCLKMODE, 0},
+ {STARTCNTRL, MASK_START, 0},
+ {CSI_START, MASK_STRT, 0},
+
+ /* Use two lanes */
+ {CSI_CONFW, MASK_MODE_SET | MASK_ADDRESS_CSI_CONTROL | MASK_CSI_MODE | MASK_TXHSMD | MASK_HSCKMD | MASK_NOL_2, 0},
+
+ /* Output Control */
+ {CONFCTL, MASK_VBUFEN | MASK_INFRMEN | MASK_YCBCRFMT_422_8_BIT | MASK_AUTOINDEX, 0},
+};
+
+static struct reg_value tc358743_setting_hdmi[] = {
+ /* HDMI interrupt mask */
+ {SYS_INT, 0x0000, 100},
+ {SYS_INTM, 0x0000, 0},
+ {PACKET_INTM, 0x0000, 0},
+ {CBIT_INTM, 0x0000, 0},
+ {AUDIO_INTM, 0x0000, 0},
+
+ {PHY_CTL0, MASK_PHY_CTL, 0},
+ /* HDMI PHY */
+ {PHY_CTL1, 0x0080, 0},
+ {PHY_BIAS, 0x0040, 0},
+ {PHY_CSQ, 0x000a, 0},
+
+ {HDMI_DET, MASK_HDMI_DET_V_SYNC | MASK_HDMI_DET_MOD0 | MASK_HDMI_DET_MOD1, 0},
+ {HV_RST, MASK_H_PI_RST | MASK_V_PI_RST, 0},
+ {PHY_CTL2, MASK_PHY_AUTO_RST2 | MASK_PHY_AUTO_RST3 | MASK_PHY_AUTO_RST4, 0},
+
+ /* HDMI system */
+ {DDC_CTL, 0x0030 | MASK_DDC5V_MODE_100MS, 0},
+ {ANA_CTL, MASK_APPL_PCSX_NORMAL | MASK_ANALOG_ON, 0},
+ {AVM_CTL, 0x002d, 0},
+ {VI_MODE, MASK_RGB_DVI, 0},
+ {VI_MUTE, MASK_AUTO_MUTE, 0},
+ {CMD_AUD, MASK_CMD_MUTE, 0},
+
+ /* EDID */
+ {EDID_MODE, 0x0001, 2},
+ {EDID_LEN2, 0x0001, 0},
+
+ {BCAPS, MASK_REPEATER | MASK_READY, 0}, /* Turn off HDMI */
+ {BSTATUS1, MASK_MAX_EXCED, 0},
+
+ /* HDCP Settings */
+ {HDCP_REG3, 0x0001, 0},
+ {HDCP_MODE, 0x0024, 0},
+ {HDCP_REG1, 0x0011, 0},
+ {HDCP_REG2, 0x000f, 0},
+
+ /* RGB to YUV Conversion */
+ {VOUT_SET0, 0x0002,0},
+ {VOUT_SET2, MASK_VOUTCOLORMODE_AUTO | MASK_SEL422 | MASK_VOUT_422FIL_100, 0},
+ {VOUT_SET3, MASK_VOUT_EXTCNT, 0},
+ {VI_REP, MASK_VOUT_COLOR_601_YCBCR_LIMITED, 0},
+
+ /* InfoFrame extraction */
+ {PK_INT_MODE, 0x00ff, 0},
+ {NO_PKT_LIMIT, 0x002c, 0},
+ {NO_PKT_CLR, 0x0053, 0},
+ {ERR_PK_LIMIT, 0x0001, 0},
+ {NO_PKT_LIMIT2, 0x0030, 0},
+ {NO_GDB_LIMIT, 0x0010, 0},
+ {INIT_END, MASK_INIT_END, 0},
+};
+
+enum {
+ TC358743_MODE_640x480,
+ TC358743_MODE_800x600,
+ TC358743_MODE_1024x768,
+ TC358743_MODE_1280x720,
+ TC358743_MODE_1280x1024,
+ TC358743_MODE_1680x1050,
+ TC358743_MODE_1920x1080,
+ TC358743_SIZE_LAST,
+};
+
+#define to_tc358743(sd) container_of(sd, struct tc358743_priv, subdev)
+
+struct tc358743_priv {
+ struct v4l2_subdev subdev;
+ struct v4l2_mbus_framefmt mf;
+ int ident;
+ u16 chip_id;
+ u8 revision;
+ int mode;
+ struct i2c_client *client;
+};
+
+static enum v4l2_mbus_pixelcode tc358743_codes[] = {
+ V4L2_MBUS_FMT_UYVY8_2X8,
+};
+
+static const struct v4l2_frmsize_discrete tc358743_frmsizes[TC358743_SIZE_LAST] = {
+ {640, 480},
+ {800, 600},
+ {1024, 768},
+ {1280, 720},
+ {1280, 1024},
+ {1680, 1050},
+ {1920, 1080},
+};
+
+/* EDID taken from BENQ G2220HD display */
+static u8 hdmi_edid[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x09, 0xd1, 0x21, 0x78, 0x45, 0x54, 0x00, 0x00,
+ 0x26, 0x14, 0x01, 0x03, 0x80, 0x30, 0x1b, 0x78, 0x2e, 0x35, 0x81, 0xa6, 0x56, 0x48, 0x9a, 0x24,
+ 0x12, 0x50, 0x54, 0xa5, 0x6b, 0x80, 0x71, 0x00, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0xa9, 0xc0,
+ 0xb3, 0x00, 0xd1, 0xc0, 0x01, 0x01, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
+ 0x45, 0x00, 0xdd, 0x0c, 0x11, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0xff, 0x00, 0x48, 0x39, 0x41,
+ 0x30, 0x30, 0x34, 0x35, 0x35, 0x53, 0x4c, 0x30, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32,
+ 0x4c, 0x18, 0x53, 0x11, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
+ 0x00, 0x42, 0x65, 0x6e, 0x51, 0x20, 0x47, 0x32, 0x32, 0x32, 0x30, 0x48, 0x44, 0x0a, 0x00, 0xd7,
+};
+
+static int tc358743_find_mode(u32 width, u32 height)
+{
+ int i;
+
+ for (i = 0; i < TC358743_SIZE_LAST; i++) {
+ if ((tc358743_frmsizes[i].width >= width) &&
+ (tc358743_frmsizes[i].height >= height))
+ break;
+ }
+
+ /* If not found, select the biggest */
+ if (i >= TC358743_SIZE_LAST)
+ i = TC358743_SIZE_LAST - 1;
+
+ return i;
+}
+
+static int get_register_size(u16 reg) {
+ int i = 0;
+ int size = 0;
+ while(0 != tc358743_read_reg_size[i].startaddr ||
+ 0 != tc358743_read_reg_size[i].endaddr ||
+ 0 != tc358743_read_reg_size[i].size)
+ {
+ if(tc358743_read_reg_size[i].startaddr <= reg && tc358743_read_reg_size[i].endaddr >= reg)
+ {
+ size = tc358743_read_reg_size[i].size;
+ break;
+ }
+ i++;
+ }
+ if (size == 0) {
+ printk(KERN_ERR "Unkown register 0x%04x size!!", reg);
+ }
+ return size;
+}
+
+static uint32_t i2c_rd(struct i2c_client *client, u16 reg)
+{
+ int err;
+ uint32_t result = 0;
+ uint8_t *values = (uint8_t*)&result;
+ u8 buf[2] = { reg >> 8, reg & 0xff };
+ int n = get_register_size(reg);
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 2,
+ .buf = buf,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = n,
+ .buf = values,
+ },
+ };
+ err = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (err != ARRAY_SIZE(msgs)) {
+ pr_err("%s: reading register 0x%x from 0x%x failed\n", __func__, reg, client->addr);
+ }
+ return result;
+}
+
+static int i2c_wr(struct i2c_client *client, u16 reg, u32 val)
+{
+ int i = 0;
+ u32 data = val;
+ u8 au8Buf[6] = {0};
+ int size = get_register_size(reg);
+ int len = get_register_size(reg);
+
+ if(!size) {
+ pr_err("%s:write reg error:reg=%x is not found\n",__func__, reg);
+ return -1;
+ }
+
+ if(size == 3) {
+ size = 2;
+ } else if(size != len) {
+ pr_err("%s:write reg len error:reg=%x %d instead of %d\n",
+ __func__, reg, len, size);
+ return 0;
+ }
+
+ while(len > 0) {
+ i = 0;
+ au8Buf[i++] = (reg >> 8) & 0xff;
+ au8Buf[i++] = reg & 0xff;
+ while(size-- > 0) {
+ au8Buf[i++] = (u8)data;
+ data >>= 8;
+ }
+
+ if (i2c_master_send(client, au8Buf, i) < 0) {
+ pr_err("%s:write reg error:reg=%x,val=%x\n",
+ __func__, reg, val);
+ return -1;
+ }
+ len -= (u8)size;
+ reg += (u16)size;
+ }
+
+ return 0;
+}
+
+static int tc358743_set_pll(struct i2c_client *client, uint32_t frequency)
+{
+ int ret = 0;
+
+ timings_regs timings = tc358743_get_best_timings(frequency);
+ if (timings.frequency == 0) return 1;
+
+ printk(KERN_DEBUG "Setting pll to frequency %d (%d) MHz\n",
+ frequency, timings.frequency);
+
+ ret += i2c_wr(client, FIFOCTL, timings.fifo_delay);
+
+ /* Disable all interrupts except the hdmi-rx */
+ ret += i2c_wr(client, INTSTATUS, 0x0);
+ ret += i2c_wr(client, INTMASK, 0x5ff);
+
+ ret += i2c_wr(client, PLLCTL0, SET_PLL_FREQ(frequency));
+
+ ret += i2c_wr(client, PLLCTL1,
+ SET_PLL_FRS(GET_FRS(frequency), FRS_BW_50) | MASK_RESETB | MASK_PLL_EN);
+ mdelay(1);
+ ret += i2c_wr(client, PLLCTL1,
+ SET_PLL_FRS(GET_FRS(frequency), FRS_BW_50) | MASK_RESETB | MASK_PLL_EN | MASK_CKEN);
+
+ ret += i2c_wr(client, LINEINITCNT, timings.line_init_cnt);
+ ret += i2c_wr(client, LPTXTIMECNT, timings.tlpx_time_cnt);
+
+ ret += i2c_wr(client, TCLK_HEADERCNT,
+ CALC_HDR_CNT(timings.tclk_prepare_cnt, timings.tclk_zero_cnt));
+
+ ret += i2c_wr(client, TCLK_TRAILCNT, timings.tclk_trail_cnt);
+
+ ret += i2c_wr(client, THS_HEADERCNT,
+ CALC_HDR_CNT(timings.ths_prepare_cnt, timings.ths_zero_cnt));
+
+ ret += i2c_wr(client, TWAKEUP, timings.twakeup_cnt);
+ ret += i2c_wr(client, TCLK_POSTCNT, timings.tclk_post_cnt);
+ ret += i2c_wr(client, THS_TRAILCNT, timings.ths_trail_cnt);
+
+ return ret;
+}
+
+static int tc358743_write_table(struct i2c_client *client, struct reg_value table[], int table_length)
+{
+ u32 lines_to_repeat = 0;
+ u32 repeats = 0;
+ u32 delay_ms = 0;
+ u16 addr = 0;
+ u32 val = 0;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < table_length; ++i) {
+ addr = table[i].addr;
+ val = table[i].val;
+ delay_ms = (table[i].flags & 0xffff);
+
+ ret = i2c_wr(client, addr, val);
+ if (ret < 0)
+ break;
+
+ if (delay_ms)
+ msleep(delay_ms);
+
+ if(((table[i].flags >> 16) & (0xff)) != 0) {
+ if(!repeats) {
+ repeats = ((table[i].flags >> 16) & 0xff);
+ lines_to_repeat = ((table[i].flags >> 24) & 0xff);
+ }
+ if(--repeats > 0) {
+ i -= lines_to_repeat;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int tc358743_write_edid(struct i2c_client *client, u8 *edid, int len)
+{
+ int i = 0, off = 0;
+ u8 au8Buf[8+2] = {0};
+ int size = 0;
+ u16 reg;
+
+ reg = 0x8C00;
+ off = 0;
+ size = ARRAY_SIZE(au8Buf)-2;
+ printk(KERN_DEBUG "Write EDID: %d (%d)\n", len, size);
+ while(len > 0)
+ {
+ i = 0;
+ au8Buf[i++] = (reg >> 8) & 0xff;
+ au8Buf[i++] = reg & 0xff;
+ while(i < ARRAY_SIZE(au8Buf))
+ {
+ au8Buf[i++] = edid[off++];
+ }
+
+ if (i2c_master_send(client, au8Buf, i) < 0) {
+ pr_err("%s:write reg error:reg=%x,val=%x\n",
+ __func__, reg, off);
+ return -1;
+ }
+ len -= (u8)size;
+ reg += (u16)size;
+ }
+ printk(KERN_DEBUG "Activate EDID\n");
+ i2c_wr(client, EDID_MODE, MASK_EDID_MODE_DDC2B);
+ i2c_wr(client, EDID_LEN1, 0x00);
+ i2c_wr(client, EDID_LEN2, 0x01);
+ return 0;
+}
+
+static int tc358743_toggle_hpd(struct i2c_client *client, int active)
+{
+ int ret = 0;
+
+ if(active)
+ {
+ ret += i2c_wr(client, HPD_CTL, 0x00);
+ mdelay(500);
+ ret += i2c_wr(client, HPD_CTL, MASK_HPD_CTL0);
+ } else {
+ ret += i2c_wr(client, HPD_CTL, MASK_HPD_CTL0);
+ mdelay(500);
+ ret += i2c_wr(client, HPD_CTL, 0x00);
+ }
+ return ret;
+}
+
+static void tc358743_set_default_fmt(struct tc358743_priv *priv)
+{
+ struct v4l2_mbus_framefmt *mf = &priv->mf;
+
+ mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ mf->field = V4L2_FIELD_NONE;
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+/* Start/Stop streaming from the device */
+static int tc358743_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct tc358743_priv *priv = to_tc358743(sd);
+
+ if (!enable) {
+ printk(KERN_DEBUG "Disabling stream");
+ i2c_wr(priv->client, CONFCTL, 0);
+ return 0;
+ }
+
+ return 0;
+}
+
+static int tc358743_try_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ int mode;
+
+ mode = tc358743_find_mode(mf->width, mf->height);
+ mf->width = tc358743_frmsizes[mode].width;
+ mf->height = tc358743_frmsizes[mode].height;
+ mf->field = V4L2_FIELD_NONE;
+ mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
+ mf->colorspace = V4L2_COLORSPACE_SRGB;
+
+ return 0;
+}
+
+
+struct aviInfoFrame {
+ u8 f17;
+ u8 y10;
+ u8 a0;
+ u8 b10;
+ u8 s10;
+ u8 c10;
+ u8 m10;
+ u8 r3210;
+ u8 itc;
+ u8 ec210;
+ u8 q10;
+ u8 sc10;
+ u8 f47;
+ u8 vic;
+ u8 yq10;
+ u8 cn10;
+ u8 pr3210;
+ u16 etb;
+ u16 sbb;
+ u16 elb;
+ u16 srb;
+};
+
+static inline bool is_hdmi(struct v4l2_subdev *sd)
+{
+ struct tc358743_priv *priv = to_tc358743(sd);
+ return i2c_rd(priv->client, SYS_STATUS) & MASK_S_HDMI;
+}
+
+/* set the format we will capture in */
+static int tc358743_s_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *mf)
+{
+ struct tc358743_priv *priv = to_tc358743(sd);
+ int ret;
+ uint32_t v2, v, width, height, v_size, h_size, fv_cnt, fps;
+
+ ret = tc358743_try_fmt(sd, mf);
+ if (ret < 0)
+ return ret;
+
+ priv->mode = tc358743_find_mode(mf->width, mf->height);
+
+ memcpy(&priv->mf, mf, sizeof(struct v4l2_mbus_framefmt));
+
+ tc358743_write_table(priv->client, tc358743_reset, ARRAY_SIZE(tc358743_reset));
+ tc358743_write_table(priv->client, tc358743_setting_hdmi, ARRAY_SIZE(tc358743_setting_hdmi));
+ msleep(100);
+
+ v = i2c_rd(priv->client, DE_WIDTH_H_LO);
+ v2 = i2c_rd(priv->client, DE_WIDTH_H_HI) & 0x1f;
+ width = (v2 << 8) + v;
+ v = i2c_rd(priv->client, DE_WIDTH_V_LO);
+ v2 = i2c_rd(priv->client, DE_WIDTH_V_HI) & 0x1f;
+ height = (v2 << 8) + v;
+ v = i2c_rd(priv->client, H_SIZE_LO);
+ v2 = i2c_rd(priv->client, H_SIZE_HI) & 0x1f;
+ h_size = (v2 << 8) + v;
+ v = i2c_rd(priv->client, V_SIZE_LO);
+ v2 = i2c_rd(priv->client, V_SIZE_HI) & 0x3f;
+ v_size = ((v2 << 8) + v) / 2;
+ v = i2c_rd(priv->client, FV_CNT_LO);
+ v2 = i2c_rd(priv->client, FV_CNT_HI) & 0x3;
+ fv_cnt = (v2 << 8) + v;
+ fps = fv_cnt > 0 ? DIV_ROUND_CLOSEST(10000, fv_cnt) : 0;
+
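+	/*
+	 * Rough link budget (assuming 16 bits per pixel over two CSI lanes):
+	 * the total payload is width * height * 16 * fps bits/s, half of
+	 * which ends up on each lane.
+	 */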
+ printk(KERN_DEBUG "Image is %dx%d@%d, ~%d Gbps bandwidth (~%dMHz/lane)",
+ width, height, fps,
+ DIV_ROUND_UP(fps*width*height*16, 1000*1000),
+ DIV_ROUND_UP(fps*width*height*8, 1000*1000));
+
+ if (width * fps == 0) {
+ printk(KERN_ERR "width or fps 0");
+ return 0;
+ }
+
+ if ((width != mf->width)) {
+ printk(KERN_ERR "Wrong width (%d vs %d)", width, mf->width);
+ return 0;
+ }
+
+ /* XXX the chip sometimes miscalculates the height of the image,
+ * therefore, we base the output mostly on the width of the image */
+ if (width == 1920) {
+ /* works for 1920x1080 @ 60 */
+ ret = tc358743_set_pll(priv->client, 1075);
+ /* 872 is in the middle between 1024 and 720, should be a safe
+ * value for both 1280x720 and 1280x1024 */
+ } else if (width == 1280 && height >= 872) {
+ /* works on 1280x1024 @ 75 */
+ ret = tc358743_set_pll(priv->client, 1075);
+ } else if (width == 1280 && height < 872) {
+ /* works on 1280x720 @ 60 */
+ ret = tc358743_set_pll(priv->client, 825);
+ } else if (width == 1024) {
+ /* works on 1024x768 @ 75 */
+ ret = tc358743_set_pll(priv->client, 600);
+ } else if (width >= 800) {
+ /* works for 800x600@75 */
+ ret = tc358743_set_pll(priv->client, 594);
+ } else if (width == 640) {
+ /* works on 640x480 @ 75 */
+ ret = tc358743_set_pll(priv->client, 500);
+ } else {
+ return 0;
+ }
+
+ tc358743_write_table(priv->client,
+ tc358743_2lanes_start,
+ ARRAY_SIZE(tc358743_2lanes_start));
+
+ return ret;
+}
+
+static int tc358743_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (index >= ARRAY_SIZE(tc358743_codes))
+ return -EINVAL;
+
+ *code = tc358743_codes[index];
+
+ return 0;
+}
+
+/* Get chip identification */
+static int tc358743_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *id)
+{
+ struct tc358743_priv *priv = to_tc358743(sd);
+
+ id->ident = priv->ident;
+ id->revision = priv->revision;
+
+ return 0;
+}
+
+static int tc358743_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct soc_camera_subdev_desc *scsd = soc_camera_i2c_to_desc(client);
+
+ return soc_camera_set_power(&client->dev, scsd, on);
+}
+
+static struct v4l2_subdev_video_ops tc358743_video_ops = {
+ .s_stream = tc358743_s_stream,
+ .s_mbus_fmt = tc358743_s_fmt,
+ .try_mbus_fmt = tc358743_try_fmt,
+ .enum_mbus_fmt = tc358743_enum_fmt,
+};
+
+static struct v4l2_subdev_core_ops tc358743_core_ops = {
+ .g_chip_ident = tc358743_g_chip_ident,
+ .s_power = tc358743_s_power,
+};
+
+static struct v4l2_subdev_ops tc358743_subdev_ops = {
+ .core = &tc358743_core_ops,
+ .video = &tc358743_video_ops,
+};
+
+/*
+ * i2c_driver function
+ */
+static int tc358743_probe(struct i2c_client *client,
+ const struct i2c_device_id *did)
+{
+ struct tc358743_priv *priv;
+ struct soc_camera_subdev_desc *scsd;
+ int ret = 0;
+
+ /* Checking soc-camera interface */
+ scsd = client->dev.platform_data;
+ if (!scsd) {
+ dev_err(&client->dev, "Missing soc_camera_link for driver\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(&client->dev, sizeof(struct tc358743_priv),
+ GFP_KERNEL);
+ if (!priv) {
+ dev_err(&client->dev, "Failed to allocate private data!\n");
+ return -ENOMEM;
+ }
+
+ v4l2_i2c_subdev_init(&priv->subdev, client, &tc358743_subdev_ops);
+
+ priv->client = client;
+ priv->ident = V4L2_IDENT_OV5640;
+
+ /*
+ * check and show product ID and manufacturer ID
+ */
+ soc_camera_power_on(&client->dev, scsd);
+
+ tc358743_set_default_fmt(priv);
+ tc358743_write_table(client, tc358743_reset, ARRAY_SIZE(tc358743_reset));
+ tc358743_write_table(client, tc358743_setting_hdmi, ARRAY_SIZE(tc358743_setting_hdmi));
+
+	if ((ret = tc358743_write_edid(client, hdmi_edid, ARRAY_SIZE(hdmi_edid))))
+		printk(KERN_ERR "%s: Failed to write EDID to TC358743!\n", __func__);
+
+ tc358743_toggle_hpd(client, 1);
+
+ soc_camera_power_off(&client->dev, scsd);
+
+ return ret;
+}
+
+static int tc358743_remove(struct i2c_client *client)
+{
+ return 0;
+}
+
+static const struct i2c_device_id tc358743_id[] = {
+ { "tc358743", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tc358743_id);
+
+static struct i2c_driver tc358743_i2c_driver = {
+ .driver = {
+ .name = "tc358743",
+ .owner = THIS_MODULE,
+ },
+ .probe = tc358743_probe,
+ .remove = tc358743_remove,
+ .id_table = tc358743_id,
+};
+
+static int __init tc358743_module_init(void)
+{
+ return i2c_add_driver(&tc358743_i2c_driver);
+}
+
+static void __exit tc358743_module_exit(void)
+{
+ i2c_del_driver(&tc358743_i2c_driver);
+}
+
+module_init(tc358743_module_init);
+module_exit(tc358743_module_exit);
+
+MODULE_DESCRIPTION("SoC Camera driver for Toshiba TC358743 HDMI to CSI-2 bridge");
+MODULE_AUTHOR("Wojciech Bieganski <wbieganski@antmicro.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/soc_camera/tegra_camera/common.c b/drivers/media/platform/soc_camera/tegra_camera/common.c
index 17129d507d88..762c95724609 100644
--- a/drivers/media/platform/soc_camera/tegra_camera/common.c
+++ b/drivers/media/platform/soc_camera/tegra_camera/common.c
@@ -170,7 +170,9 @@ static int tegra_camera_activate(struct tegra_camera_dev *cam,
if (cam_ops->capture_clean)
cam_ops->capture_clean(cam);
- cam->sof = 1;
+ cam->sof[0] = 1;
+ cam->sof[1] = 1;
+ cam->sof[2] = 1;
return 0;
}
@@ -197,13 +199,21 @@ static void tegra_camera_deactivate(struct tegra_camera_dev *cam)
nvhost_module_idle_ext(cam->ndev);
- cam->sof = 0;
- cam->cal_done = 0;
+ cam->sof[0] = 0;
+ cam->sof[1] = 0;
+ cam->sof[2] = 0;
+ cam->cal_done[0] = 0;
+ cam->cal_done[1] = 0;
+ cam->cal_done[2] = 0;
}
static int tegra_camera_capture_frame(struct tegra_camera_dev *cam,
struct tegra_camera_buffer *buf)
{
+ struct soc_camera_device *icd = buf->icd;
+ struct soc_camera_subdev_desc *ssdesc = &icd->sdesc->subdev_desc;
+ struct tegra_camera_platform_data *pdata = ssdesc->drv_priv;
+ int port = pdata->port;
int err;
/* Setup capture registers */
@@ -212,10 +222,10 @@ static int tegra_camera_capture_frame(struct tegra_camera_dev *cam,
cam->ops->incr_syncpts(cam);
/* MIPI CSI pads calibration after starting capture */
- if (cam->ops->mipi_calibration && !cam->cal_done) {
+ if (cam->ops->mipi_calibration && !cam->cal_done[port-1]) {
err = cam->ops->mipi_calibration(cam, buf);
if (!err)
- cam->cal_done = 1;
+ cam->cal_done[port-1] = 1;
}
/* Issue start capture */
@@ -551,6 +561,18 @@ static int tegra_camera_start_streaming(struct vb2_queue *q, unsigned int count)
vb2_vidq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct tegra_camera_dev *cam = ici->priv;
+ struct soc_camera_subdev_desc *ssdesc = &icd->sdesc->subdev_desc;
+ struct tegra_camera_platform_data *pdata = ssdesc->drv_priv;
+ int port = pdata->port;
+
+ /* CSI B and CSI C can't work simultaneously */
+ if (port == TEGRA_CAMERA_PORT_CSI_B ||
+ port == TEGRA_CAMERA_PORT_CSI_C) {
+ if (cam->csi_bc_busy)
+ return -EBUSY;
+ else
+ cam->csi_bc_busy = true;
+ }
/* Start kthread to capture frame */
cam->kthread_capture_start = kthread_run(
@@ -583,6 +605,12 @@ static int tegra_camera_stop_streaming(struct vb2_queue *q)
cam->ops->capture_stop(cam, port);
+ if (port == TEGRA_CAMERA_PORT_CSI_B ||
+ port == TEGRA_CAMERA_PORT_CSI_C) {
+ cam->csi_bc_busy = false;
+ }
+
+
return 0;
}
diff --git a/drivers/media/platform/soc_camera/tegra_camera/common.h b/drivers/media/platform/soc_camera/tegra_camera/common.h
index 1d71c2ba39f5..0b7ef3874efd 100644
--- a/drivers/media/platform/soc_camera/tegra_camera/common.h
+++ b/drivers/media/platform/soc_camera/tegra_camera/common.h
@@ -130,8 +130,10 @@ struct tegra_camera_dev {
/* Test Pattern Generator mode */
int tpg_mode;
- int sof;
- int cal_done;
+ int sof[3];
+ int cal_done[3];
+
+ bool csi_bc_busy;
};
#define TC_VI_REG_RD(dev, offset) readl(dev->reg_base + offset)
diff --git a/drivers/media/platform/soc_camera/tegra_camera/vi2.c b/drivers/media/platform/soc_camera/tegra_camera/vi2.c
index 8c1f81a56b7a..fb03bb7854b1 100644
--- a/drivers/media/platform/soc_camera/tegra_camera/vi2.c
+++ b/drivers/media/platform/soc_camera/tegra_camera/vi2.c
@@ -361,6 +361,10 @@ static struct tegra_camera_clk vi2_clks0[] = {
.freq = 24000000,
},
{
+ .name = "vi_sensor2",
+ .freq = 24000000,
+ },
+ {
.name = "csi",
.freq = 408000000,
.use_devname = 1,
@@ -387,6 +391,16 @@ static struct tegra_camera_clk vi2_clks0[] = {
.freq = 102000000,
.use_devname = 1,
},
+ {
+ .name = "cilcd",
+ .freq = 102000000,
+ .use_devname = 1,
+ },
+ {
+ .name = "cile",
+ .freq = 102000000,
+ .use_devname = 1,
+ },
/* Always put "p11_d" at the end */
{
.name = "pll_d",
@@ -401,6 +415,10 @@ static struct tegra_camera_clk vi2_clks1[] = {
.use_devname = 1,
},
{
+ .name = "vi_sensor",
+ .freq = 24000000,
+ },
+ {
.name = "vi_sensor2",
.freq = 24000000,
},
@@ -414,6 +432,11 @@ static struct tegra_camera_clk vi2_clks1[] = {
.freq = 0,
},
{
+ .name = "csus",
+ .freq = 0,
+ .use_devname = 1,
+ },
+ {
.name = "sclk",
.freq = 80000000,
},
@@ -422,6 +445,11 @@ static struct tegra_camera_clk vi2_clks1[] = {
.freq = 300000000,
},
{
+ .name = "cilab",
+ .freq = 102000000,
+ .use_devname = 1,
+ },
+ {
.name = "cilcd",
.freq = 102000000,
.use_devname = 1,
@@ -589,7 +617,7 @@ static int vi2_capture_setup_csi_0(struct tegra_camera_dev *cam,
{
struct soc_camera_subdev_desc *ssdesc = &icd->sdesc->subdev_desc;
struct tegra_camera_platform_data *pdata = ssdesc->drv_priv;
- int format = 0, data_type = 0, image_size = 0;
+ int format = 0, data_type = 0, image_size = 0, im_width = 0;
u32 val;
/*
@@ -609,8 +637,10 @@ static int vi2_capture_setup_csi_0(struct tegra_camera_dev *cam,
0x3 | (0x1 << 5) | (0x40 << 8));
#endif
- TC_VI_REG_WT(cam, TEGRA_CSI_PHY_CILA_CONTROL0, 0x9);
- TC_VI_REG_WT(cam, TEGRA_CSI_PHY_CILB_CONTROL0, 0x9);
+ TC_VI_REG_WT(cam, TEGRA_CSI_PHY_CILA_CONTROL0,
+ ((pdata->continuous_clk << 6) | 0x05));
+ TC_VI_REG_WT(cam, TEGRA_CSI_PHY_CILB_CONTROL0,
+ ((pdata->continuous_clk << 6) | 0x05));
TC_VI_REG_WT(cam, TEGRA_CSI_PIXEL_STREAM_PPA_COMMAND, 0xf007);
TC_VI_REG_WT(cam, TEGRA_CSI_CSI_PIXEL_PARSER_A_INTERRUPT_MASK, 0x0);
TC_VI_REG_WT(cam, TEGRA_CSI_PIXEL_STREAM_A_CONTROL0, 0x280301f0);
@@ -650,7 +680,13 @@ static int vi2_capture_setup_csi_0(struct tegra_camera_dev *cam,
(icd->current_fmt->code == V4L2_MBUS_FMT_VYUY8_2X8) ||
(icd->current_fmt->code == V4L2_MBUS_FMT_YUYV8_2X8) ||
(icd->current_fmt->code == V4L2_MBUS_FMT_YVYU8_2X8)) {
- /* TBD */
+ if (icd->colorspace == V4L2_COLORSPACE_SMPTE170M)
+ im_width = 720;
+ else
+ im_width = icd->user_width;
+ format = TEGRA_IMAGE_FORMAT_T_U8_Y8__V8_Y8;
+ data_type = TEGRA_IMAGE_DT_YUV422_8;
+ image_size = im_width * 2;
} else if ((icd->current_fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8) ||
(icd->current_fmt->code == V4L2_MBUS_FMT_SGBRG8_1X8)) {
format = TEGRA_IMAGE_FORMAT_T_L8;
@@ -663,15 +699,14 @@ static int vi2_capture_setup_csi_0(struct tegra_camera_dev *cam,
image_size = (icd->user_width * 10) >> 3;
}
- TC_VI_REG_WT(cam, TEGRA_VI_CSI_0_IMAGE_DEF,
- (cam->tpg_mode ? 0 : 1 << 24) | (format << 16) | 0x1);
+ TC_VI_REG_WT(cam, TEGRA_VI_CSI_0_IMAGE_DEF, ((format << 16) | 0x1));
TC_VI_REG_WT(cam, TEGRA_VI_CSI_0_CSI_IMAGE_DT, data_type);
TC_VI_REG_WT(cam, TEGRA_VI_CSI_0_CSI_IMAGE_SIZE_WC, image_size);
TC_VI_REG_WT(cam, TEGRA_VI_CSI_0_CSI_IMAGE_SIZE,
- (icd->user_height << 16) | icd->user_width);
+ (icd->user_height << 16) | im_width);
/* Start pixel parser in single shot mode at beginning */
TC_VI_REG_WT(cam, TEGRA_CSI_PIXEL_STREAM_PPA_COMMAND, 0xf005);
@@ -684,7 +719,7 @@ static int vi2_capture_setup_csi_1(struct tegra_camera_dev *cam,
{
struct soc_camera_subdev_desc *ssdesc = &icd->sdesc->subdev_desc;
struct tegra_camera_platform_data *pdata = ssdesc->drv_priv;
- int format = 0, data_type = 0, image_size = 0;
+ int format = 0, data_type = 0, image_size = 0, im_width = 0;
u32 val;
/*
@@ -709,10 +744,13 @@ static int vi2_capture_setup_csi_1(struct tegra_camera_dev *cam,
#endif
if (pdata->port == TEGRA_CAMERA_PORT_CSI_B) {
- TC_VI_REG_WT(cam, TEGRA_CSI_PHY_CILC_CONTROL0, 0x9);
- TC_VI_REG_WT(cam, TEGRA_CSI_PHY_CILD_CONTROL0, 0x9);
+ TC_VI_REG_WT(cam, TEGRA_CSI_PHY_CILC_CONTROL0,
+ ((pdata->continuous_clk << 6) | 0x05));
+ TC_VI_REG_WT(cam, TEGRA_CSI_PHY_CILD_CONTROL0,
+ ((pdata->continuous_clk << 6) | 0x05));
} else if (pdata->port == TEGRA_CAMERA_PORT_CSI_C)
- TC_VI_REG_WT(cam, TEGRA_CSI_PHY_CILE_CONTROL0, 0x9);
+ TC_VI_REG_WT(cam, TEGRA_CSI_PHY_CILE_CONTROL0,
+ ((pdata->continuous_clk << 6) | 0x05));
TC_VI_REG_WT(cam, TEGRA_CSI_PIXEL_STREAM_PPB_COMMAND, 0xf007);
TC_VI_REG_WT(cam, TEGRA_CSI_CSI_PIXEL_PARSER_B_INTERRUPT_MASK, 0x0);
@@ -756,7 +794,13 @@ static int vi2_capture_setup_csi_1(struct tegra_camera_dev *cam,
(icd->current_fmt->code == V4L2_MBUS_FMT_VYUY8_2X8) ||
(icd->current_fmt->code == V4L2_MBUS_FMT_YUYV8_2X8) ||
(icd->current_fmt->code == V4L2_MBUS_FMT_YVYU8_2X8)) {
- /* TBD */
+ if (icd->colorspace == V4L2_COLORSPACE_SMPTE170M)
+ im_width = 720;
+ else
+ im_width = icd->user_width;
+ format = TEGRA_IMAGE_FORMAT_T_U8_Y8__V8_Y8;
+ data_type = TEGRA_IMAGE_DT_YUV422_8;
+ image_size = im_width * 2;
} else if ((icd->current_fmt->code == V4L2_MBUS_FMT_SBGGR8_1X8) ||
(icd->current_fmt->code == V4L2_MBUS_FMT_SGBRG8_1X8)) {
format = TEGRA_IMAGE_FORMAT_T_L8;
@@ -769,15 +813,14 @@ static int vi2_capture_setup_csi_1(struct tegra_camera_dev *cam,
image_size = icd->user_width * 10 / 8;
}
- TC_VI_REG_WT(cam, TEGRA_VI_CSI_1_IMAGE_DEF,
- (cam->tpg_mode ? 0 : 1 << 24) | (format << 16) | 0x1);
+ TC_VI_REG_WT(cam, TEGRA_VI_CSI_1_IMAGE_DEF, ((format << 16) | 0x1));
TC_VI_REG_WT(cam, TEGRA_VI_CSI_1_CSI_IMAGE_DT, data_type);
TC_VI_REG_WT(cam, TEGRA_VI_CSI_1_CSI_IMAGE_SIZE_WC, image_size);
TC_VI_REG_WT(cam, TEGRA_VI_CSI_1_CSI_IMAGE_SIZE,
- (icd->user_height << 16) | icd->user_width);
+ (icd->user_height << 16) | im_width);
/* Start pixel parser in single shot mode at beginning */
TC_VI_REG_WT(cam, TEGRA_CSI_PIXEL_STREAM_PPB_COMMAND, 0xf005);
@@ -794,7 +837,7 @@ static int vi2_capture_setup(struct tegra_camera_dev *cam,
int port = pdata->port;
/* Skip VI2/CSI2 setup for second and later frame capture */
- if (!cam->sof)
+ if (!cam->sof[port-1])
return 0;
/* Setup registers for CSI-A and CSI-B inputs */
@@ -1024,8 +1067,8 @@ static int vi2_capture_wait(struct tegra_camera_dev *cam,
}
/* Mark SOF flag to Zero after we captured the FIRST frame */
- if (cam->sof)
- cam->sof = 0;
+ if (cam->sof[port-1])
+ cam->sof[port-1] = 0;
/* Capture syncpt timeout err, then dump error status */
if (err) {
@@ -1125,6 +1168,7 @@ static void vi2_sw_reset(struct tegra_camera_dev *cam)
TC_VI_REG_WT(cam, TEGRA_CSI_CLKEN_OVERRIDE, 0x0);
udelay(10);
+
}
static int vi2_mipi_calibration(struct tegra_camera_dev *cam,
diff --git a/drivers/media/platform/tegra/ad5816.c b/drivers/media/platform/tegra/ad5816.c
index d86ace760940..a8274b4aebb5 100644
--- a/drivers/media/platform/tegra/ad5816.c
+++ b/drivers/media/platform/tegra/ad5816.c
@@ -681,8 +681,8 @@ static int ad5816_param_wr(struct ad5816_info *info, unsigned long arg)
} else {
if (info->s_mode != NVC_SYNC_STEREO)
ad5816_pm_wr(info->s_info,
- NVC_PWR_OFF);
- err = -EIO;
+ NVC_PWR_OFF);
+ err = -EIO;
}
} else {
err = -EINVAL;
diff --git a/drivers/media/platform/tegra/dw9718.c b/drivers/media/platform/tegra/dw9718.c
index 6797e0166551..3e879834db7f 100644
--- a/drivers/media/platform/tegra/dw9718.c
+++ b/drivers/media/platform/tegra/dw9718.c
@@ -626,8 +626,8 @@ static int dw9718_param_wr(struct dw9718_info *info, unsigned long arg)
} else {
if (info->s_mode != NVC_SYNC_STEREO)
dw9718_pm_wr(info->s_info,
- NVC_PWR_OFF);
- err = -EIO;
+ NVC_PWR_OFF);
+ err = -EIO;
}
} else {
err = -EINVAL;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 7658586fe5f4..6f52e699178b 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1334,7 +1334,9 @@ static int v4l_enumstd(const struct v4l2_ioctl_ops *ops,
/* Return -ENODATA if the tvnorms for the current input
or output is 0, meaning that it doesn't support this API. */
if (id == 0)
- return -ENODATA;
+		/* This is a hack: once GStreamer has been updated,
+		 * revert to returning -ENODATA here. */
+ return -ENOTTY;
/* Return norm array in a canonical way */
for (i = 0; i <= index && id; i++) {
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index b0d27e114321..af82b3c047c6 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -76,6 +76,20 @@ config MFD_AIC3256_SPI
core functionality controlled via SPI.
Please select individual components.
+config MFD_APALIS_TK1_K20
+ tristate "K20 on Apalis TK1"
+ depends on SPI_MASTER && GENERIC_HARDIRQS
+ select MFD_CORE
+ help
+	  The Kinetis MK20DN512 companion microcontroller found on Apalis TK1
+ supports CAN, resistive touch, GPIOs and analog inputs.
+
+config APALIS_TK1_K20_EZP
+ bool "K20 on Apalis TK1 programming via EZ Port"
+ depends on MFD_APALIS_TK1_K20
+ help
+ Support for flashing new K20 firmware using EZ-Port functionality.
+
config MFD_CROS_EC
tristate "ChromeOS Embedded Controller"
select MFD_CORE
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index bfb394426e5f..a141a1b30017 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_MFD_AIC3256) += tlv320aic3256-core.o tlv320aic3256-irq.o
obj-$(CONFIG_MFD_AIC3256_SPI) += tlv320aic3256-spi.o
obj-$(CONFIG_MFD_AIC3256_I2C) += tlv320aic3256-i2c.o
obj-$(CONFIG_MFD_SM501) += sm501.o
+obj-$(CONFIG_MFD_APALIS_TK1_K20) += apalis-tk1-k20.o
obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o
obj-$(CONFIG_MFD_CROS_EC) += cros_ec.o
obj-$(CONFIG_MFD_CROS_EC_I2C) += cros_ec_i2c.o
diff --git a/drivers/mfd/apalis-tk1-k20-ezp.h b/drivers/mfd/apalis-tk1-k20-ezp.h
new file mode 100644
index 000000000000..c8b0c518980a
--- /dev/null
+++ b/drivers/mfd/apalis-tk1-k20-ezp.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2016-2017 Toradex AG
+ * Dominik Sliwa <dominik.sliwa@toradex.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#ifndef __DRIVERS_MFD_APALIS_TK1_K20_H
+#define __DRIVERS_MFD_APALIS_TK1_K20_H
+
+#ifdef CONFIG_APALIS_TK1_K20_EZP
+#define APALIS_TK1_K20_FW_FOPT_ADDR 0x40D
+#define APALIS_TK1_K20_FOPT_EZP_ENA BIT(1)
+#define APALIS_TK1_K20_FW_VER_ADDR 0x410
+
+#define APALIS_TK1_K20_FLASH_SIZE 0x80000
+
+/* EZ Port commands */
+#define APALIS_TK1_K20_EZP_WREN 0x06
+#define APALIS_TK1_K20_EZP_WRDI 0x04
+#define APALIS_TK1_K20_EZP_RDSR 0x05
+#define APALIS_TK1_K20_EZP_READ 0x03
+#define APALIS_TK1_K20_EZP_FREAD 0x0B
+#define APALIS_TK1_K20_EZP_SP 0x02
+#define APALIS_TK1_K20_EZP_SE 0xD8
+#define APALIS_TK1_K20_EZP_BE 0xC7
+#define APALIS_TK1_K20_EZP_RESET 0xB9
+#define APALIS_TK1_K20_EZP_WRFCCOB 0xBA
+#define APALIS_TK1_K20_EZP_FRDFCOOB 0xBB
+
+/* Bits of EZ Port status register */
+#define APALIS_TK1_K20_EZP_STA_WIP BIT(0)
+#define APALIS_TK1_K20_EZP_STA_WEN BIT(1)
+#define APALIS_TK1_K20_EZP_STA_BEDIS BIT(2)
+#define APALIS_TK1_K20_EZP_STA_WEF BIT(6)
+#define APALIS_TK1_K20_EZP_STA_FS BIT(7)
+
+#define APALIS_TK1_K20_EZP_MAX_SPEED 2000000
+#define APALIS_TK1_K20_EZP_MAX_DATA 32
+#define APALIS_TK1_K20_EZP_WRITE_SIZE 32
+
+static const struct firmware *fw_entry;
+#endif /* CONFIG_APALIS_TK1_K20_EZP */
+
+#endif /* __DRIVERS_MFD_APALIS_TK1_K20_H */
diff --git a/drivers/mfd/apalis-tk1-k20.c b/drivers/mfd/apalis-tk1-k20.c
new file mode 100644
index 000000000000..a71a4520827d
--- /dev/null
+++ b/drivers/mfd/apalis-tk1-k20.c
@@ -0,0 +1,1084 @@
+/*
+ * Copyright 2016-2017 Toradex AG
+ * Dominik Sliwa <dominik.sliwa@toradex.com>
+ *
+ * based on a driver for MC13xxx by:
+ * Copyright 2009-2010 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/apalis-tk1-k20.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+
+#include "apalis-tk1-k20-ezp.h"
+#undef CONFIG_EXPERIMENTAL_K20_HSMODE
+#define APALIS_TK1_K20_MAX_MSG 4
+static const struct spi_device_id apalis_tk1_k20_device_ids[] = {
+ {
+ .name = "apalis-tk1-k20",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(spi, apalis_tk1_k20_device_ids);
+
+static const struct of_device_id apalis_tk1_k20_dt_ids[] = {
+ {
+ .compatible = "toradex,apalis-tk1-k20",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, apalis_tk1_k20_dt_ids);
+
+static const struct regmap_config apalis_tk1_k20_regmap_spi_config = {
+ .reg_bits = 8,
+ .pad_bits = 0,
+ .val_bits = 8,
+
+ .max_register = APALIS_TK1_K20_NUMREGS,
+
+ .cache_type = REGCACHE_NONE,
+#ifdef CONFIG_EXPERIMENTAL_K20_HSMODE
+ .use_single_rw = 0,
+#else
+ .use_single_rw = 1,
+#endif
+
+};
+
+static int apalis_tk1_k20_spi_read(void *context, const void *reg,
+ size_t reg_size, void *val, size_t val_size)
+{
+ unsigned char w[APALIS_TK1_K20_MAX_BULK] = {APALIS_TK1_K20_READ_INST,
+ val_size, *((unsigned char *) reg)};
+ unsigned char r[APALIS_TK1_K20_MAX_BULK];
+ unsigned char *p = val;
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_transfer t = {
+ .tx_buf = w,
+ .rx_buf = r,
+ .len = 3,
+ .cs_change = 0,
+ .delay_usecs = 0,
+ };
+#ifdef CONFIG_EXPERIMENTAL_K20_HSMODE
+ struct spi_transfer ts[APALIS_TK1_K20_MAX_BULK /
+ APALIS_TK1_K20_MAX_MSG];
+ int i = 0;
+ int transfer_count;
+#endif
+ struct spi_message m;
+ int ret;
+
+ spi->mode = SPI_MODE_1;
+
+ if (reg_size != 1)
+ return -ENOTSUPP;
+
+ if (val_size == 1) {
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ ret = spi_sync(spi, &m);
+		/* no need to reinit the message */
+ t.len = 2;
+ /* just use the same transfer */
+ ret = spi_sync(spi, &m);
+#ifdef CONFIG_EXPERIMENTAL_K20_HSMODE
+ } else if ((val_size > 1) && (val_size < APALIS_TK1_K20_MAX_BULK)) {
+ spi_message_init(&m);
+ w[0] = APALIS_TK1_K20_BULK_READ_INST;
+ t.len = 3;
+ spi_message_add_tail(&t, &m);
+ ret = spi_sync(spi, &m);
+
+ if (val_size == 2) {
+ ret = spi_sync(spi, &m);
+ } else {
+ transfer_count = val_size;
+ spi_message_init(&m);
+ ts[0].tx_buf = NULL;
+ ts[0].rx_buf = r;
+ ts[0].len = (transfer_count >= 4) ? 4 : transfer_count;
+ ts[0].cs_change = 0;
+ ts[0].delay_usecs = 2;
+
+ spi_message_add_tail(&ts[i], &m);
+ /*
+ * decrease count but remember about an extra character
+ * at the transfer beginning
+ */
+ transfer_count = transfer_count - ts[0].len + 1;
+
+ for (i = 1; transfer_count > 0; i++) {
+ ts[i].tx_buf = NULL;
+ /* first entry in r is the status */
+ ts[i].rx_buf = &r[(4 * i) - 1];
+ ts[i].len = (transfer_count >= 4) ?
+ 4 : transfer_count;
+ ts[i].cs_change = 0;
+ ts[i].delay_usecs = 2;
+ spi_message_add_tail(&ts[i], &m);
+ transfer_count = transfer_count - ts[i].len;
+ }
+ ret = spi_sync(spi, &m);
+ }
+#endif
+ } else {
+ return -ENOTSUPP;
+ }
+
+ if (r[0] == TK1_K20_SENTINEL)
+ memcpy(p, &r[1], val_size);
+ else {
+ if (r[0] != TK1_K20_INVAL) {
+ dev_err(dev, "K20 FIFO Bug!");
+ /* we've probably hit the K20 FIFO bug, try to resync */
+ spi_message_init(&m);
+ w[0] = APALIS_TK1_K20_READ_INST;
+ w[1] = 0x01;
+ w[2] = APALIS_TK1_K20_RET_REQ;
+ w[3] = 0x00;
+ t.tx_buf = w;
+ t.len = 4;
+ spi_message_add_tail(&t, &m);
+ ret = spi_sync(spi, &m);
+ spi_message_init(&m);
+ t.len = val_size + 1;
+ spi_message_add_tail(&t, &m);
+ ret = spi_sync(spi, &m);
+ if (r[0] == TK1_K20_SENTINEL) {
+ memcpy(p, &r[1], val_size);
+ return ret;
+ }
+ }
+ return -EIO;
+ }
+
+ return ret;
+}
+
+static int apalis_tk1_k20_spi_write(void *context, const void *data,
+ size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ uint8_t out_data[APALIS_TK1_K20_MAX_BULK];
+ int ret;
+#ifdef CONFIG_EXPERIMENTAL_K20_HSMODE
+ uint8_t in_data[APALIS_TK1_K20_MAX_BULK];
+ struct spi_message m;
+ struct spi_transfer t = {
+ .tx_buf = out_data,
+ .rx_buf = in_data,
+ .cs_change = 0,
+ .delay_usecs = 0,
+ };
+ struct spi_transfer ts[(APALIS_TK1_K20_MAX_BULK /
+ APALIS_TK1_K20_MAX_MSG) + 1];
+ int i = 0;
+#endif
+
+ spi->mode = SPI_MODE_1;
+
+ if (count == 2) {
+ out_data[0] = APALIS_TK1_K20_WRITE_INST;
+ out_data[1] = ((uint8_t *)data)[0];
+ out_data[2] = ((uint8_t *)data)[1];
+ ret = spi_write(spi, out_data, 3);
+#ifdef CONFIG_EXPERIMENTAL_K20_HSMODE
+ } else if ((count > 2) && (count < APALIS_TK1_K20_MAX_BULK)) {
+
+ spi_message_init(&m);
+ out_data[0] = APALIS_TK1_K20_BULK_WRITE_INST;
+ out_data[1] = count - 1;
+ memcpy(&out_data[2], data, count);
+ t.tx_buf = out_data;
+ t.len = 4;
+ spi_message_add_tail(&t, &m);
+ ret = spi_sync(spi, &m);
+ /* reg. addr + 2 data bytes */
+ count = count - 3;
+
+ spi_message_init(&m);
+ for (i = 0; count > 0; i++) {
+ ts[i].tx_buf = &out_data[(i + 1) * 4];
+ ts[i].len = (count >= 4) ? 4 : count;
+ ts[i].cs_change = 0;
+ ts[i].delay_usecs = 0;
+
+ spi_message_add_tail(&ts[i], &m);
+ count = count - ts[i].len;
+ }
+ ret = spi_sync(spi, &m);
+#endif
+ } else {
+ dev_err(dev, "Apalis TK1 K20 MFD invalid write count = %d\n",
+ count);
+ return -ENOTSUPP;
+ }
+ return ret;
+}
+
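+/* Hook the custom wire protocol above into the generic regmap core. */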
+static struct regmap_bus regmap_apalis_tk1_k20_bus = {
+ .write = apalis_tk1_k20_spi_write,
+ .read = apalis_tk1_k20_spi_read,
+};
+
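+/*
+ * Locking helpers exported to the MFD sub-devices (CAN, ADC,
+ * touchscreen, GPIO) so that multi-register sequences on the shared
+ * bus stay atomic.
+ */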
+void apalis_tk1_k20_lock(struct apalis_tk1_k20_regmap *apalis_tk1_k20)
+{
+ if (!mutex_trylock(&apalis_tk1_k20->lock)) {
+ dev_dbg(apalis_tk1_k20->dev, "wait for %s from %ps\n",
+ __func__, __builtin_return_address(0));
+
+ mutex_lock(&apalis_tk1_k20->lock);
+ }
+ dev_dbg(apalis_tk1_k20->dev, "%s from %ps\n",
+ __func__, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(apalis_tk1_k20_lock);
+
+void apalis_tk1_k20_unlock(struct apalis_tk1_k20_regmap *apalis_tk1_k20)
+{
+ dev_dbg(apalis_tk1_k20->dev, "%s from %ps\n",
+ __func__, __builtin_return_address(0));
+ mutex_unlock(&apalis_tk1_k20->lock);
+}
+EXPORT_SYMBOL(apalis_tk1_k20_unlock);
+
+int apalis_tk1_k20_reg_read(struct apalis_tk1_k20_regmap *apalis_tk1_k20,
+ unsigned int offset, u32 *val)
+{
+ int ret;
+
+ ret = regmap_read(apalis_tk1_k20->regmap, offset, val);
+ dev_vdbg(apalis_tk1_k20->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
+
+ return ret;
+}
+EXPORT_SYMBOL(apalis_tk1_k20_reg_read);
+
+int apalis_tk1_k20_reg_write(struct apalis_tk1_k20_regmap *apalis_tk1_k20,
+ unsigned int offset, u32 val)
+{
+ dev_vdbg(apalis_tk1_k20->dev, "[0x%02x] <- 0x%06x\n", offset, val);
+
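+ /* Values are limited to 24 bits, matching the 0x%06x debug format. */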
+ if (val >= BIT(24))
+ return -EINVAL;
+
+ return regmap_write(apalis_tk1_k20->regmap, offset, val);
+}
+EXPORT_SYMBOL(apalis_tk1_k20_reg_write);
+
+int apalis_tk1_k20_reg_read_bulk(struct apalis_tk1_k20_regmap *apalis_tk1_k20,
+ unsigned int offset,
+ uint8_t *val, size_t size)
+{
+ int ret;
+
+ if (size > APALIS_TK1_K20_MAX_BULK)
+ return -EINVAL;
+
+ ret = regmap_bulk_read(apalis_tk1_k20->regmap, offset, val, size);
+ dev_vdbg(apalis_tk1_k20->dev, "bulk read %d bytes from [0x%02x]\n",
+ size, offset);
+
+ return ret;
+}
+EXPORT_SYMBOL(apalis_tk1_k20_reg_read_bulk);
+
+int apalis_tk1_k20_reg_write_bulk(struct apalis_tk1_k20_regmap *apalis_tk1_k20,
+ unsigned int offset, uint8_t *buf, size_t size)
+{
+ dev_vdbg(apalis_tk1_k20->dev, "bulk write %d bytes to [0x%02x]\n",
+ (unsigned int)size, offset);
+
+ if (size > APALIS_TK1_K20_MAX_BULK)
+ return -EINVAL;
+ return regmap_bulk_write(apalis_tk1_k20->regmap, offset, buf, size);
+}
+EXPORT_SYMBOL(apalis_tk1_k20_reg_write_bulk);
+
+int apalis_tk1_k20_reg_rmw(struct apalis_tk1_k20_regmap *apalis_tk1_k20,
+ unsigned int offset, u32 mask, u32 val)
+{
+ dev_vdbg(apalis_tk1_k20->dev, "[0x%02x] <- 0x%06x (mask: 0x%06x)\n",
+ offset, val, mask);
+
+ return regmap_update_bits(apalis_tk1_k20->regmap, offset, mask, val);
+}
+EXPORT_SYMBOL(apalis_tk1_k20_reg_rmw);
+
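+/*
+ * IRQ helpers for the sub-devices: translate a K20 interrupt index
+ * into its Linux virq via the regmap-irq domain registered in
+ * apalis_tk1_k20_dev_init().
+ */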
+int apalis_tk1_k20_irq_mask(struct apalis_tk1_k20_regmap *apalis_tk1_k20,
+ int irq)
+{
+ int virq = regmap_irq_get_virq(apalis_tk1_k20->irq_data, irq);
+
+ disable_irq_nosync(virq);
+
+ return 0;
+}
+EXPORT_SYMBOL(apalis_tk1_k20_irq_mask);
+
+int apalis_tk1_k20_irq_unmask(struct apalis_tk1_k20_regmap *apalis_tk1_k20,
+ int irq)
+{
+ int virq = regmap_irq_get_virq(apalis_tk1_k20->irq_data, irq);
+
+ enable_irq(virq);
+
+ return 0;
+}
+EXPORT_SYMBOL(apalis_tk1_k20_irq_unmask);
+
+int apalis_tk1_k20_irq_status(struct apalis_tk1_k20_regmap *apalis_tk1_k20,
+ int irq, int *enabled, int *pending)
+{
+ int ret;
+ unsigned int offmask = APALIS_TK1_K20_MSQREG;
+ unsigned int offstat = APALIS_TK1_K20_IRQREG;
+ u32 irqbit = 1 << irq;
+
+ if (irq < 0 || irq >= ARRAY_SIZE(apalis_tk1_k20->irqs))
+ return -EINVAL;
+
+ if (enabled) {
+ u32 mask;
+
+ ret = apalis_tk1_k20_reg_read(apalis_tk1_k20, offmask, &mask);
+ if (ret)
+ return ret;
+
+ *enabled = mask & irqbit;
+ }
+
+ if (pending) {
+ u32 stat;
+
+ ret = apalis_tk1_k20_reg_read(apalis_tk1_k20, offstat, &stat);
+ if (ret)
+ return ret;
+
+ *pending = stat & irqbit;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(apalis_tk1_k20_irq_status);
+
+int apalis_tk1_k20_irq_request(struct apalis_tk1_k20_regmap *apalis_tk1_k20,
+ int irq, irq_handler_t handler, const char *name, void *dev)
+{
+ int virq = regmap_irq_get_virq(apalis_tk1_k20->irq_data, irq);
+
+ return devm_request_threaded_irq(apalis_tk1_k20->dev, virq, NULL,
+ handler, IRQF_ONESHOT, name, dev);
+}
+EXPORT_SYMBOL(apalis_tk1_k20_irq_request);
+
+int apalis_tk1_k20_irq_free(struct apalis_tk1_k20_regmap *apalis_tk1_k20,
+ int irq, void *dev)
+{
+ int virq = regmap_irq_get_virq(apalis_tk1_k20->irq_data, irq);
+
+ devm_free_irq(apalis_tk1_k20->dev, virq, dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(apalis_tk1_k20_irq_free);
+
+
+static int apalis_tk1_k20_add_subdevice_pdata_id(
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20, const char *name,
+ void *pdata, size_t pdata_size, int id)
+{
+ struct mfd_cell cell = {
+ .platform_data = pdata,
+ .pdata_size = pdata_size,
+ };
+
+ cell.name = kmemdup(name, strlen(name) + 1, GFP_KERNEL);
+ if (!cell.name)
+ return -ENOMEM;
+
+ return mfd_add_devices(apalis_tk1_k20->dev, id, &cell, 1, NULL, 0,
+ regmap_irq_get_domain(apalis_tk1_k20->irq_data));
+}
+
+static int apalis_tk1_k20_add_subdevice_pdata(
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20, const char *name,
+ void *pdata, size_t pdata_size)
+{
+ return apalis_tk1_k20_add_subdevice_pdata_id(apalis_tk1_k20, name,
+ pdata, pdata_size, -1);
+}
+
+static int apalis_tk1_k20_add_subdevice(
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20, const char *name)
+{
+ return apalis_tk1_k20_add_subdevice_pdata(apalis_tk1_k20, name, NULL,
+ 0);
+}
+
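+/* Pulse the K20 reset line: hold it low for ~10 ms, then release it. */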
+static void apalis_tk1_k20_reset_chip(
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20)
+{
+ udelay(10);
+ gpio_set_value(apalis_tk1_k20->reset_gpio, 0);
+ msleep(10);
+ gpio_set_value(apalis_tk1_k20->reset_gpio, 1);
+ udelay(10);
+}
+
+#ifdef CONFIG_APALIS_TK1_K20_EZP
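+/*
+ * EZ Port support: the K20 flash-programming interface used below to
+ * erase and, if a local firmware image is available, reflash the MCU
+ * when the running firmware version is not the one this driver
+ * expects.
+ */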
+static int apalis_tk1_k20_read_ezport(
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20, uint8_t command,
+ int addr, int count, uint8_t *buffer)
+{
+ uint8_t w[4 + APALIS_TK1_K20_EZP_MAX_DATA];
+ uint8_t r[4 + APALIS_TK1_K20_EZP_MAX_DATA];
+ uint8_t *out;
+ struct spi_message m;
+ struct spi_device *spi = to_spi_device(apalis_tk1_k20->dev);
+ struct spi_transfer t = {
+ .tx_buf = w,
+ .rx_buf = r,
+ .speed_hz = APALIS_TK1_K20_EZP_MAX_SPEED,
+ };
+ int ret;
+
+ spi->mode = SPI_MODE_0;
+
+ if (count > APALIS_TK1_K20_EZP_MAX_DATA)
+ return -ENOSPC;
+
+ memset(w, 0, 4 + count);
+
+ switch (command) {
+ case APALIS_TK1_K20_EZP_READ:
+ case APALIS_TK1_K20_EZP_FREAD:
+ t.len = 4 + count;
+ w[1] = (addr & 0xFF0000) >> 16;
+ w[2] = (addr & 0xFF00) >> 8;
+ w[3] = (addr & 0xFC);
+ out = &r[4];
+ break;
+ case APALIS_TK1_K20_EZP_RDSR:
+ case APALIS_TK1_K20_EZP_FRDFCOOB:
+ t.len = 1 + count;
+ out = &r[1];
+ break;
+ default:
+ return -EINVAL;
+ }
+ w[0] = command;
+
+ gpio_set_value(apalis_tk1_k20->ezpcs_gpio, 0);
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ ret = spi_sync(spi, &m);
+ gpio_set_value(apalis_tk1_k20->ezpcs_gpio, 1);
+ if (ret != 0)
+ return ret;
+
+ memcpy(buffer, out, count);
+
+ return 0;
+}
+
+static int apalis_tk1_k20_write_ezport(
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20, uint8_t command,
+ int addr, int count, const uint8_t *buffer)
+{
+ uint8_t w[4 + APALIS_TK1_K20_EZP_MAX_DATA];
+ uint8_t r[4 + APALIS_TK1_K20_EZP_MAX_DATA];
+ struct spi_message m;
+ struct spi_device *spi = to_spi_device(apalis_tk1_k20->dev);
+ struct spi_transfer t = {
+ .tx_buf = w,
+ .rx_buf = r,
+ .speed_hz = APALIS_TK1_K20_EZP_MAX_SPEED,
+ };
+ int ret;
+
+ spi->mode = SPI_MODE_0;
+
+ if (count > APALIS_TK1_K20_EZP_MAX_DATA)
+ return -ENOSPC;
+
+ switch (command) {
+ case APALIS_TK1_K20_EZP_SP:
+ case APALIS_TK1_K20_EZP_SE:
+ t.len = 4 + count;
+ w[1] = (addr & 0xFF0000) >> 16;
+ w[2] = (addr & 0xFF00) >> 8;
+ w[3] = (addr & 0xF8);
+ memcpy(&w[4], buffer, count);
+ break;
+ case APALIS_TK1_K20_EZP_WREN:
+ case APALIS_TK1_K20_EZP_WRDI:
+ case APALIS_TK1_K20_EZP_BE:
+ case APALIS_TK1_K20_EZP_RESET:
+ case APALIS_TK1_K20_EZP_WRFCCOB:
+ t.len = 1 + count;
+ memcpy(&w[1], buffer, count);
+ break;
+ default:
+ return -EINVAL;
+ }
+ w[0] = command;
+
+ gpio_set_value(apalis_tk1_k20->ezpcs_gpio, 0);
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ ret = spi_sync(spi, &m);
+ gpio_set_value(apalis_tk1_k20->ezpcs_gpio, 1);
+
+ return ret;
+}
+
+static int apalis_tk1_k20_set_wren_ezport(
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20)
+{
+ uint8_t buffer;
+
+ if (apalis_tk1_k20_write_ezport(apalis_tk1_k20, APALIS_TK1_K20_EZP_WREN,
+ 0, 0, NULL) < 0)
+ return -EIO;
+ if (apalis_tk1_k20_read_ezport(apalis_tk1_k20, APALIS_TK1_K20_EZP_RDSR,
+ 0, 1, &buffer) < 0)
+ return -EIO;
+ if ((buffer & APALIS_TK1_K20_EZP_STA_WEN))
+ return 0;
+
+ /* If it failed try one last time */
+ if (apalis_tk1_k20_write_ezport(apalis_tk1_k20, APALIS_TK1_K20_EZP_WREN,
+ 0, 0, NULL) < 0)
+ return -EIO;
+ if (apalis_tk1_k20_read_ezport(apalis_tk1_k20, APALIS_TK1_K20_EZP_RDSR,
+ 0, 1, &buffer) < 0)
+ return -EIO;
+ if ((buffer & APALIS_TK1_K20_EZP_STA_WEN))
+ return 0;
+
+ return -EIO;
+
+}
+
+static int apalis_tk1_k20_enter_ezport(
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20)
+{
+ uint8_t status = 0x00;
+ uint8_t buffer;
+
+ gpio_set_value(apalis_tk1_k20->ezpcs_gpio, 0);
+ msleep(10);
+ apalis_tk1_k20_reset_chip(apalis_tk1_k20);
+ msleep(10);
+ gpio_set_value(apalis_tk1_k20->ezpcs_gpio, 1);
+ if (apalis_tk1_k20_read_ezport(apalis_tk1_k20, APALIS_TK1_K20_EZP_RDSR,
+ 0, 1, &buffer) < 0)
+ goto bad;
+ status = buffer;
+ if (apalis_tk1_k20_write_ezport(apalis_tk1_k20, APALIS_TK1_K20_EZP_WREN,
+ 0, 0, NULL) < 0)
+ goto bad;
+ if (apalis_tk1_k20_read_ezport(apalis_tk1_k20, APALIS_TK1_K20_EZP_RDSR,
+ 0, 1, &buffer) < 0)
+ goto bad;
+ if ((buffer & APALIS_TK1_K20_EZP_STA_WEN) && buffer != status)
+ return 0;
+
+bad:
+ dev_err(apalis_tk1_k20->dev, "Error entering EZ Port mode.\n");
+ return -EIO;
+}
+
+static int apalis_tk1_k20_erase_chip_ezport(
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20)
+{
+ uint8_t buffer[16];
+ int i;
+
+ if (apalis_tk1_k20_set_wren_ezport(apalis_tk1_k20))
+ goto bad;
+ if (apalis_tk1_k20_write_ezport(apalis_tk1_k20, APALIS_TK1_K20_EZP_BE,
+ 0, 0, NULL) < 0)
+ goto bad;
+
+ if (apalis_tk1_k20_read_ezport(apalis_tk1_k20, APALIS_TK1_K20_EZP_RDSR,
+ 0, 1, buffer) < 0)
+ goto bad;
+
+ i = 0;
+ while (buffer[0] & APALIS_TK1_K20_EZP_STA_WIP) {
+ msleep(20);
+ if ((apalis_tk1_k20_read_ezport(apalis_tk1_k20,
+ APALIS_TK1_K20_EZP_RDSR, 0, 1, buffer) < 0) || (i > 50))
+ goto bad;
+ i++;
+ }
+
+ return 0;
+
+bad:
+ dev_err(apalis_tk1_k20->dev, "Error erasing the chip.\n");
+ return -EIO;
+}
+
+static int apalis_tk1_k20_flash_chip_ezport(
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20)
+{
+ uint8_t buffer;
+ const uint8_t *fw_chunk;
+ int i, j, transfer_size;
+
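+ /*
+ * Program the image in APALIS_TK1_K20_EZP_WRITE_SIZE chunks, polling
+ * the status register until the write-in-progress bit clears after
+ * each chunk.
+ */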
+ for (i = 0; i < fw_entry->size;) {
+ if (apalis_tk1_k20_set_wren_ezport(apalis_tk1_k20))
+ goto bad;
+
+ fw_chunk = fw_entry->data + i;
+ transfer_size = (i + APALIS_TK1_K20_EZP_WRITE_SIZE <
+ fw_entry->size) ? APALIS_TK1_K20_EZP_WRITE_SIZE
+ : (fw_entry->size - i - 1);
+ dev_vdbg(apalis_tk1_k20->dev,
+ "Apalis TK1 K20 MFD transfer_size = %d addr = 0x%X\n",
+ transfer_size, i);
+ if (apalis_tk1_k20_write_ezport(apalis_tk1_k20,
+ APALIS_TK1_K20_EZP_SP, i,
+ transfer_size, fw_chunk) < 0)
+ goto bad;
+ udelay(2000);
+ if (apalis_tk1_k20_read_ezport(apalis_tk1_k20,
+ APALIS_TK1_K20_EZP_RDSR, 0, 1, &buffer) < 0)
+ goto bad;
+
+ j = 0;
+ while (buffer & APALIS_TK1_K20_EZP_STA_WIP) {
+ msleep(10);
+ if ((apalis_tk1_k20_read_ezport(apalis_tk1_k20,
+ APALIS_TK1_K20_EZP_RDSR, 0, 1,
+ &buffer) < 0) || (j > 10000))
+ goto bad;
+ j++;
+ }
+ i += APALIS_TK1_K20_EZP_WRITE_SIZE;
+ }
+
+ return 0;
+
+bad:
+ dev_err(apalis_tk1_k20->dev, "Error writing to the chip.\n");
+ return -EIO;
+}
+
+static uint8_t apalis_tk1_k20_fw_ezport_status(void)
+{
+ return fw_entry->data[APALIS_TK1_K20_FW_FOPT_ADDR] &
+ APALIS_TK1_K20_FOPT_EZP_ENA;
+}
+
+static uint8_t apalis_tk1_k20_get_fw_revision(void)
+{
+ if (fw_entry)
+ if (fw_entry->size > APALIS_TK1_K20_FW_VER_ADDR)
+ return fw_entry->data[APALIS_TK1_K20_FW_VER_ADDR];
+ return 0;
+}
+#endif /* CONFIG_APALIS_TK1_K20_EZP */
+
+
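+/*
+ * Device-tree parsing: the "toradex,apalis-tk1-k20-uses-*" properties
+ * select which sub-functions get registered, and the reset, EZ Port
+ * chip-select and INT2 GPIOs are claimed here.
+ */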
+#ifdef CONFIG_OF
+static int apalis_tk1_k20_probe_flags_dt(
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20)
+{
+ struct device_node *np = apalis_tk1_k20->dev->of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ if (of_property_read_bool(np, "toradex,apalis-tk1-k20-uses-adc"))
+ apalis_tk1_k20->flags |= APALIS_TK1_K20_USES_ADC;
+
+ if (of_property_read_bool(np, "toradex,apalis-tk1-k20-uses-tsc"))
+ apalis_tk1_k20->flags |= APALIS_TK1_K20_USES_TSC;
+
+ if (of_property_read_bool(np, "toradex,apalis-tk1-k20-uses-can"))
+ apalis_tk1_k20->flags |= APALIS_TK1_K20_USES_CAN;
+
+ if (of_property_read_bool(np, "toradex,apalis-tk1-k20-uses-gpio"))
+ apalis_tk1_k20->flags |= APALIS_TK1_K20_USES_GPIO;
+
+ return 0;
+}
+
+static int apalis_tk1_k20_probe_gpios_dt(
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20)
+{
+ struct device_node *np = apalis_tk1_k20->dev->of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ apalis_tk1_k20->reset_gpio = of_get_named_gpio(np, "rst-gpio", 0);
+ if (apalis_tk1_k20->reset_gpio < 0)
+ return apalis_tk1_k20->reset_gpio;
+ gpio_request(apalis_tk1_k20->reset_gpio, "apalis-tk1-k20-reset");
+ gpio_direction_output(apalis_tk1_k20->reset_gpio, 1);
+
+ apalis_tk1_k20->ezpcs_gpio = of_get_named_gpio(np, "ezport-cs-gpio", 0);
+ if (apalis_tk1_k20->ezpcs_gpio < 0)
+ return apalis_tk1_k20->ezpcs_gpio;
+ gpio_request(apalis_tk1_k20->ezpcs_gpio, "apalis-tk1-k20-ezpcs");
+ gpio_direction_output(apalis_tk1_k20->ezpcs_gpio, 1);
+
+ apalis_tk1_k20->int2_gpio = of_get_named_gpio(np, "int2-gpio", 0);
+ if (apalis_tk1_k20->int2_gpio < 0)
+ return apalis_tk1_k20->int2_gpio;
+ gpio_request(apalis_tk1_k20->int2_gpio, "apalis-tk1-k20-int2");
+ gpio_direction_output(apalis_tk1_k20->int2_gpio, 1);
+
+ return 0;
+}
+#else
+static inline int apalis_tk1_k20_probe_flags_dt(
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20)
+{
+ return -ENODEV;
+}
+static inline int apalis_tk1_k20_probe_gpios_dt(
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20)
+{
+ return -ENODEV;
+}
+#endif
+
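+/*
+ * Main initialisation: claim the control GPIOs, reset the K20, verify
+ * (and, with EZ Port support, optionally reflash) its firmware, then
+ * register the IRQ chip and the MFD sub-devices.
+ */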
+int apalis_tk1_k20_dev_init(struct device *dev)
+{
+ struct apalis_tk1_k20_platform_data *pdata = dev_get_platdata(dev);
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20 = dev_get_drvdata(dev);
+ uint32_t revision = 0x00;
+ int ret, i;
+ int erase_only = 0;
+
+ apalis_tk1_k20->dev = dev;
+
+ ret = apalis_tk1_k20_probe_gpios_dt(apalis_tk1_k20);
+ if (ret < 0) {
+ if (pdata) {
+ apalis_tk1_k20->ezpcs_gpio = pdata->ezpcs_gpio;
+ apalis_tk1_k20->reset_gpio = pdata->reset_gpio;
+ apalis_tk1_k20->int2_gpio = pdata->int2_gpio;
+ } else {
+ dev_err(dev, "Error claiming GPIOs\n");
+ ret = -EINVAL;
+ goto bad;
+ }
+ }
+ apalis_tk1_k20_reset_chip(apalis_tk1_k20);
+ msleep(10);
+ ret = apalis_tk1_k20_reg_read(apalis_tk1_k20, APALIS_TK1_K20_REVREG,
+ &revision);
+
+#ifdef CONFIG_APALIS_TK1_K20_EZP
+ if ((request_firmware(&fw_entry, "apalis-tk1-k20.bin", dev) < 0)
+ && (revision != APALIS_TK1_K20_FW_VER)) {
+ dev_err(apalis_tk1_k20->dev,
+ "Unsupported firmware version %d.%d and no local" \
+ " firmware file available.\n",
+ ((revision & 0xF0) >> 4),
+ (revision & 0x0F));
+ ret = -ENOTSUPP;
+ goto bad;
+ }
+
+ if ((fw_entry == NULL) && (revision != APALIS_TK1_K20_FW_VER)) {
+ dev_err(apalis_tk1_k20->dev,
+ "Unsupported firmware version %d.%d and no local" \
+ " firmware file available.\n",
+ ((revision & 0xF0) >> 4),
+ (revision & 0x0F));
+ ret = -ENOTSUPP;
+ goto bad;
+ }
+
+ if (fw_entry != NULL) {
+ if (fw_entry->size == 1)
+ erase_only = 1;
+ }
+
+ if ((apalis_tk1_k20_get_fw_revision() != APALIS_TK1_K20_FW_VER) &&
+ (revision != APALIS_TK1_K20_FW_VER) && !erase_only &&
+ (fw_entry != NULL)) {
+ dev_err(apalis_tk1_k20->dev,
+ "Unsupported firmware version in both the device " \
+ "as well as the local firmware file.\n");
+ release_firmware(fw_entry);
+ ret = -ENOTSUPP;
+ goto bad;
+ }
+
+ if ((revision != APALIS_TK1_K20_FW_VER) && !erase_only &&
+ (!apalis_tk1_k20_fw_ezport_status()) &&
+ (fw_entry != NULL)) {
+ dev_err(apalis_tk1_k20->dev,
+ "Unsupported firmware version in the device and the " \
+ "local firmware file disables the EZ Port.\n");
+ release_firmware(fw_entry);
+ ret = -ENOTSUPP;
+ goto bad;
+ }
+
+ if (((revision != APALIS_TK1_K20_FW_VER) || erase_only) &&
+ (fw_entry != NULL)) {
+ i = 0;
+ while (apalis_tk1_k20_enter_ezport(apalis_tk1_k20) < 0
+ && i++ < 5) {
+ msleep(50);
+ }
+ if (i >= 5) {
+ dev_err(apalis_tk1_k20->dev,
+ "Problem entering EZ port mode.\n");
+ release_firmware(fw_entry);
+ ret = -EIO;
+ goto bad;
+ }
+ if (apalis_tk1_k20_erase_chip_ezport(apalis_tk1_k20) < 0) {
+ dev_err(apalis_tk1_k20->dev,
+ "Problem erasing the chip.\n");
+ release_firmware(fw_entry);
+ ret = -EIO;
+ goto bad;
+ }
+ if (erase_only) {
+ dev_err(apalis_tk1_k20->dev,
+ "Chip fully erased.\n");
+ release_firmware(fw_entry);
+ ret = -EIO;
+ goto bad;
+ }
+ if (apalis_tk1_k20_flash_chip_ezport(apalis_tk1_k20) < 0) {
+ dev_err(apalis_tk1_k20->dev,
+ "Problem flashing new firmware.\n");
+ release_firmware(fw_entry);
+ ret = -EIO;
+ goto bad;
+ }
+ }
+ if (fw_entry != NULL)
+ release_firmware(fw_entry);
+
+ msleep(10);
+ apalis_tk1_k20_reset_chip(apalis_tk1_k20);
+ msleep(10);
+
+ ret = apalis_tk1_k20_reg_read(apalis_tk1_k20, APALIS_TK1_K20_REVREG,
+ &revision);
+#endif /* CONFIG_APALIS_TK1_K20_EZP */
+
+ if (ret) {
+ dev_err(apalis_tk1_k20->dev, "Device is not answering.\n");
+ goto bad;
+ }
+
+ if (revision != APALIS_TK1_K20_FW_VER) {
+ dev_err(apalis_tk1_k20->dev,
+ "Unsupported firmware version %d.%d.\n",
+ ((revision & 0xF0) >> 4), (revision & 0x0F));
+ ret = -ENOTSUPP;
+ goto bad;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(apalis_tk1_k20->irqs); i++) {
+ apalis_tk1_k20->irqs[i].reg_offset = i /
+ APALIS_TK1_K20_IRQ_PER_REG;
+ apalis_tk1_k20->irqs[i].mask = BIT(i %
+ APALIS_TK1_K20_IRQ_PER_REG);
+ }
+
+ apalis_tk1_k20->irq_chip.name = dev_name(dev);
+ apalis_tk1_k20->irq_chip.status_base = APALIS_TK1_K20_IRQREG;
+ apalis_tk1_k20->irq_chip.mask_base = APALIS_TK1_K20_MSQREG;
+ apalis_tk1_k20->irq_chip.ack_base = APALIS_TK1_K20_IRQREG;
+ apalis_tk1_k20->irq_chip.irq_reg_stride = 0;
+ apalis_tk1_k20->irq_chip.num_regs = APALIS_TK1_K20_IRQ_REG_CNT;
+ apalis_tk1_k20->irq_chip.irqs = apalis_tk1_k20->irqs;
+ apalis_tk1_k20->irq_chip.num_irqs = ARRAY_SIZE(apalis_tk1_k20->irqs);
+
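+ /*
+ * Expose the K20 interrupt status/mask registers as a regmap-irq
+ * chip; the resulting IRQ domain is what the sub-devices request
+ * their interrupts from.
+ */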
+ ret = regmap_add_irq_chip(apalis_tk1_k20->regmap, apalis_tk1_k20->irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING, 0, &apalis_tk1_k20->irq_chip,
+ &apalis_tk1_k20->irq_data);
+ if (ret)
+ goto bad;
+
+ mutex_init(&apalis_tk1_k20->lock);
+
+ if (apalis_tk1_k20_probe_flags_dt(apalis_tk1_k20) < 0 && pdata)
+ apalis_tk1_k20->flags = pdata->flags;
+
+ if (pdata) {
+ if (apalis_tk1_k20->flags & APALIS_TK1_K20_USES_TSC)
+ apalis_tk1_k20_add_subdevice_pdata(apalis_tk1_k20,
+ "apalis-tk1-k20-ts",
+ &pdata->touch, sizeof(pdata->touch));
+
+ if (apalis_tk1_k20->flags & APALIS_TK1_K20_USES_ADC)
+ apalis_tk1_k20_add_subdevice_pdata(apalis_tk1_k20,
+ "apalis-tk1-k20-adc",
+ &pdata->adc, sizeof(pdata->adc));
+
+ if (apalis_tk1_k20->flags & APALIS_TK1_K20_USES_CAN) {
+ /* We have 2 CAN devices inside K20 */
+ pdata->can0.id = 0;
+ pdata->can1.id = 1;
+ apalis_tk1_k20_add_subdevice_pdata(apalis_tk1_k20,
+ "apalis-tk1-k20-can",
+ &pdata->can0, sizeof(pdata->can0));
+ apalis_tk1_k20_add_subdevice_pdata(apalis_tk1_k20,
+ "apalis-tk1-k20-can",
+ &pdata->can1, sizeof(pdata->can1));
+ }
+ if (apalis_tk1_k20->flags & APALIS_TK1_K20_USES_GPIO)
+ apalis_tk1_k20_add_subdevice_pdata(apalis_tk1_k20,
+ "apalis-tk1-k20-gpio",
+ &pdata->gpio, sizeof(pdata->gpio));
+ } else {
+ if (apalis_tk1_k20->flags & APALIS_TK1_K20_USES_TSC)
+ apalis_tk1_k20_add_subdevice(apalis_tk1_k20,
+ "apalis-tk1-k20-ts");
+
+ if (apalis_tk1_k20->flags & APALIS_TK1_K20_USES_ADC)
+ apalis_tk1_k20_add_subdevice(apalis_tk1_k20,
+ "apalis-tk1-k20-adc");
+
+ if (apalis_tk1_k20->flags & APALIS_TK1_K20_USES_CAN) {
+ /* We have 2 CAN devices inside K20 */
+ apalis_tk1_k20_add_subdevice_pdata_id(apalis_tk1_k20,
+ "apalis-tk1-k20-can",
+ NULL, 0, 0);
+ apalis_tk1_k20_add_subdevice_pdata_id(apalis_tk1_k20,
+ "apalis-tk1-k20-can",
+ NULL, 0, 1);
+ }
+
+ if (apalis_tk1_k20->flags & APALIS_TK1_K20_USES_GPIO)
+ apalis_tk1_k20_add_subdevice(apalis_tk1_k20,
+ "apalis-tk1-k20-gpio");
+ }
+
+ dev_info(apalis_tk1_k20->dev, "Apalis TK1 K20 MFD driver.\n"
+ "Firmware version %d.%d.\n", FW_MAJOR, FW_MINOR);
+
+ return 0;
+
+bad:
+ if (apalis_tk1_k20->ezpcs_gpio >= 0)
+ gpio_free(apalis_tk1_k20->ezpcs_gpio);
+ if (apalis_tk1_k20->reset_gpio >= 0)
+ gpio_free(apalis_tk1_k20->reset_gpio);
+ if (apalis_tk1_k20->int2_gpio >= 0)
+ gpio_free(apalis_tk1_k20->int2_gpio);
+
+ return ret;
+}
+
+
+static int apalis_tk1_k20_spi_probe(struct spi_device *spi)
+{
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20;
+ int ret;
+
+ apalis_tk1_k20 = devm_kzalloc(&spi->dev, sizeof(*apalis_tk1_k20),
+ GFP_KERNEL);
+ if (!apalis_tk1_k20)
+ return -ENOMEM;
+
+ dev_set_drvdata(&spi->dev, apalis_tk1_k20);
+
+ spi->mode = SPI_MODE_1;
+
+ apalis_tk1_k20->irq = spi->irq;
+
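+ /* Never clock the bus faster than the K20 interface allows. */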
+ spi->max_speed_hz = (spi->max_speed_hz >= APALIS_TK1_K20_MAX_SPI_SPEED)
+ ? APALIS_TK1_K20_MAX_SPI_SPEED : spi->max_speed_hz;
+
+ ret = spi_setup(spi);
+ if (ret)
+ return ret;
+
+ apalis_tk1_k20->regmap = devm_regmap_init(&spi->dev,
+ &regmap_apalis_tk1_k20_bus, &spi->dev,
+ &apalis_tk1_k20_regmap_spi_config);
+ if (IS_ERR(apalis_tk1_k20->regmap)) {
+ ret = PTR_ERR(apalis_tk1_k20->regmap);
+ dev_err(&spi->dev, "Failed to initialize regmap: %d\n", ret);
+ return ret;
+ }
+
+ return apalis_tk1_k20_dev_init(&spi->dev);
+}
+
+static int apalis_tk1_k20_spi_remove(struct spi_device *spi)
+{
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20 =
+ dev_get_drvdata(&spi->dev);
+
+ if (apalis_tk1_k20->ezpcs_gpio >= 0)
+ gpio_free(apalis_tk1_k20->ezpcs_gpio);
+ if (apalis_tk1_k20->reset_gpio >= 0)
+ gpio_free(apalis_tk1_k20->reset_gpio);
+ if (apalis_tk1_k20->int2_gpio >= 0)
+ gpio_free(apalis_tk1_k20->int2_gpio);
+
+ kfree(spi->controller_data);
+ spi->controller_data = NULL;
+
+ mfd_remove_devices(&spi->dev);
+ regmap_del_irq_chip(apalis_tk1_k20->irq, apalis_tk1_k20->irq_data);
+ mutex_destroy(&apalis_tk1_k20->lock);
+
+ return 0;
+}
+
+static struct spi_driver apalis_tk1_k20_spi_driver = {
+ .id_table = apalis_tk1_k20_device_ids,
+ .driver = {
+ .name = "apalis-tk1-k20",
+ .of_match_table = apalis_tk1_k20_dt_ids,
+ },
+ .probe = apalis_tk1_k20_spi_probe,
+ .remove = apalis_tk1_k20_spi_remove,
+};
+
+static int __init apalis_tk1_k20_init(void)
+{
+ return spi_register_driver(&apalis_tk1_k20_spi_driver);
+}
+subsys_initcall(apalis_tk1_k20_init);
+
+static void __exit apalis_tk1_k20_exit(void)
+{
+ spi_unregister_driver(&apalis_tk1_k20_spi_driver);
+}
+module_exit(apalis_tk1_k20_exit);
+
+MODULE_DESCRIPTION("MFD driver for Kinetis MK20DN512 MCU on Apalis TK1");
+MODULE_AUTHOR("Dominik Sliwa <dominik.sliwa@toradex.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 4e70b5bd7413..252757e0e466 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -285,7 +285,7 @@ struct tuning_t2t_coeffs t12x_tuning_coeffs[] = {
SET_TUNING_COEFFS("sdhci-tegra.3", 1150, 950, 27, 118295,
27, 118295, 48, 188148),
SET_TUNING_COEFFS("sdhci-tegra.2", 1150, 950, 29, 124427,
- 29, 124427, 54, 203707),
+ 29, 124427, 54, 203707),
SET_TUNING_COEFFS("sdhci-tegra.0", 1150, 950, 25, 115933,
25, 115933, 47, 187224),
};
@@ -363,6 +363,8 @@ struct tap_hole_coeffs t12x_tap_hole_coeffs[] = {
281460, 1262, 194452),
SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 204000, 874, 85243, 874,
85243, 449, 57321),
+ SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 200000, 874, 85243, 874,
+ 85243, 449, 57321),
SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 136000, 1554, 167210, 1554,
167210, 793, 115672),
SET_TAP_HOLE_COEFFS("sdhci-tegra.0", 100000, 2290, 255734, 2290,
@@ -1204,15 +1206,25 @@ static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
/* External loopback is valid for sdmmc3 only */
if ((soc_data->nvquirks & NVQUIRK_DISABLE_EXTERNAL_LOOPBACK) &&
(tegra_host->instance == 2)) {
+#ifdef CONFIG_MACH_APALIS_TK1
+ /*
+ * Disable the external loopback and use the internal loopback as per
+ * SDMMC_VENDOR_MISC_CNTRL_0 register's SDMMC_SPARE1 bits being set to
+ * 0xfffd according to the TRM.
+ */
+#else /* CONFIG_MACH_APALIS_TK1 */
if ((tegra_host->tuning_status == TUNING_STATUS_DONE)
&& (host->mmc->pm_flags &
MMC_PM_KEEP_POWER)) {
+#endif /* CONFIG_MACH_APALIS_TK1 */
misc_ctrl &= ~(1 <<
SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
+#ifndef CONFIG_MACH_APALIS_TK1
} else {
misc_ctrl |= (1 <<
SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
}
+#endif /* !CONFIG_MACH_APALIS_TK1 */
}
sdhci_writel(host, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
@@ -3262,6 +3274,13 @@ out:
SDHCI_TEGRA_DBG("%s: Freq tuning done\n", mmc_hostname(sdhci->mmc));
if (enable_lb_clk) {
misc_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_MISC_CTRL);
+#ifdef CONFIG_MACH_APALIS_TK1
+ /*
+ * Disable the external loopback and use the internal loopback
+ * as per SDMMC_VENDOR_MISC_CNTRL_0 register's SDMMC_SPARE1 bits
+ * being set to 0xfffd according to the TRM.
+ */
+#else /* CONFIG_MACH_APALIS_TK1 */
if (err) {
/* Tuning is failed and card will try to enumerate in
* Legacy High Speed mode. So, Enable External Loopback
@@ -3269,10 +3288,10 @@ out:
*/
misc_ctrl |= (1 <<
SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
- } else {
+ } else
+#endif /* CONFIG_MACH_APALIS_TK1 */
misc_ctrl &= ~(1 <<
SDHCI_VNDR_MISC_CTRL_EN_EXT_LOOPBACK_SHIFT);
- }
sdhci_writel(sdhci, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
}
return err;
@@ -3973,9 +3992,9 @@ static struct sdhci_tegra_soc_data soc_data_tegra12 = {
.parent_clk_list = {"pll_p", "pll_c"},
.tuning_freq_list = {81600000, 136000000, 200000000},
.t2t_coeffs = t12x_tuning_coeffs,
- .t2t_coeffs_count = 3,
+ .t2t_coeffs_count = ARRAY_SIZE(t12x_tuning_coeffs),
.tap_hole_coeffs = t12x_tap_hole_coeffs,
- .tap_hole_coeffs_count = 13,
+ .tap_hole_coeffs_count = ARRAY_SIZE(t12x_tap_hole_coeffs),
};
static const struct of_device_id sdhci_tegra_dt_match[] = {
@@ -4321,6 +4340,17 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
host->mmc->pm_caps |= plat->pm_caps;
host->mmc->pm_flags |= plat->pm_flags;
+#ifdef APALIS_TK1_V10
+ /*
+ * Enable card detect polling as we can't use SD1_CD# aka
+ * SDMMC3_CLK_LB_OUT for now as it features some magic properties even
+ * though the external loopback is disabled and the internal loopback
+ * used as per SDMMC_VENDOR_MISC_CNTRL_0 register's SDMMC_SPARE1 bits
+ * being set to 0xfffd according to the TRM!
+ */
+ host->mmc->caps |= MMC_CAP_NEEDS_POLL;
+#endif /* APALIS_TK1_V10 */
+
host->mmc->caps |= MMC_CAP_ERASE;
/* enable 1/8V DDR capable */
host->mmc->caps |= MMC_CAP_1_8V_DDR;
@@ -4333,9 +4363,6 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
}
host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
- /* disable access to boot partitions */
- host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
-
#if !defined(CONFIG_ARCH_TEGRA_2x_SOC) && !defined(CONFIG_ARCH_TEGRA_3x_SOC)
if (soc_data->nvquirks & NVQUIRK_ENABLE_HS200)
host->mmc->caps2 |= MMC_CAP2_HS200;
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index e456b70933c2..c8287d04a51b 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -83,6 +83,12 @@ config CAN_MCP251X
---help---
Driver for the Microchip MCP251x SPI CAN controllers.
+config CAN_APALIS_TK1_K20
+ tristate "Apalis TK1 K20 CAN controllers"
+ depends on MFD_APALIS_TK1_K20
+ ---help---
+ Driver for the Apalis TK1 K20 CAN controllers.
+
config CAN_BFIN
depends on BF534 || BF536 || BF537 || BF538 || BF539 || BF54x
tristate "Analog Devices Blackfin on-chip CAN"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index c7440392adbb..940782b20494 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
+obj-$(CONFIG_CAN_APALIS_TK1_K20) += apalis-tk1-k20-can.o
obj-$(CONFIG_CAN_BFIN) += bfin_can.o
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
diff --git a/drivers/net/can/apalis-tk1-k20-can.c b/drivers/net/can/apalis-tk1-k20-can.c
new file mode 100644
index 000000000000..1df9fae96cc3
--- /dev/null
+++ b/drivers/net/can/apalis-tk1-k20-can.c
@@ -0,0 +1,836 @@
+/*
+ * Copyright 2016-2017 Toradex AG
+ * Dominik Sliwa <dominik.sliwa@toradex.com>
+ *
+ * CAN bus driver for Apalis TK1 K20 CAN Controller over MFD device
+ * based on MCP251x CAN driver
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/can/core.h>
+#include <linux/can/dev.h>
+#include <linux/can/led.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/freezer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/apalis-tk1-k20.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+/* Buffer size required for the largest transfer (i.e., reading a
+ * frame)
+ */
+#define CAN_FRAME_MAX_LEN 8
+#define CAN_HEADER_MAX_LEN 5
+#define CAN_TRANSFER_BUF_LEN (CAN_HEADER_MAX_LEN + CAN_FRAME_MAX_LEN)
+#define CAN_FRAME_MAX_BITS 128
+#define CAN_MAX_CONTINUOUS_READ 8
+
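+/*
+ * Message buffer layout exchanged with the K20: one DLC byte followed
+ * by a four-byte CAN ID, then up to eight payload bytes.
+ */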
+#define MB_DLC_OFF 0
+#define MB_EID_OFF 1
+#define MB_RTR_SHIFT 4
+#define MB_IDE_SHIFT 5
+#define MB_DLC_MASK 0xF
+#define MB_EID_LEN 4
+
+#define CANCTRL_MODMASK 0x03
+#define CANCTRL_INTMASK 0x38
+#define CANCTRL_INTEN BIT(2)
+#define CANINTF_RX BIT(3)
+#define CANINTF_TX BIT(4)
+#define CANINTF_ERR BIT(5)
+
+#define EFLG_EWARN 0x01
+#define EFLG_RXWAR 0x02
+#define EFLG_TXWAR 0x04
+#define EFLG_RXEP 0x08
+#define EFLG_TXEP 0x10
+#define EFLG_TXBO 0x20
+#define EFLG_RXOVR 0x40
+
+#define TX_ECHO_SKB_MAX 1
+
+#define K20_CAN_MAX_ID 1
+
+#define DEVICE_NAME "apalis-tk1-k20-can"
+
+static const struct can_bittiming_const apalis_tk1_k20_can_bittiming_const = {
+ .name = "tk1-k20-can",
+ .tseg1_min = 3,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 64,
+ .brp_inc = 1,
+};
+
+struct apalis_tk1_k20_priv {
+ struct can_priv can;
+ struct net_device *net;
+ struct apalis_tk1_k20_regmap *apalis_tk1_k20;
+ struct apalis_tk1_k20_can_platform_data *pdata;
+
+ struct sk_buff *tx_skb;
+ int tx_len;
+
+ struct workqueue_struct *wq;
+ struct work_struct tx_work;
+ struct work_struct restart_work;
+ struct mutex apalis_tk1_k20_can_lock;
+
+ int force_quit;
+ int after_suspend;
+#define AFTER_SUSPEND_UP 1
+#define AFTER_SUSPEND_DOWN 2
+#define AFTER_SUSPEND_RESTART 4
+ int restart_tx;
+};
+
+static void apalis_tk1_k20_can_clean(struct net_device *net)
+{
+ struct apalis_tk1_k20_priv *priv = netdev_priv(net);
+
+ if (priv->tx_skb || priv->tx_len)
+ net->stats.tx_errors++;
+ if (priv->tx_skb)
+ dev_kfree_skb(priv->tx_skb);
+ if (priv->tx_len)
+ can_free_echo_skb(priv->net, 0);
+ priv->tx_skb = NULL;
+ priv->tx_len = 0;
+}
+
+static void apalis_tk1_k20_can_hw_tx_frame(struct net_device *net, u8 *buf,
+ int len, int tx_buf_idx)
+{
+ /* TODO: Implement multiple TX buffer handling */
+ struct apalis_tk1_k20_priv *priv = netdev_priv(net);
+
+ apalis_tk1_k20_lock(priv->apalis_tk1_k20);
+ apalis_tk1_k20_reg_write_bulk(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN_OUT_BUF
+ + APALIS_TK1_K20_CAN_DEV_OFFSET(
+ priv->pdata->id), buf, len);
+ apalis_tk1_k20_unlock(priv->apalis_tk1_k20);
+}
+
+static void apalis_tk1_k20_can_hw_tx(struct net_device *net,
+ struct can_frame *frame, int tx_buf_idx)
+{
+ u8 buf[CAN_TRANSFER_BUF_LEN];
+
+ buf[MB_DLC_OFF] = frame->can_dlc;
+ memcpy(buf + MB_EID_OFF, &frame->can_id, MB_EID_LEN);
+ memcpy(buf + CAN_HEADER_MAX_LEN, frame->data, frame->can_dlc);
+
+ apalis_tk1_k20_can_hw_tx_frame(net, buf, frame->can_dlc
+ + CAN_HEADER_MAX_LEN, tx_buf_idx);
+}
+
+static void apalis_tk1_k20_can_hw_rx_frame(struct net_device *net, u8 *buf,
+ int buf_idx)
+{
+ struct apalis_tk1_k20_priv *priv = netdev_priv(net);
+
+ apalis_tk1_k20_lock(priv->apalis_tk1_k20);
+
+ apalis_tk1_k20_reg_read_bulk(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN_OUT_BUF
+ + APALIS_TK1_K20_CAN_DEV_OFFSET(
+ priv->pdata->id), buf,
+ CAN_TRANSFER_BUF_LEN);
+ apalis_tk1_k20_unlock(priv->apalis_tk1_k20);
+}
+
+static u32 apalis_tk1_k20_can_available_rx_frames(struct net_device *net)
+{
+ u32 frame_cnt = 0;
+ struct apalis_tk1_k20_priv *priv = netdev_priv(net);
+ int ret;
+
+ apalis_tk1_k20_lock(priv->apalis_tk1_k20);
+
+ ret = apalis_tk1_k20_reg_read(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN_IN_BUF_CNT
+ + APALIS_TK1_K20_CAN_DEV_OFFSET(
+ priv->pdata->id), &frame_cnt);
+ apalis_tk1_k20_unlock(priv->apalis_tk1_k20);
+
+ return (ret == 0) ? frame_cnt : 0;
+}
+
+static void apalis_tk1_k20_can_hw_rx(struct net_device *net, int buf_idx)
+{
+ struct apalis_tk1_k20_priv *priv = netdev_priv(net);
+ struct sk_buff *skb;
+ struct can_frame *frame;
+ u32 available_frames = 0;
+ u8 buf[CAN_TRANSFER_BUF_LEN];
+
+ available_frames = apalis_tk1_k20_can_available_rx_frames(net);
+
+ while (available_frames > 0) {
+ /* netif_rx_ni() consumes the skb, so allocate one per frame */
+ skb = alloc_can_skb(priv->net, &frame);
+ if (!skb) {
+ dev_err(&net->dev, "cannot allocate RX skb\n");
+ priv->net->stats.rx_dropped++;
+ return;
+ }
+
+ apalis_tk1_k20_can_hw_rx_frame(net, buf, buf_idx);
+ memcpy(&frame->can_id, buf + MB_EID_OFF, MB_EID_LEN);
+ /* Data length */
+ frame->can_dlc = get_can_dlc(buf[MB_DLC_OFF]);
+ memcpy(frame->data, buf + CAN_HEADER_MAX_LEN, frame->can_dlc);
+
+ priv->net->stats.rx_packets++;
+ priv->net->stats.rx_bytes += frame->can_dlc;
+
+ can_led_event(priv->net, CAN_LED_EVENT_RX);
+
+ netif_rx_ni(skb);
+ available_frames--;
+ }
+}
+
+static netdev_tx_t apalis_tk1_k20_can_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net)
+{
+ struct apalis_tk1_k20_priv *priv = netdev_priv(net);
+
+ if (priv->tx_skb || priv->tx_len) {
+ dev_warn(&net->dev, "hard_xmit called while TX busy\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ if (can_dropped_invalid_skb(net, skb))
+ return NETDEV_TX_OK;
+
+ netif_stop_queue(net);
+ priv->tx_skb = skb;
+ queue_work(priv->wq, &priv->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+static int apalis_tk1_k20_can_do_set_mode(struct net_device *net,
+ enum can_mode mode)
+{
+ struct apalis_tk1_k20_priv *priv = netdev_priv(net);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ apalis_tk1_k20_can_clean(net);
+ /* We have to delay work since I/O may sleep */
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ priv->restart_tx = 1;
+ if (priv->can.restart_ms == 0)
+ priv->after_suspend = AFTER_SUSPEND_RESTART;
+ queue_work(priv->wq, &priv->restart_work);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int apalis_tk1_k20_can_set_normal_mode(struct net_device *net)
+{
+ struct apalis_tk1_k20_priv *priv = netdev_priv(net);
+
+ apalis_tk1_k20_lock(priv->apalis_tk1_k20);
+ /* Enable interrupts */
+ apalis_tk1_k20_reg_rmw(priv->apalis_tk1_k20, APALIS_TK1_K20_CANREG
+ + APALIS_TK1_K20_CAN_DEV_OFFSET(
+ priv->pdata->id),
+ CANCTRL_INTEN, CANCTRL_INTEN);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+ /* Put device into loopback mode */
+ apalis_tk1_k20_reg_rmw(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CANREG
+ + APALIS_TK1_K20_CAN_DEV_OFFSET(
+ priv->pdata->id), CANCTRL_MODMASK,
+ CAN_CTRLMODE_LOOPBACK);
+ } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ /* Put device into listen-only mode */
+ apalis_tk1_k20_reg_rmw(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CANREG
+ + APALIS_TK1_K20_CAN_DEV_OFFSET(
+ priv->pdata->id), CANCTRL_MODMASK,
+ CAN_CTRLMODE_LISTENONLY);
+ } else {
+ /* Put device into normal mode */
+ apalis_tk1_k20_reg_rmw(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CANREG
+ + APALIS_TK1_K20_CAN_DEV_OFFSET(
+ priv->pdata->id), CANCTRL_MODMASK,
+ 0x00);
+ }
+ apalis_tk1_k20_unlock(priv->apalis_tk1_k20);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ return 0;
+}
+
+static int apalis_tk1_k20_can_do_set_bittiming(struct net_device *net)
+{
+ struct apalis_tk1_k20_priv *priv = netdev_priv(net);
+ struct can_bittiming *bt = &priv->can.bittiming;
+
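+ /*
+ * The baud-rate register takes bitrate / APALIS_TK1_CAN_CLK_UNIT as
+ * a single byte, so reject bitrates that do not fit.
+ */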
+ if ((bt->bitrate / APALIS_TK1_CAN_CLK_UNIT) > 0xFF)
+ return -EINVAL;
+
+ apalis_tk1_k20_lock(priv->apalis_tk1_k20);
+ apalis_tk1_k20_reg_write(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN_BAUD_REG
+ + APALIS_TK1_K20_CAN_DEV_OFFSET(
+ priv->pdata->id), (bt->bitrate /
+ APALIS_TK1_CAN_CLK_UNIT) & 0xFF);
+ apalis_tk1_k20_reg_write(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN_BIT_1
+ + APALIS_TK1_K20_CAN_DEV_OFFSET(
+ priv->pdata->id),
+ ((bt->sjw & 0x3) << 6) |
+ ((bt->phase_seg2 & 0x7) << 3) |
+ (bt->phase_seg1 & 0x7));
+ apalis_tk1_k20_reg_write(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN_BIT_2
+ + APALIS_TK1_K20_CAN_DEV_OFFSET(
+ priv->pdata->id),
+ (bt->prop_seg & 0x7));
+ apalis_tk1_k20_unlock(priv->apalis_tk1_k20);
+ dev_dbg(priv->apalis_tk1_k20->dev, "Setting CAN%d bitrate " \
+ "to %d (0x%X * 6.25kHz)\n", priv->pdata->id, bt->bitrate,
+ (bt->bitrate / APALIS_TK1_CAN_CLK_UNIT) & 0xFF);
+ dev_dbg(priv->apalis_tk1_k20->dev, "Setting CAN%d bit timing " \
+ "RJW = %d, PSEG1 = %d, PSEG2 = %d, PROPSEG = %d\n",
+ priv->pdata->id, bt->sjw, bt->phase_seg1,
+ bt->phase_seg2, bt->prop_seg);
+ dev_dbg(priv->apalis_tk1_k20->dev, "Setting CAN%d bit timing " \
+ "bitrate = %d\n", priv->pdata->id, bt->bitrate);
+
+ return 0;
+}
+
+static int apalis_tk1_k20_can_setup(struct net_device *net,
+ struct apalis_tk1_k20_priv *priv)
+{
+ apalis_tk1_k20_can_do_set_bittiming(net);
+
+ return 0;
+}
+
+static int apalis_tk1_k20_can_hw_reset(struct net_device *net)
+{
+ return 0;
+}
+
+static void apalis_tk1_k20_can_open_clean(struct net_device *net)
+{
+ struct apalis_tk1_k20_priv *priv = netdev_priv(net);
+ struct apalis_tk1_k20_can_platform_data *pdata = priv->pdata;
+
+ if (pdata->id == 0)
+ apalis_tk1_k20_irq_free(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN0_IRQ, priv);
+ if (pdata->id == 1)
+ apalis_tk1_k20_irq_free(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN1_IRQ, priv);
+ close_candev(net);
+}
+
+static int apalis_tk1_k20_can_stop(struct net_device *net)
+{
+ struct apalis_tk1_k20_priv *priv = netdev_priv(net);
+ struct apalis_tk1_k20_can_platform_data *pdata = priv->pdata;
+
+ close_candev(net);
+
+ priv->force_quit = 1;
+ if (pdata->id == 0)
+ apalis_tk1_k20_irq_free(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN0_IRQ, priv);
+ if (pdata->id == 1)
+ apalis_tk1_k20_irq_free(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN1_IRQ, priv);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+
+ mutex_lock(&priv->apalis_tk1_k20_can_lock);
+ apalis_tk1_k20_lock(priv->apalis_tk1_k20);
+ if (pdata->id == 0)
+ apalis_tk1_k20_irq_mask(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN0_IRQ);
+ if (pdata->id == 1)
+ apalis_tk1_k20_irq_mask(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN1_IRQ);
+ /* Disable and clear pending interrupts */
+ priv->can.state = CAN_STATE_STOPPED;
+ apalis_tk1_k20_unlock(priv->apalis_tk1_k20);
+ mutex_unlock(&priv->apalis_tk1_k20_can_lock);
+
+ can_led_event(net, CAN_LED_EVENT_STOP);
+
+ return 0;
+}
+
+static void apalis_tk1_k20_can_error_skb(struct net_device *net, int can_id,
+ int data1)
+{
+ struct sk_buff *skb;
+ struct can_frame *frame;
+
+ skb = alloc_can_err_skb(net, &frame);
+ if (skb) {
+ frame->can_id |= can_id;
+ frame->data[1] = data1;
+ netif_rx_ni(skb);
+ } else {
+ netdev_err(net, "cannot allocate error skb\n");
+ }
+}
+
+static void apalis_tk1_k20_can_tx_work_handler(struct work_struct *ws)
+{
+ struct apalis_tk1_k20_priv *priv = container_of(ws,
+ struct apalis_tk1_k20_priv, tx_work);
+ struct net_device *net = priv->net;
+ struct can_frame *frame;
+
+ mutex_lock(&priv->apalis_tk1_k20_can_lock);
+ if (priv->tx_skb) {
+ if (priv->can.state == CAN_STATE_BUS_OFF) {
+ apalis_tk1_k20_can_clean(net);
+ } else {
+ frame = (struct can_frame *)priv->tx_skb->data;
+
+ if (frame->can_dlc > CAN_FRAME_MAX_LEN)
+ frame->can_dlc = CAN_FRAME_MAX_LEN;
+ apalis_tk1_k20_can_hw_tx(net, frame, 0);
+ priv->tx_len = 1 + frame->can_dlc;
+ can_put_echo_skb(priv->tx_skb, net, 0);
+ priv->tx_skb = NULL;
+ }
+ }
+ mutex_unlock(&priv->apalis_tk1_k20_can_lock);
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int apalis_tk1_k20_can_suspend(struct device *dev)
+{
+ struct apalis_tk1_k20_priv *priv = dev_get_drvdata(dev);
+ struct apalis_tk1_k20_can_platform_data *pdata = priv->pdata;
+
+ priv->force_quit = 1;
+
+ mutex_lock(&priv->apalis_tk1_k20_can_lock);
+ apalis_tk1_k20_lock(priv->apalis_tk1_k20);
+ if (pdata->id == 0)
+ apalis_tk1_k20_irq_mask(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN0_IRQ);
+ if (pdata->id == 1)
+ apalis_tk1_k20_irq_mask(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN1_IRQ);
+ /* Disable interrupts */
+ apalis_tk1_k20_unlock(priv->apalis_tk1_k20);
+ mutex_unlock(&priv->apalis_tk1_k20_can_lock);
+ /* Note: at this point neither IST nor workqueues are running.
+ * open/stop cannot be called anyway so locking is not needed
+ */
+ if (netif_running(priv->net)) {
+ netif_device_detach(priv->net);
+
+ priv->after_suspend = AFTER_SUSPEND_UP;
+ } else {
+ priv->after_suspend = AFTER_SUSPEND_DOWN;
+ }
+
+ return 0;
+}
+
+static int apalis_tk1_k20_can_resume(struct device *dev)
+{
+ struct apalis_tk1_k20_priv *priv = dev_get_drvdata(dev);
+ struct apalis_tk1_k20_can_platform_data *pdata = priv->pdata;
+
+ if (priv->after_suspend & AFTER_SUSPEND_UP)
+ queue_work(priv->wq, &priv->restart_work);
+ else
+ priv->after_suspend = 0;
+
+ priv->force_quit = 0;
+ mutex_lock(&priv->apalis_tk1_k20_can_lock);
+ apalis_tk1_k20_lock(priv->apalis_tk1_k20);
+ if (pdata->id == 0)
+ apalis_tk1_k20_irq_unmask(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN0_IRQ);
+ if (pdata->id == 1)
+ apalis_tk1_k20_irq_unmask(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN1_IRQ);
+ /* Enable interrupts */
+ priv->can.state = CAN_STATE_STOPPED;
+ apalis_tk1_k20_unlock(priv->apalis_tk1_k20);
+ mutex_unlock(&priv->apalis_tk1_k20_can_lock);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(apalis_tk1_k20_can_pm_ops, apalis_tk1_k20_can_suspend,
+ apalis_tk1_k20_can_resume);
+
+#endif
+
+static void apalis_tk1_k20_can_restart_work_handler(struct work_struct *ws)
+{
+ struct apalis_tk1_k20_priv *priv = container_of(ws,
+ struct apalis_tk1_k20_priv, restart_work);
+ struct net_device *net = priv->net;
+
+ mutex_lock(&priv->apalis_tk1_k20_can_lock);
+ if (priv->after_suspend) {
+ mdelay(10);
+ apalis_tk1_k20_can_hw_reset(net);
+ apalis_tk1_k20_can_setup(net, priv);
+ if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
+ apalis_tk1_k20_can_set_normal_mode(net);
+ } else if (priv->after_suspend & AFTER_SUSPEND_UP) {
+ netif_device_attach(net);
+ apalis_tk1_k20_can_clean(net);
+ apalis_tk1_k20_can_set_normal_mode(net);
+ netif_wake_queue(net);
+ }
+ priv->after_suspend = 0;
+ priv->force_quit = 0;
+ }
+
+ if (priv->restart_tx) {
+ priv->restart_tx = 0;
+ apalis_tk1_k20_can_clean(net);
+ netif_wake_queue(net);
+ apalis_tk1_k20_can_error_skb(net, CAN_ERR_RESTARTED, 0);
+ }
+ mutex_unlock(&priv->apalis_tk1_k20_can_lock);
+}
+
+static irqreturn_t apalis_tk1_k20_can_ist(int irq, void *dev_id)
+{
+ struct apalis_tk1_k20_priv *priv = dev_id;
+ struct net_device *net = priv->net;
+ int max_continuous_read = CAN_MAX_CONTINUOUS_READ;
+
+ mutex_lock(&priv->apalis_tk1_k20_can_lock);
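+ /*
+ * Bound the work done per threaded-IRQ invocation so a flooded bus
+ * cannot keep this handler looping indefinitely.
+ */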
+ while (!priv->force_quit && max_continuous_read) {
+ enum can_state new_state;
+ int ret;
+ u32 intf, eflag;
+ u8 clear_intf = 0;
+ int can_id = 0, data1 = 0;
+
+ apalis_tk1_k20_lock(priv->apalis_tk1_k20);
+ ret = apalis_tk1_k20_reg_read(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CANREG
+ + APALIS_TK1_K20_CAN_DEV_OFFSET(
+ priv->pdata->id), &intf);
+ apalis_tk1_k20_unlock(priv->apalis_tk1_k20);
+
+ if (ret) {
+ dev_err(&net->dev, "Communication error\n");
+ break;
+ }
+
+ max_continuous_read--;
+
+ intf &= CANCTRL_INTMASK;
+ /* receive */
+ if (intf & CANINTF_RX)
+ apalis_tk1_k20_can_hw_rx(net, 0);
+
+ /* any error or TX interrupt we need to clear? */
+ if (intf & (CANINTF_ERR | CANINTF_TX))
+ clear_intf |= intf & (CANINTF_ERR | CANINTF_TX);
+ apalis_tk1_k20_lock(priv->apalis_tk1_k20);
+ if (clear_intf)
+ ret = apalis_tk1_k20_reg_rmw(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CANREG +
+ APALIS_TK1_K20_CAN_DEV_OFFSET(
+ priv->pdata->id),
+ CANCTRL_INTMASK, 0x00);
+ if (ret) {
+ apalis_tk1_k20_unlock(priv->apalis_tk1_k20);
+ dev_err(&net->dev, "Communication error\n");
+ break;
+ }
+ ret = apalis_tk1_k20_reg_read(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CANERR +
+ APALIS_TK1_K20_CAN_DEV_OFFSET(
+ priv->pdata->id), &eflag);
+ apalis_tk1_k20_unlock(priv->apalis_tk1_k20);
+ if (ret) {
+ dev_err(&net->dev, "Communication error\n");
+ break;
+ }
+ /* Update can state */
+ if (intf & CANINTF_ERR) {
+ if (eflag & EFLG_TXBO) {
+ new_state = CAN_STATE_BUS_OFF;
+ can_id |= CAN_ERR_BUSOFF;
+ } else if (eflag & EFLG_TXEP) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ can_id |= CAN_ERR_CRTL;
+ data1 |= CAN_ERR_CRTL_TX_PASSIVE;
+ } else if (eflag & EFLG_RXEP) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ can_id |= CAN_ERR_CRTL;
+ data1 |= CAN_ERR_CRTL_RX_PASSIVE;
+ } else if (eflag & EFLG_TXWAR) {
+ new_state = CAN_STATE_ERROR_WARNING;
+ can_id |= CAN_ERR_CRTL;
+ data1 |= CAN_ERR_CRTL_TX_WARNING;
+ } else if (eflag & EFLG_RXWAR) {
+ new_state = CAN_STATE_ERROR_WARNING;
+ can_id |= CAN_ERR_CRTL;
+ data1 |= CAN_ERR_CRTL_RX_WARNING;
+ } else {
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ }
+ }
+
+ /* Update can state statistics */
+ switch (priv->can.state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ if (new_state >= CAN_STATE_ERROR_WARNING &&
+ new_state <= CAN_STATE_BUS_OFF)
+ priv->can.can_stats.error_warning++;
+ case CAN_STATE_ERROR_WARNING: /* fallthrough */
+ if (new_state >= CAN_STATE_ERROR_PASSIVE &&
+ new_state <= CAN_STATE_BUS_OFF)
+ priv->can.can_stats.error_passive++;
+ break;
+ default:
+ break;
+ }
+ priv->can.state = new_state;
+
+ if (intf & CANINTF_ERR) {
+ /* Handle overflow counters */
+ if (eflag & EFLG_RXOVR) {
+ net->stats.rx_over_errors++;
+ net->stats.rx_errors++;
+ can_id |= CAN_ERR_CRTL;
+ data1 |= CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+ apalis_tk1_k20_can_error_skb(net, can_id, data1);
+ }
+
+ if (priv->can.state == CAN_STATE_BUS_OFF &&
+ priv->can.restart_ms == 0) {
+ priv->force_quit = 1;
+ can_bus_off(net);
+ break;
+ }
+
+ if (intf == 0)
+ break;
+
+ if (intf & CANINTF_TX) {
+ net->stats.tx_packets++;
+ net->stats.tx_bytes += priv->tx_len - 1;
+ can_led_event(net, CAN_LED_EVENT_TX);
+ if (priv->tx_len) {
+ can_get_echo_skb(net, 0);
+ priv->tx_len = 0;
+ }
+ netif_wake_queue(net);
+ }
+ }
+ mutex_unlock(&priv->apalis_tk1_k20_can_lock);
+ return IRQ_HANDLED;
+}
+
+static int apalis_tk1_k20_can_open(struct net_device *net)
+{
+ struct apalis_tk1_k20_priv *priv = netdev_priv(net);
+ struct apalis_tk1_k20_can_platform_data *pdata = priv->pdata;
+ int ret;
+
+ ret = open_candev(net);
+ if (ret) {
+ dev_err(&net->dev, "unable to initialize CAN\n");
+ return ret;
+ }
+
+ mutex_lock(&priv->apalis_tk1_k20_can_lock);
+
+ priv->force_quit = 0;
+ priv->tx_skb = NULL;
+ priv->tx_len = 0;
+ apalis_tk1_k20_lock(priv->apalis_tk1_k20);
+ if (pdata->id == 0)
+ ret = apalis_tk1_k20_irq_request(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN0_IRQ,
+ apalis_tk1_k20_can_ist,
+ DEVICE_NAME, priv);
+ if (pdata->id == 1)
+ ret = apalis_tk1_k20_irq_request(priv->apalis_tk1_k20,
+ APALIS_TK1_K20_CAN1_IRQ,
+ apalis_tk1_k20_can_ist,
+ DEVICE_NAME, priv);
+ apalis_tk1_k20_unlock(priv->apalis_tk1_k20);
+ if (ret) {
+ dev_err(&net->dev, "failed to acquire IRQ\n");
+ close_candev(net);
+ goto open_unlock;
+ }
+
+ priv->wq = create_freezable_workqueue("apalis_tk1_k20_wq");
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ apalis_tk1_k20_can_open_clean(net);
+ goto open_unlock;
+ }
+ INIT_WORK(&priv->tx_work, apalis_tk1_k20_can_tx_work_handler);
+ INIT_WORK(&priv->restart_work, apalis_tk1_k20_can_restart_work_handler);
+
+ ret = apalis_tk1_k20_can_hw_reset(net);
+ if (ret) {
+ apalis_tk1_k20_can_open_clean(net);
+ goto open_unlock;
+ }
+
+ ret = apalis_tk1_k20_can_setup(net, priv);
+ if (ret) {
+ apalis_tk1_k20_can_open_clean(net);
+ goto open_unlock;
+ }
+
+ ret = apalis_tk1_k20_can_set_normal_mode(net);
+ if (ret) {
+ apalis_tk1_k20_can_open_clean(net);
+ goto open_unlock;
+ }
+
+ can_led_event(net, CAN_LED_EVENT_OPEN);
+
+ netif_wake_queue(net);
+
+open_unlock:
+ mutex_unlock(&priv->apalis_tk1_k20_can_lock);
+ return ret;
+}
+
+static const struct net_device_ops apalis_tk1_k20_netdev_ops = {
+ .ndo_open = apalis_tk1_k20_can_open,
+ .ndo_stop = apalis_tk1_k20_can_stop,
+ .ndo_start_xmit = apalis_tk1_k20_can_hard_start_xmit,
+};
+
+static int apalis_tk1_k20_can_probe(struct platform_device *pdev)
+{
+ struct net_device *net;
+ struct apalis_tk1_k20_priv *priv;
+ struct apalis_tk1_k20_can_platform_data *pdata =
+ pdev->dev.platform_data;
+ int ret = -ENODEV;
+
+ if (!pdata) {
+ pdata = kmalloc(sizeof(struct apalis_tk1_k20_can_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ if (pdev->id == -1) {
+ pdata->id = 0;
+ } else if (pdev->id >= 0 && pdev->id <= K20_CAN_MAX_ID) {
+ pdata->id = pdev->id;
+ } else {
+ kfree(pdata);
+ goto error_out;
+ }
+ }
+
+ if (pdata->id > K20_CAN_MAX_ID)
+ goto error_out;
+ /* Allocate can/net device */
+ net = alloc_candev(sizeof(struct apalis_tk1_k20_priv), TX_ECHO_SKB_MAX);
+ if (!net) {
+ ret = -ENOMEM;
+ goto error_out;
+ }
+
+ net->netdev_ops = &apalis_tk1_k20_netdev_ops;
+ net->flags |= IFF_ECHO;
+
+ priv = netdev_priv(net);
+ priv->can.bittiming_const = &apalis_tk1_k20_can_bittiming_const;
+ priv->can.do_set_mode = apalis_tk1_k20_can_do_set_mode;
+ priv->can.clock.freq = 8000000;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
+ priv->net = net;
+ priv->pdata = pdata;
+ priv->apalis_tk1_k20 = dev_get_drvdata(pdev->dev.parent);
+
+ mutex_init(&priv->apalis_tk1_k20_can_lock);
+
+ SET_NETDEV_DEV(net, &pdev->dev);
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = register_candev(net);
+ if (ret)
+ goto error_probe;
+
+ devm_can_led_init(net);
+
+ dev_info(&pdev->dev, "probed %d\n", pdev->id);
+
+ return ret;
+
+error_probe:
+ free_candev(net);
+error_out:
+ return ret;
+}
+
+static int apalis_tk1_k20_can_remove(struct platform_device *pdev)
+{
+ struct apalis_tk1_k20_priv *priv = platform_get_drvdata(pdev);
+ struct net_device *net = priv->net;
+
+ unregister_candev(net);
+ free_candev(net);
+
+ return 0;
+}
+
+static const struct platform_device_id apalis_tk1_k20_can_idtable[] = {
+ {.name = "apalis-tk1-k20-can", },
+ { /* sentinel */}
+};
+
+MODULE_DEVICE_TABLE(platform, apalis_tk1_k20_can_idtable);
+
+static struct platform_driver apalis_tk1_k20_can_driver = {
+ .id_table = apalis_tk1_k20_can_idtable,
+ .remove = __exit_p(apalis_tk1_k20_can_remove),
+ .driver = {
+ .name = DEVICE_NAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &apalis_tk1_k20_can_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver_probe(apalis_tk1_k20_can_driver,
+ &apalis_tk1_k20_can_probe);
+
+MODULE_DESCRIPTION("CAN driver for K20 MCU on Apalis TK1");
+MODULE_AUTHOR("Dominik Sliwa <dominik.sliwa@toradex.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index f19700e285bb..1d2b2a359300 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -34,4 +34,6 @@ obj-$(CONFIG_IGB) += igb.o
igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o igb_ptp.o igb_hwmon.o
+ e1000_i210.o igb_ptp.o igb_hwmon.o \
+ e1000_api.o e1000_manage.o igb_debugfs.o \
+ igb_param.o igb_procfs.o igb_vmdq.o kcompat.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index ff6a17cb1362..b4b973e57e82 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,90 +12,123 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
-/* e1000_82575
- * e1000_82576
+/*
+ * 82575EB Gigabit Network Connection
+ * 82575EB Gigabit Backplane Connection
+ * 82575GB Gigabit Network Connection
+ * 82576 Gigabit Network Connection
+ * 82576 Quad Port Gigabit Mezzanine Adapter
+ * 82580 Gigabit Network Connection
+ * I350 Gigabit Network Connection
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/types.h>
-#include <linux/if_ether.h>
-#include <linux/i2c.h>
-
-#include "e1000_mac.h"
-#include "e1000_82575.h"
+#include "e1000_api.h"
#include "e1000_i210.h"
-static s32 igb_get_invariants_82575(struct e1000_hw *);
-static s32 igb_acquire_phy_82575(struct e1000_hw *);
-static void igb_release_phy_82575(struct e1000_hw *);
-static s32 igb_acquire_nvm_82575(struct e1000_hw *);
-static void igb_release_nvm_82575(struct e1000_hw *);
-static s32 igb_check_for_link_82575(struct e1000_hw *);
-static s32 igb_get_cfg_done_82575(struct e1000_hw *);
-static s32 igb_init_hw_82575(struct e1000_hw *);
-static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
-static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
-static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
-static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
-static s32 igb_reset_hw_82575(struct e1000_hw *);
-static s32 igb_reset_hw_82580(struct e1000_hw *);
-static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
-static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
-static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
-static s32 igb_setup_copper_link_82575(struct e1000_hw *);
-static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
-static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
-static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
-static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
-static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
- u16 *);
-static s32 igb_get_phy_id_82575(struct e1000_hw *);
-static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
-static bool igb_sgmii_active_82575(struct e1000_hw *);
-static s32 igb_reset_init_script_82575(struct e1000_hw *);
-static s32 igb_read_mac_addr_82575(struct e1000_hw *);
-static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
-static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
-static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
-static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
-static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
-static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
-static const u16 e1000_82580_rxpbs_table[] =
- { 36, 72, 144, 1, 2, 4, 8, 16,
- 35, 70, 140 };
+static s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
+static s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
+static s32 e1000_acquire_phy_82575(struct e1000_hw *hw);
+static void e1000_release_phy_82575(struct e1000_hw *hw);
+static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw);
+static void e1000_release_nvm_82575(struct e1000_hw *hw);
+static s32 e1000_check_for_link_82575(struct e1000_hw *hw);
+static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw);
+static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw);
+static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
+static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+static s32 e1000_reset_hw_82575(struct e1000_hw *hw);
+static s32 e1000_reset_hw_82580(struct e1000_hw *hw);
+static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw,
+ u32 offset, u16 *data);
+static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw,
+ u32 offset, u16 data);
+static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw,
+ bool active);
+static s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw,
+ bool active);
+static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
+ bool active);
+static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw);
+static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw);
+static s32 e1000_get_media_type_82575(struct e1000_hw *hw);
+static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw);
+static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
+static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
+ u32 offset, u16 data);
+static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
+static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
+static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex);
+static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
+static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
+static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
+static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
+static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
+static void e1000_config_collision_dist_82575(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
+static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
+static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
+static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
+static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw);
+static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
+ u16 offset);
+static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
+ u16 offset);
+static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
+static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
+static void e1000_clear_vfta_i350(struct e1000_hw *hw);
+
+static void e1000_i2c_start(struct e1000_hw *hw);
+static void e1000_i2c_stop(struct e1000_hw *hw);
+static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data);
+static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data);
+static s32 e1000_get_i2c_ack(struct e1000_hw *hw);
+static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data);
+static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data);
+static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
+static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl);
+static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data);
+static bool e1000_get_i2c_data(u32 *i2cctl);
+
+static const u16 e1000_82580_rxpbs_table[] = {
+ 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
#define E1000_82580_RXPBS_TABLE_SIZE \
- (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
+ (sizeof(e1000_82580_rxpbs_table) / \
+ sizeof(e1000_82580_rxpbs_table[0]))
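The new divisor uses the array's own element type, so the entry count stays correct even if the table is ever retyped. A minimal stand-alone sketch of the same idiom, with illustrative names that are not part of the driver:

	#include <stdio.h>

	static const unsigned short example_table[] = {
		36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
	};
	#define EXAMPLE_TABLE_SIZE \
		(sizeof(example_table) / sizeof(example_table[0]))

	int main(void)
	{
		/* prints "11 entries" whatever the element type above is */
		printf("%zu entries\n", EXAMPLE_TABLE_SIZE);
		return 0;
	}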
/**
- * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
+ * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
* @hw: pointer to the HW structure
*
* Called to determine if the I2C pins are being used for I2C or as an
* external MDIO interface since the two options are mutually exclusive.
**/
-static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
+static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw)
{
u32 reg = 0;
bool ext_mdio = false;
+ DEBUGFUNC("e1000_sgmii_uses_mdio_82575");
+
switch (hw->mac.type) {
case e1000_82575:
case e1000_82576:
- reg = rd32(E1000_MDIC);
+ reg = E1000_READ_REG(hw, E1000_MDIC);
ext_mdio = !!(reg & E1000_MDIC_DEST);
break;
case e1000_82580:
@@ -103,7 +136,7 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
case e1000_i354:
case e1000_i210:
case e1000_i211:
- reg = rd32(E1000_MDICNFG);
+ reg = E1000_READ_REG(hw, E1000_MDICNFG);
ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
break;
default:
@@ -113,109 +146,161 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
}
/**
- * igb_init_phy_params_82575 - Init PHY func ptrs.
+ * e1000_init_phy_params_82575 - Init PHY func ptrs.
* @hw: pointer to the HW structure
**/
-static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
+static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
+ s32 ret_val = E1000_SUCCESS;
u32 ctrl_ext;
+ DEBUGFUNC("e1000_init_phy_params_82575");
+
+ phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic;
+ phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic;
+
if (hw->phy.media_type != e1000_media_type_copper) {
phy->type = e1000_phy_none;
goto out;
}
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_82575;
+
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
phy->reset_delay_us = 100;
- ctrl_ext = rd32(E1000_CTRL_EXT);
+ phy->ops.acquire = e1000_acquire_phy_82575;
+ phy->ops.check_reset_block = e1000_check_reset_block_generic;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_82575;
+ phy->ops.release = e1000_release_phy_82575;
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
- if (igb_sgmii_active_82575(hw)) {
- phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
+ if (e1000_sgmii_active_82575(hw)) {
+ phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
ctrl_ext |= E1000_CTRL_I2C_ENA;
} else {
- phy->ops.reset = igb_phy_hw_reset;
+ phy->ops.reset = e1000_phy_hw_reset_generic;
ctrl_ext &= ~E1000_CTRL_I2C_ENA;
}
- wr32(E1000_CTRL_EXT, ctrl_ext);
- igb_reset_mdicnfg_82580(hw);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ e1000_reset_mdicnfg_82580(hw);
- if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
- phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
- phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
+ if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) {
+ phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
+ phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
} else {
switch (hw->mac.type) {
case e1000_82580:
case e1000_i350:
case e1000_i354:
- phy->ops.read_reg = igb_read_phy_reg_82580;
- phy->ops.write_reg = igb_write_phy_reg_82580;
+ phy->ops.read_reg = e1000_read_phy_reg_82580;
+ phy->ops.write_reg = e1000_write_phy_reg_82580;
break;
case e1000_i210:
case e1000_i211:
- phy->ops.read_reg = igb_read_phy_reg_gs40g;
- phy->ops.write_reg = igb_write_phy_reg_gs40g;
+ phy->ops.read_reg = e1000_read_phy_reg_gs40g;
+ phy->ops.write_reg = e1000_write_phy_reg_gs40g;
break;
default:
- phy->ops.read_reg = igb_read_phy_reg_igp;
- phy->ops.write_reg = igb_write_phy_reg_igp;
+ phy->ops.read_reg = e1000_read_phy_reg_igp;
+ phy->ops.write_reg = e1000_write_phy_reg_igp;
}
}
- /* set lan id */
- hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
- E1000_STATUS_FUNC_SHIFT;
-
/* Set phy->phy_addr and phy->id. */
- ret_val = igb_get_phy_id_82575(hw);
- if (ret_val)
- return ret_val;
+ ret_val = e1000_get_phy_id_82575(hw);
/* Verify phy id and set remaining function pointers */
switch (phy->id) {
- case M88E1545_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
case M88E1111_I_PHY_ID:
phy->type = e1000_phy_m88;
- phy->ops.check_polarity = igb_check_polarity_m88;
- phy->ops.get_phy_info = igb_get_phy_info_m88;
- if (phy->id != M88E1111_I_PHY_ID)
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ if (phy->id == I347AT4_E_PHY_ID ||
+ phy->id == M88E1112_E_PHY_ID ||
+ phy->id == M88E1340M_E_PHY_ID)
+ phy->ops.get_cable_length =
+ e1000_get_cable_length_m88_gen2;
+ else if (phy->id == M88E1543_E_PHY_ID ||
+ phy->id == M88E1512_E_PHY_ID)
phy->ops.get_cable_length =
- igb_get_cable_length_m88_gen2;
+ e1000_get_cable_length_m88_gen2;
else
- phy->ops.get_cable_length = igb_get_cable_length_m88;
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ /* Check if this PHY is configured for media swap. */
+ if (phy->id == M88E1112_E_PHY_ID) {
+ u16 data;
+
+ ret_val = phy->ops.write_reg(hw,
+ E1000_M88E1112_PAGE_ADDR,
+ 2);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.read_reg(hw,
+ E1000_M88E1112_MAC_CTRL_1,
+ &data);
+ if (ret_val)
+ goto out;
+
+ data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
+ E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
+ if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
+ data == E1000_M88E1112_AUTO_COPPER_BASEX)
+ hw->mac.ops.check_for_link =
+ e1000_check_for_link_media_swap;
+ }
+ if (phy->id == M88E1512_E_PHY_ID) {
+ ret_val = e1000_initialize_M88E1512_phy(hw);
+ if (ret_val)
+ goto out;
+ }
+ if (phy->id == M88E1543_E_PHY_ID) {
+ ret_val = e1000_initialize_M88E1543_phy(hw);
+ if (ret_val)
+ goto out;
+ }
break;
case IGP03E1000_E_PHY_ID:
+ case IGP04E1000_E_PHY_ID:
phy->type = e1000_phy_igp_3;
- phy->ops.get_phy_info = igb_get_phy_info_igp;
- phy->ops.get_cable_length = igb_get_cable_length_igp_2;
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
+ phy->ops.check_polarity = e1000_check_polarity_igp;
+ phy->ops.get_info = e1000_get_phy_info_igp;
+ phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
break;
case I82580_I_PHY_ID:
case I350_I_PHY_ID:
phy->type = e1000_phy_82580;
+ phy->ops.check_polarity = e1000_check_polarity_82577;
phy->ops.force_speed_duplex =
- igb_phy_force_speed_duplex_82580;
- phy->ops.get_cable_length = igb_get_cable_length_82580;
- phy->ops.get_phy_info = igb_get_phy_info_82580;
- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
+ e1000_phy_force_speed_duplex_82577;
+ phy->ops.get_cable_length = e1000_get_cable_length_82577;
+ phy->ops.get_info = e1000_get_phy_info_82577;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
break;
case I210_I_PHY_ID:
phy->type = e1000_phy_i210;
- phy->ops.check_polarity = igb_check_polarity_m88;
- phy->ops.get_phy_info = igb_get_phy_info_m88;
- phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2;
+ phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580;
+ phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
break;
default:
ret_val = -E1000_ERR_PHY;
@@ -227,18 +312,21 @@ out:
}
/**
- * igb_init_nvm_params_82575 - Init NVM func ptrs.
+ * e1000_init_nvm_params_82575 - Init NVM func ptrs.
* @hw: pointer to the HW structure
**/
-static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
- u32 eecd = rd32(E1000_EECD);
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
u16 size;
+ DEBUGFUNC("e1000_init_nvm_params_82575");
+
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
- /* Added to a constant, "size" becomes the left-shift value
+ /*
+ * Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
@@ -277,251 +365,187 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
nvm->type = e1000_nvm_flash_hw;
}
- /* NVM Function Pointers */
+ /* Function Pointers */
+ nvm->ops.acquire = e1000_acquire_nvm_82575;
+ nvm->ops.release = e1000_release_nvm_82575;
+ if (nvm->word_size < (1 << 15))
+ nvm->ops.read = e1000_read_nvm_eerd;
+ else
+ nvm->ops.read = e1000_read_nvm_spi;
+
+ nvm->ops.write = e1000_write_nvm_spi;
+ nvm->ops.validate = e1000_validate_nvm_checksum_generic;
+ nvm->ops.update = e1000_update_nvm_checksum_generic;
+ nvm->ops.valid_led_default = e1000_valid_led_default_82575;
+
+ /* override generic family function pointers for specific descendants */
switch (hw->mac.type) {
case e1000_82580:
- nvm->ops.validate = igb_validate_nvm_checksum_82580;
- nvm->ops.update = igb_update_nvm_checksum_82580;
- nvm->ops.acquire = igb_acquire_nvm_82575;
- nvm->ops.release = igb_release_nvm_82575;
- if (nvm->word_size < (1 << 15))
- nvm->ops.read = igb_read_nvm_eerd;
- else
- nvm->ops.read = igb_read_nvm_spi;
- nvm->ops.write = igb_write_nvm_spi;
+ nvm->ops.validate = e1000_validate_nvm_checksum_82580;
+ nvm->ops.update = e1000_update_nvm_checksum_82580;
break;
- case e1000_i354:
case e1000_i350:
- nvm->ops.validate = igb_validate_nvm_checksum_i350;
- nvm->ops.update = igb_update_nvm_checksum_i350;
- nvm->ops.acquire = igb_acquire_nvm_82575;
- nvm->ops.release = igb_release_nvm_82575;
- if (nvm->word_size < (1 << 15))
- nvm->ops.read = igb_read_nvm_eerd;
- else
- nvm->ops.read = igb_read_nvm_spi;
- nvm->ops.write = igb_write_nvm_spi;
- break;
- case e1000_i210:
- nvm->ops.validate = igb_validate_nvm_checksum_i210;
- nvm->ops.update = igb_update_nvm_checksum_i210;
- nvm->ops.acquire = igb_acquire_nvm_i210;
- nvm->ops.release = igb_release_nvm_i210;
- nvm->ops.read = igb_read_nvm_srrd_i210;
- nvm->ops.write = igb_write_nvm_srwr_i210;
- nvm->ops.valid_led_default = igb_valid_led_default_i210;
- break;
- case e1000_i211:
- nvm->ops.acquire = igb_acquire_nvm_i210;
- nvm->ops.release = igb_release_nvm_i210;
- nvm->ops.read = igb_read_nvm_i211;
- nvm->ops.valid_led_default = igb_valid_led_default_i210;
- nvm->ops.validate = NULL;
- nvm->ops.update = NULL;
- nvm->ops.write = NULL;
+ case e1000_i354:
+ nvm->ops.validate = e1000_validate_nvm_checksum_i350;
+ nvm->ops.update = e1000_update_nvm_checksum_i350;
break;
default:
- nvm->ops.validate = igb_validate_nvm_checksum;
- nvm->ops.update = igb_update_nvm_checksum;
- nvm->ops.acquire = igb_acquire_nvm_82575;
- nvm->ops.release = igb_release_nvm_82575;
- if (nvm->word_size < (1 << 15))
- nvm->ops.read = igb_read_nvm_eerd;
- else
- nvm->ops.read = igb_read_nvm_spi;
- nvm->ops.write = igb_write_nvm_spi;
break;
}
- return 0;
+ return E1000_SUCCESS;
}
/**
- * igb_init_mac_params_82575 - Init MAC func ptrs.
+ * e1000_init_mac_params_82575 - Init MAC func ptrs.
* @hw: pointer to the HW structure
**/
-static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
+static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ DEBUGFUNC("e1000_init_mac_params_82575");
+
+ /* Derives media type */
+ e1000_get_media_type_82575(hw);
/* Set mta register count */
mac->mta_reg_count = 128;
+ /* Set uta register count */
+ mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
/* Set rar entry count */
- switch (mac->type) {
- case e1000_82576:
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
+ if (mac->type == e1000_82576)
mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
- break;
- case e1000_82580:
+ if (mac->type == e1000_82580)
mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
- break;
- case e1000_i350:
- case e1000_i354:
+ if (mac->type == e1000_i350 || mac->type == e1000_i354)
mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
- break;
- default:
- mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
- break;
- }
- /* reset */
- if (mac->type >= e1000_82580)
- mac->ops.reset_hw = igb_reset_hw_82580;
- else
- mac->ops.reset_hw = igb_reset_hw_82575;
- if (mac->type >= e1000_i210) {
- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
- mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
+ /* Enable EEE default settings for EEE supported devices */
+ if (mac->type >= e1000_i350)
+ dev_spec->eee_disable = false;
- } else {
- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
- mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
- }
+ /* Allow a single clear of the SW semaphore on I210 and newer */
+ if (mac->type >= e1000_i210)
+ dev_spec->clear_semaphore_once = true;
/* Set if part includes ASF firmware */
mac->asf_firmware_present = true;
- /* Set if manageability features are enabled. */
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC supported; valid only if manageability features are enabled. */
mac->arc_subsystem_valid =
- (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
- ? true : false;
- /* enable EEE on i350 parts and later parts */
- if (mac->type >= e1000_i350)
- dev_spec->eee_disable = false;
+ !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK);
+
+ /* Function pointers */
+
+ /* bus type/speed/width */
+ mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
+ /* reset */
+ if (mac->type >= e1000_82580)
+ mac->ops.reset_hw = e1000_reset_hw_82580;
else
- dev_spec->eee_disable = true;
- /* Allow a single clear of the SW semaphore on I210 and newer */
- if (mac->type >= e1000_i210)
- dev_spec->clear_semaphore_once = true;
+ mac->ops.reset_hw = e1000_reset_hw_82575;
+ /* hw initialization */
+ if ((mac->type == e1000_i210) || (mac->type == e1000_i211))
+ mac->ops.init_hw = e1000_init_hw_i210;
+ else
+ mac->ops.init_hw = e1000_init_hw_82575;
+ /* link setup */
+ mac->ops.setup_link = e1000_setup_link_generic;
/* physical interface link setup */
mac->ops.setup_physical_interface =
(hw->phy.media_type == e1000_media_type_copper)
- ? igb_setup_copper_link_82575
- : igb_setup_serdes_link_82575;
-
- return 0;
-}
-
-static s32 igb_get_invariants_82575(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
- struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
- s32 ret_val;
- u32 ctrl_ext = 0;
-
- switch (hw->device_id) {
- case E1000_DEV_ID_82575EB_COPPER:
- case E1000_DEV_ID_82575EB_FIBER_SERDES:
- case E1000_DEV_ID_82575GB_QUAD_COPPER:
- mac->type = e1000_82575;
- break;
- case E1000_DEV_ID_82576:
- case E1000_DEV_ID_82576_NS:
- case E1000_DEV_ID_82576_NS_SERDES:
- case E1000_DEV_ID_82576_FIBER:
- case E1000_DEV_ID_82576_SERDES:
- case E1000_DEV_ID_82576_QUAD_COPPER:
- case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
- case E1000_DEV_ID_82576_SERDES_QUAD:
- mac->type = e1000_82576;
- break;
- case E1000_DEV_ID_82580_COPPER:
- case E1000_DEV_ID_82580_FIBER:
- case E1000_DEV_ID_82580_QUAD_FIBER:
- case E1000_DEV_ID_82580_SERDES:
- case E1000_DEV_ID_82580_SGMII:
- case E1000_DEV_ID_82580_COPPER_DUAL:
- case E1000_DEV_ID_DH89XXCC_SGMII:
- case E1000_DEV_ID_DH89XXCC_SERDES:
- case E1000_DEV_ID_DH89XXCC_BACKPLANE:
- case E1000_DEV_ID_DH89XXCC_SFP:
- mac->type = e1000_82580;
- break;
- case E1000_DEV_ID_I350_COPPER:
- case E1000_DEV_ID_I350_FIBER:
- case E1000_DEV_ID_I350_SERDES:
- case E1000_DEV_ID_I350_SGMII:
- mac->type = e1000_i350;
- break;
- case E1000_DEV_ID_I210_COPPER:
- case E1000_DEV_ID_I210_FIBER:
- case E1000_DEV_ID_I210_SERDES:
- case E1000_DEV_ID_I210_SGMII:
- mac->type = e1000_i210;
- break;
- case E1000_DEV_ID_I211_COPPER:
- mac->type = e1000_i211;
- break;
- case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
- case E1000_DEV_ID_I354_SGMII:
- case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
- mac->type = e1000_i354;
- break;
- default:
- return -E1000_ERR_MAC_INIT;
- break;
+ ? e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575;
+ /* physical interface shutdown */
+ mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
+ /* physical interface power up */
+ mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575;
+ /* check for link */
+ mac->ops.check_for_link = e1000_check_for_link_82575;
+ /* read mac address */
+ mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
+ /* configure collision distance */
+ mac->ops.config_collision_dist = e1000_config_collision_dist_82575;
+ /* multicast address update */
+ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+ if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) {
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_i350;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_i350;
+ } else {
+ /* writing VFTA */
+ mac->ops.write_vfta = e1000_write_vfta_generic;
+ /* clearing VFTA */
+ mac->ops.clear_vfta = e1000_clear_vfta_generic;
}
-
- /* Set media type */
- /* The 82575 uses bits 22:23 for link mode. The mode can be changed
- * based on the EEPROM. We cannot rely upon device ID. There
- * is no distinguishable difference between fiber and internal
- * SerDes mode on the 82575. There can be an external PHY attached
- * on the SGMII interface. For this, we'll set sgmii_active to true.
- */
- hw->phy.media_type = e1000_media_type_copper;
- dev_spec->sgmii_active = false;
-
- ctrl_ext = rd32(E1000_CTRL_EXT);
- switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
- case E1000_CTRL_EXT_LINK_MODE_SGMII:
- dev_spec->sgmii_active = true;
- break;
- case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
- case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
- hw->phy.media_type = e1000_media_type_internal_serdes;
- break;
- default:
- break;
+ if (hw->mac.type >= e1000_82580)
+ mac->ops.validate_mdi_setting =
+ e1000_validate_mdi_setting_crossover_generic;
+ /* ID LED init */
+ mac->ops.id_led_init = e1000_id_led_init_generic;
+ /* blink LED */
+ mac->ops.blink_led = e1000_blink_led_generic;
+ /* setup LED */
+ mac->ops.setup_led = e1000_setup_led_generic;
+ /* cleanup LED */
+ mac->ops.cleanup_led = e1000_cleanup_led_generic;
+ /* turn on/off LED */
+ mac->ops.led_on = e1000_led_on_generic;
+ mac->ops.led_off = e1000_led_off_generic;
+ /* clear hardware counters */
+ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
+ /* link info */
+ mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
+ /* get thermal sensor data */
+ mac->ops.get_thermal_sensor_data =
+ e1000_get_thermal_sensor_data_generic;
+ mac->ops.init_thermal_sensor_thresh =
+ e1000_init_thermal_sensor_thresh_generic;
+ /* acquire SW_FW sync */
+ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575;
+ mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575;
+ if (mac->type >= e1000_i210) {
+ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210;
+ mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210;
}
- /* mac initialization and operations */
- ret_val = igb_init_mac_params_82575(hw);
- if (ret_val)
- goto out;
+ /* set lan id for port to determine which phy lock to use */
+ hw->mac.ops.set_lan_id(hw);
- /* NVM initialization */
- ret_val = igb_init_nvm_params_82575(hw);
- if (ret_val)
- goto out;
-
- /* if part supports SR-IOV then initialize mailbox parameters */
- switch (mac->type) {
- case e1000_82576:
- case e1000_i350:
- igb_init_mbx_params_pf(hw);
- break;
- default:
- break;
- }
+ return E1000_SUCCESS;
+}
- /* setup PHY parameters */
- ret_val = igb_init_phy_params_82575(hw);
+/**
+ * e1000_init_function_pointers_82575 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82575(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_init_function_pointers_82575");
-out:
- return ret_val;
+ hw->mac.ops.init_params = e1000_init_mac_params_82575;
+ hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
+ hw->phy.ops.init_params = e1000_init_phy_params_82575;
+ hw->mbx.ops.init_params = e1000_init_mbx_params_pf;
}
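e1000_init_function_pointers_82575() only installs the init_params hooks; the MAC, NVM and PHY parameter structures are populated when a caller later invokes them. A hedged usage sketch (example_init_82575 is illustrative, not part of this patch):

	static s32 example_init_82575(struct e1000_hw *hw)
	{
		s32 ret_val;

		e1000_init_function_pointers_82575(hw);

		/* MAC first: it derives the media type the PHY init relies on */
		ret_val = hw->mac.ops.init_params(hw);
		if (ret_val)
			return ret_val;

		ret_val = hw->nvm.ops.init_params(hw);
		if (ret_val)
			return ret_val;

		return hw->phy.ops.init_params(hw);
	}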
/**
- * igb_acquire_phy_82575 - Acquire rights to access PHY
+ * e1000_acquire_phy_82575 - Acquire rights to access PHY
* @hw: pointer to the HW structure
*
- * Acquire access rights to the correct PHY. This is a
- * function pointer entry point called by the api module.
+ * Acquire access rights to the correct PHY.
**/
-static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
+static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
{
u16 mask = E1000_SWFW_PHY0_SM;
+ DEBUGFUNC("e1000_acquire_phy_82575");
+
if (hw->bus.func == E1000_FUNC_1)
mask = E1000_SWFW_PHY1_SM;
else if (hw->bus.func == E1000_FUNC_2)
@@ -533,16 +557,17 @@ static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
}
/**
- * igb_release_phy_82575 - Release rights to access PHY
+ * e1000_release_phy_82575 - Release rights to access PHY
* @hw: pointer to the HW structure
*
- * A wrapper to release access rights to the correct PHY. This is a
- * function pointer entry point called by the api module.
+ * A wrapper to release access rights to the correct PHY.
**/
-static void igb_release_phy_82575(struct e1000_hw *hw)
+static void e1000_release_phy_82575(struct e1000_hw *hw)
{
u16 mask = E1000_SWFW_PHY0_SM;
+ DEBUGFUNC("e1000_release_phy_82575");
+
if (hw->bus.func == E1000_FUNC_1)
mask = E1000_SWFW_PHY1_SM;
else if (hw->bus.func == E1000_FUNC_2)
@@ -554,7 +579,7 @@ static void igb_release_phy_82575(struct e1000_hw *hw)
}
/**
- * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
+ * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
* @hw: pointer to the HW structure
* @offset: register offset to be read
* @data: pointer to the read data
@@ -562,13 +587,15 @@ static void igb_release_phy_82575(struct e1000_hw *hw)
* Reads the PHY register at offset using the serial gigabit media independent
* interface and stores the retrieved information in data.
**/
-static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
u16 *data)
{
s32 ret_val = -E1000_ERR_PARAM;
+ DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
+
if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
- hw_dbg("PHY Address %u is out of range\n", offset);
+ DEBUGOUT1("PHY Address %u is out of range\n", offset);
goto out;
}
@@ -576,7 +603,7 @@ static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
if (ret_val)
goto out;
- ret_val = igb_read_phy_reg_i2c(hw, offset, data);
+ ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
hw->phy.ops.release(hw);
@@ -585,7 +612,7 @@ out:
}
/**
- * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
+ * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write at register offset
@@ -593,14 +620,15 @@ out:
* Writes the data to PHY register at the offset using the serial gigabit
* media independent interface.
**/
-static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
u16 data)
{
s32 ret_val = -E1000_ERR_PARAM;
+ DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
- hw_dbg("PHY Address %d is out of range\n", offset);
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
goto out;
}
@@ -608,7 +636,7 @@ static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
if (ret_val)
goto out;
- ret_val = igb_write_phy_reg_i2c(hw, offset, data);
+ ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
hw->phy.ops.release(hw);
@@ -617,37 +645,44 @@ out:
}
/**
- * igb_get_phy_id_82575 - Retrieve PHY addr and id
+ * e1000_get_phy_id_82575 - Retrieve PHY addr and id
* @hw: pointer to the HW structure
*
* Retrieves the PHY address and ID for both PHYs which do and do not use
* the sgmii interface.
**/
-static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
+static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
+ s32 ret_val = E1000_SUCCESS;
u16 phy_id;
u32 ctrl_ext;
u32 mdic;
- /* For SGMII PHYs, we try the list of possible addresses until
+ DEBUGFUNC("e1000_get_phy_id_82575");
+
+ /* some i354 devices need an extra read for phy id */
+ if (hw->mac.type == e1000_i354)
+ e1000_get_phy_id(hw);
+
+ /*
+ * For SGMII PHYs, we try the list of possible addresses until
* we find one that works. For non-SGMII PHYs
* (e.g. integrated copper PHYs), an address of 1 should
* work. The result of this function should mean phy->phy_addr
* and phy->id are set correctly.
*/
- if (!(igb_sgmii_active_82575(hw))) {
+ if (!e1000_sgmii_active_82575(hw)) {
phy->addr = 1;
- ret_val = igb_get_phy_id(hw);
+ ret_val = e1000_get_phy_id(hw);
goto out;
}
- if (igb_sgmii_uses_mdio_82575(hw)) {
+ if (e1000_sgmii_uses_mdio_82575(hw)) {
switch (hw->mac.type) {
case e1000_82575:
case e1000_82576:
- mdic = rd32(E1000_MDIC);
+ mdic = E1000_READ_REG(hw, E1000_MDIC);
mdic &= E1000_MDIC_PHY_MASK;
phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
break;
@@ -656,7 +691,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
case e1000_i354:
case e1000_i210:
case e1000_i211:
- mdic = rd32(E1000_MDICNFG);
+ mdic = E1000_READ_REG(hw, E1000_MDICNFG);
mdic &= E1000_MDICNFG_PHY_MASK;
phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
break;
@@ -665,31 +700,35 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
goto out;
break;
}
- ret_val = igb_get_phy_id(hw);
+ ret_val = e1000_get_phy_id(hw);
goto out;
}
/* Power on sgmii phy if it is disabled */
- ctrl_ext = rd32(E1000_CTRL_EXT);
- wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
- wrfl();
- msleep(300);
-
- /* The address field in the I2CCMD register is 3 bits and 0 is invalid.
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(300);
+
+ /*
+ * The address field in the I2CCMD register is 3 bits and 0 is invalid.
* Therefore, we need to test 1-7
*/
for (phy->addr = 1; phy->addr < 8; phy->addr++) {
- ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
- if (ret_val == 0) {
- hw_dbg("Vendor ID 0x%08X read at address %u\n",
- phy_id, phy->addr);
- /* At the time of this writing, The M88 part is
+ ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
+ if (ret_val == E1000_SUCCESS) {
+ DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
+ phy_id, phy->addr);
+ /*
+ * At the time of this writing, the M88 part is
* the only supported SGMII PHY product.
*/
if (phy_id == M88_VENDOR)
break;
} else {
- hw_dbg("PHY address %u was unreadable\n", phy->addr);
+ DEBUGOUT1("PHY address %u was unreadable\n",
+ phy->addr);
}
}
@@ -697,49 +736,60 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
if (phy->addr == 8) {
phy->addr = 0;
ret_val = -E1000_ERR_PHY;
- goto out;
} else {
- ret_val = igb_get_phy_id(hw);
+ ret_val = e1000_get_phy_id(hw);
}
/* restore previous sfp cage power state */
- wr32(E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
out:
return ret_val;
}
/**
- * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
+ * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
* @hw: pointer to the HW structure
*
* Resets the PHY using the serial gigabit media independent interface.
**/
-static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
+static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
{
- s32 ret_val;
+ s32 ret_val = E1000_SUCCESS;
+ struct e1000_phy_info *phy = &hw->phy;
- /* This isn't a true "hard" reset, but is the only reset
+ DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
+
+ /*
+ * This isn't a true "hard" reset, but is the only reset
* available to us at this time.
*/
- hw_dbg("Soft resetting SGMII attached PHY...\n");
+ DEBUGOUT("Soft resetting SGMII attached PHY...\n");
+
+ if (!(hw->phy.ops.write_reg))
+ goto out;
- /* SFP documentation requires the following to configure the SPF module
+ /*
+ * SFP documentation requires the following to configure the SFP module
* to work on SGMII. No further documentation is given.
*/
ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
if (ret_val)
goto out;
- ret_val = igb_phy_sw_reset(hw);
+ ret_val = hw->phy.ops.commit(hw);
+ if (ret_val)
+ goto out;
+ if (phy->id == M88E1512_E_PHY_ID)
+ ret_val = e1000_initialize_M88E1512_phy(hw);
out:
return ret_val;
}
/**
- * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
+ * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
* @hw: pointer to the HW structure
* @active: true to enable LPLU, false to disable
*
@@ -751,12 +801,17 @@ out:
* This is a function pointer entry point only called by
* PHY setup routines.
**/
-static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
+static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
+ s32 ret_val = E1000_SUCCESS;
u16 data;
+ DEBUGFUNC("e1000_set_d0_lplu_state_82575");
+
+ if (!(hw->phy.ops.read_reg))
+ goto out;
+
ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
if (ret_val)
goto out;
@@ -764,47 +819,52 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
if (active) {
data |= IGP02E1000_PM_D0_LPLU;
ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
- data);
+ data);
if (ret_val)
goto out;
/* When LPLU is enabled, we should disable SmartSpeed */
ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- &data);
+ &data);
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
- data);
+ data);
if (ret_val)
goto out;
} else {
data &= ~IGP02E1000_PM_D0_LPLU;
ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
- data);
- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ data);
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
*/
if (phy->smart_speed == e1000_smart_speed_on) {
ret_val = phy->ops.read_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG, &data);
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
if (ret_val)
goto out;
data |= IGP01E1000_PSCFR_SMART_SPEED;
ret_val = phy->ops.write_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG, data);
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
if (ret_val)
goto out;
} else if (phy->smart_speed == e1000_smart_speed_off) {
ret_val = phy->ops.read_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG, &data);
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
if (ret_val)
goto out;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = phy->ops.write_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG, data);
+ IGP01E1000_PHY_PORT_CONFIG,
+ data);
if (ret_val)
goto out;
}
@@ -815,7 +875,7 @@ out:
}
/**
- * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
+ * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
* @hw: pointer to the HW structure
* @active: true to enable LPLU, false to disable
*
@@ -827,13 +887,14 @@ out:
* This is a function pointer entry point only called by
* PHY setup routines.
**/
-static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
+static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
- u16 data;
+ u32 data;
+
+ DEBUGFUNC("e1000_set_d0_lplu_state_82580");
- data = rd32(E1000_82580_PHY_POWER_MGMT);
+ data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
if (active) {
data |= E1000_82580_PM_D0_LPLU;
@@ -843,7 +904,8 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
} else {
data &= ~E1000_82580_PM_D0_LPLU;
- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
@@ -851,14 +913,15 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
if (phy->smart_speed == e1000_smart_speed_on)
data |= E1000_82580_PM_SPD;
else if (phy->smart_speed == e1000_smart_speed_off)
- data &= ~E1000_82580_PM_SPD; }
+ data &= ~E1000_82580_PM_SPD;
+ }
- wr32(E1000_82580_PHY_POWER_MGMT, data);
- return ret_val;
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
+ return E1000_SUCCESS;
}
/**
- * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
+ * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3
* @hw: pointer to the HW structure
* @active: boolean used to enable/disable lplu
*
@@ -871,17 +934,19 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
* During driver activity, SmartSpeed should be enabled so performance is
* maintained.
**/
-static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
+s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
- u16 data;
+ u32 data;
- data = rd32(E1000_82580_PHY_POWER_MGMT);
+ DEBUGFUNC("e1000_set_d3_lplu_state_82580");
+
+ data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
if (!active) {
data &= ~E1000_82580_PM_D3_LPLU;
- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
@@ -898,12 +963,12 @@ static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
data &= ~E1000_82580_PM_SPD;
}
- wr32(E1000_82580_PHY_POWER_MGMT, data);
- return ret_val;
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
+ return E1000_SUCCESS;
}
/**
- * igb_acquire_nvm_82575 - Request for access to EEPROM
+ * e1000_acquire_nvm_82575 - Request for access to EEPROM
* @hw: pointer to the HW structure
*
* Acquire the necessary semaphores for exclusive access to the EEPROM.
@@ -911,213 +976,341 @@ static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
* Return successful if access grant bit set, else clear the request for
* EEPROM access and return -E1000_ERR_NVM (-1).
**/
-static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
+static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
{
- s32 ret_val;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_acquire_nvm_82575");
- ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
+ ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
if (ret_val)
goto out;
- ret_val = igb_acquire_nvm(hw);
+ /*
+ * Check for a pending access error that this access may hook on
+ */
+ if (hw->mac.type == e1000_i350) {
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
+ E1000_EECD_TIMEOUT)) {
+ /* Clear all access error flags */
+ E1000_WRITE_REG(hw, E1000_EECD, eecd |
+ E1000_EECD_ERROR_CLR);
+ DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
+ }
+ }
+ if (hw->mac.type == e1000_82580) {
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ if (eecd & E1000_EECD_BLOCKED) {
+ /* Clear access error flag */
+ E1000_WRITE_REG(hw, E1000_EECD, eecd |
+ E1000_EECD_BLOCKED);
+ DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
+ }
+ }
+
+ ret_val = e1000_acquire_nvm_generic(hw);
if (ret_val)
- hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
+ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
out:
return ret_val;
}
/**
- * igb_release_nvm_82575 - Release exclusive access to EEPROM
+ * e1000_release_nvm_82575 - Release exclusive access to EEPROM
* @hw: pointer to the HW structure
*
* Stop any current commands to the EEPROM and clear the EEPROM request bit,
* then release the semaphores acquired.
**/
-static void igb_release_nvm_82575(struct e1000_hw *hw)
+static void e1000_release_nvm_82575(struct e1000_hw *hw)
{
- igb_release_nvm(hw);
- hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
+ DEBUGFUNC("e1000_release_nvm_82575");
+
+ e1000_release_nvm_generic(hw);
+
+ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
}
/**
- * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
+ * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Acquire the SW/FW semaphore to access the PHY or NVM. The mask
* will also specify which port we're acquiring the lock for.
**/
-static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
u32 swmask = mask;
u32 fwmask = mask << 16;
- s32 ret_val = 0;
- s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+ s32 ret_val = E1000_SUCCESS;
+ s32 i = 0, timeout = 200;
+
+ DEBUGFUNC("e1000_acquire_swfw_sync_82575");
while (i < timeout) {
- if (igb_get_hw_semaphore(hw)) {
+ if (e1000_get_hw_semaphore_generic(hw)) {
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
- swfw_sync = rd32(E1000_SW_FW_SYNC);
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
if (!(swfw_sync & (fwmask | swmask)))
break;
- /* Firmware currently using resource (fwmask)
+ /*
+ * Firmware currently using resource (fwmask)
* or other software thread using resource (swmask)
*/
- igb_put_hw_semaphore(hw);
- mdelay(5);
+ e1000_put_hw_semaphore_generic(hw);
+ msec_delay_irq(5);
i++;
}
if (i == timeout) {
- hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
swfw_sync |= swmask;
- wr32(E1000_SW_FW_SYNC, swfw_sync);
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
- igb_put_hw_semaphore(hw);
+ e1000_put_hw_semaphore_generic(hw);
out:
return ret_val;
}
/**
- * igb_release_swfw_sync_82575 - Release SW/FW semaphore
+ * e1000_release_swfw_sync_82575 - Release SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Release the SW/FW semaphore used to access the PHY or NVM. The mask
* will also specify which port we're releasing the lock for.
**/
-static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igb_get_hw_semaphore(hw) != 0);
- /* Empty */
+ DEBUGFUNC("e1000_release_swfw_sync_82575");
+
+ while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
+ ; /* Empty */
- swfw_sync = rd32(E1000_SW_FW_SYNC);
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
swfw_sync &= ~mask;
- wr32(E1000_SW_FW_SYNC, swfw_sync);
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
- igb_put_hw_semaphore(hw);
+ e1000_put_hw_semaphore_generic(hw);
}
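The acquire/release pair above is always used as a bracket around PHY and NVM accesses, as in e1000_read_phy_reg_sgmii_82575() earlier in this file. A minimal sketch of that pattern (example_read_phy_locked is illustrative only):

	static s32 example_read_phy_locked(struct e1000_hw *hw, u32 offset,
					   u16 *data)
	{
		s32 ret_val;

		/* take the SW/FW semaphore guarding this port's PHY */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val = hw->phy.ops.read_reg(hw, offset, data);

		/* always release, even when the read fails */
		hw->phy.ops.release(hw);

		return ret_val;
	}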
/**
- * igb_get_cfg_done_82575 - Read config done bit
+ * e1000_get_cfg_done_82575 - Read config done bit
* @hw: pointer to the HW structure
*
* Read the management control register for the config done bit for
* completion status. NOTE: silicon which is EEPROM-less will fail trying
* to read the config done bit, so an error is *ONLY* logged and returns
- * 0. If we were to return with error, EEPROM-less silicon
+ * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon
* would not be able to be reset or change link.
**/
-static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
+static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
{
s32 timeout = PHY_CFG_TIMEOUT;
- s32 ret_val = 0;
u32 mask = E1000_NVM_CFG_DONE_PORT_0;
- if (hw->bus.func == 1)
+ DEBUGFUNC("e1000_get_cfg_done_82575");
+
+ if (hw->bus.func == E1000_FUNC_1)
mask = E1000_NVM_CFG_DONE_PORT_1;
else if (hw->bus.func == E1000_FUNC_2)
mask = E1000_NVM_CFG_DONE_PORT_2;
else if (hw->bus.func == E1000_FUNC_3)
mask = E1000_NVM_CFG_DONE_PORT_3;
-
while (timeout) {
- if (rd32(E1000_EEMNGCTL) & mask)
+ if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
break;
- msleep(1);
+ msec_delay(1);
timeout--;
}
if (!timeout)
- hw_dbg("MNG configuration cycle has not completed.\n");
+ DEBUGOUT("MNG configuration cycle has not completed.\n");
/* If EEPROM is not marked present, init the PHY manually */
- if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
(hw->phy.type == e1000_phy_igp_3))
- igb_phy_init_script_igp3(hw);
+ e1000_phy_init_script_igp3(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_link_up_info_82575 - Get link speed/duplex info
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * This is a wrapper function, if using the serial gigabit media independent
+ * interface, use PCS to retrieve the link speed and duplex information.
+ * Otherwise, use the generic function to get the link speed and duplex info.
+ **/
+static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_get_link_up_info_82575");
+
+ if (hw->phy.media_type != e1000_media_type_copper)
+ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
+ duplex);
+ else
+ ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
+ duplex);
return ret_val;
}
/**
- * igb_check_for_link_82575 - Check for link
+ * e1000_check_for_link_82575 - Check for link
* @hw: pointer to the HW structure
*
* If sgmii is enabled, then use the pcs register to determine link, otherwise
* use the generic interface for determining link.
**/
-static s32 igb_check_for_link_82575(struct e1000_hw *hw)
+static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
{
s32 ret_val;
u16 speed, duplex;
+ DEBUGFUNC("e1000_check_for_link_82575");
+
if (hw->phy.media_type != e1000_media_type_copper) {
- ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
- &duplex);
- /* Use this flag to determine if link needs to be checked or
- * not. If we have link clear the flag so that we do not
+ ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
+ &duplex);
+ /*
+ * Use this flag to determine if link needs to be checked or
+ * not. If we have link clear the flag so that we do not
* continue to check for link.
*/
hw->mac.get_link_status = !hw->mac.serdes_has_link;
- /* Configure Flow Control now that Auto-Neg has completed.
+ /*
+ * Configure Flow Control now that Auto-Neg has completed.
* First, we need to restore the desired flow control
* settings because we may have had to re-autoneg with a
* different link partner.
*/
- ret_val = igb_config_fc_after_link_up(hw);
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
if (ret_val)
- hw_dbg("Error configuring flow control\n");
+ DEBUGOUT("Error configuring flow control\n");
} else {
- ret_val = igb_check_for_copper_link(hw);
+ ret_val = e1000_check_for_copper_link_generic(hw);
}
return ret_val;
}
/**
- * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
+ * e1000_check_for_link_media_swap - Check which M88E1112 interface linked
+ * @hw: pointer to the HW structure
+ *
+ * Poll the M88E1112 interfaces to see which interface achieved link.
+ */
+static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ u8 port = 0;
+
+ DEBUGFUNC("e1000_check_for_link_media_swap");
+
+ /* Check for copper. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_COPPER;
+
+ /* Check for other. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ if (data & E1000_M88E1112_STATUS_LINK)
+ port = E1000_MEDIA_PORT_OTHER;
+
+ /* Determine if a swap needs to happen. */
+ if (port && (hw->dev_spec._82575.media_port != port)) {
+ hw->dev_spec._82575.media_port = port;
+ hw->dev_spec._82575.media_changed = true;
+ }
+
+ if (port == E1000_MEDIA_PORT_COPPER) {
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+ e1000_check_for_link_82575(hw);
+ } else {
+ e1000_check_for_link_82575(hw);
+ /* reset page to 0 */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
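The media-swap poll above relies on the M88E1112 keeping each media's link status on its own register page. A sketch of the page-select/read/restore pattern it uses (example_read_media_status is illustrative only):

	static s32 example_read_media_status(struct e1000_hw *hw, u16 page,
					     u16 *status)
	{
		s32 ret_val;

		ret_val = hw->phy.ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR,
						page);
		if (ret_val)
			return ret_val;

		ret_val = hw->phy.ops.read_reg(hw, E1000_M88E1112_STATUS,
					       status);

		/* restore page 0 so normal copper accesses keep working */
		hw->phy.ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);

		return ret_val;
	}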
+/**
+ * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown
* @hw: pointer to the HW structure
**/
-void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
+static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw)
{
u32 reg;
+ DEBUGFUNC("e1000_power_up_serdes_link_82575");
if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
- !igb_sgmii_active_82575(hw))
+ !e1000_sgmii_active_82575(hw))
return;
/* Enable PCS to turn on link */
- reg = rd32(E1000_PCS_CFG0);
+ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
reg |= E1000_PCS_CFG_PCS_EN;
- wr32(E1000_PCS_CFG0, reg);
+ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
/* Power up the laser */
- reg = rd32(E1000_CTRL_EXT);
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
reg &= ~E1000_CTRL_EXT_SDP3_DATA;
- wr32(E1000_CTRL_EXT, reg);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
/* flush the write to verify completion */
- wrfl();
- msleep(1);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
}
/**
- * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
+ * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
* @hw: pointer to the HW structure
* @speed: stores the current speed
* @duplex: stores the current duplex
@@ -1125,232 +1318,264 @@ void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
* Using the physical coding sub-layer (PCS), retrieve the current speed and
* duplex, then store the values in the pointers provided.
**/
-static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
- u16 *duplex)
+static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex)
{
struct e1000_mac_info *mac = &hw->mac;
u32 pcs;
+ u32 status;
- /* Set up defaults for the return values of this function */
- mac->serdes_has_link = false;
- *speed = 0;
- *duplex = 0;
+ DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
- /* Read the PCS Status register for link state. For non-copper mode,
+ /*
+ * Read the PCS Status register for link state. For non-copper mode,
* the status register is not accurate. The PCS status register is
* used instead.
*/
- pcs = rd32(E1000_PCS_LSTAT);
+ pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
- /* The link up bit determines when link is up on autoneg. The sync ok
- * gets set once both sides sync up and agree upon link. Stable link
- * can be determined by checking for both link up and link sync ok
+ /*
+ * The link up bit determines when link is up on autoneg.
*/
- if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
+ if (pcs & E1000_PCS_LSTS_LINK_OK) {
mac->serdes_has_link = true;
/* Detect and store PCS speed */
- if (pcs & E1000_PCS_LSTS_SPEED_1000) {
+ if (pcs & E1000_PCS_LSTS_SPEED_1000)
*speed = SPEED_1000;
- } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
+ else if (pcs & E1000_PCS_LSTS_SPEED_100)
*speed = SPEED_100;
- } else {
+ else
*speed = SPEED_10;
- }
/* Detect and store PCS duplex */
- if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
+ if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
*duplex = FULL_DUPLEX;
- } else {
+ else
*duplex = HALF_DUPLEX;
+
+ /* Check if it is an I354 2.5Gb backplane connection. */
+ if (mac->type == e1000_i354) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ *speed = SPEED_2500;
+ *duplex = FULL_DUPLEX;
+ DEBUGOUT("2500 Mbs, ");
+ DEBUGOUT("Full Duplex\n");
+ }
}
+
+ } else {
+ mac->serdes_has_link = false;
+ *speed = 0;
+ *duplex = 0;
}
- return 0;
+ return E1000_SUCCESS;
}
/**
- * igb_shutdown_serdes_link_82575 - Remove link during power down
+ * e1000_shutdown_serdes_link_82575 - Remove link during power down
* @hw: pointer to the HW structure
*
- * In the case of fiber serdes, shut down optics and PCS on driver unload
+ * In the case of serdes, shut down the SFP and PCS on driver unload
* when management pass thru is not enabled.
**/
-void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
+void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
u32 reg;
- if (hw->phy.media_type != e1000_media_type_internal_serdes &&
- igb_sgmii_active_82575(hw))
+ DEBUGFUNC("e1000_shutdown_serdes_link_82575");
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !e1000_sgmii_active_82575(hw))
return;
- if (!igb_enable_mng_pass_thru(hw)) {
+ if (!e1000_enable_mng_pass_thru(hw)) {
/* Disable PCS to turn off link */
- reg = rd32(E1000_PCS_CFG0);
+ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
reg &= ~E1000_PCS_CFG_PCS_EN;
- wr32(E1000_PCS_CFG0, reg);
+ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
/* shutdown the laser */
- reg = rd32(E1000_CTRL_EXT);
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
reg |= E1000_CTRL_EXT_SDP3_DATA;
- wr32(E1000_CTRL_EXT, reg);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
/* flush the write to verify completion */
- wrfl();
- msleep(1);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
}
+
+ return;
}
/**
- * igb_reset_hw_82575 - Reset hardware
+ * e1000_reset_hw_82575 - Reset hardware
* @hw: pointer to the HW structure
*
- * This resets the hardware into a known state. This is a
- * function pointer entry point called by the api module.
+ * This resets the hardware into a known state.
**/
-static s32 igb_reset_hw_82575(struct e1000_hw *hw)
+static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
{
- u32 ctrl, icr;
+ u32 ctrl;
s32 ret_val;
- /* Prevent the PCI-E bus from sticking if there is no TLP connection
+ DEBUGFUNC("e1000_reset_hw_82575");
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
- ret_val = igb_disable_pcie_master(hw);
+ ret_val = e1000_disable_pcie_master_generic(hw);
if (ret_val)
- hw_dbg("PCI-E Master disable polling has failed.\n");
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
/* set the completion timeout for interface */
- ret_val = igb_set_pcie_completion_timeout(hw);
- if (ret_val) {
- hw_dbg("PCI-E Set completion timeout has failed.\n");
- }
+ ret_val = e1000_set_pcie_completion_timeout(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Set completion timeout has failed.\n");
- hw_dbg("Masking off all interrupts\n");
- wr32(E1000_IMC, 0xffffffff);
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
- wr32(E1000_RCTL, 0);
- wr32(E1000_TCTL, E1000_TCTL_PSP);
- wrfl();
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
- msleep(10);
+ msec_delay(10);
- ctrl = rd32(E1000_CTRL);
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
- hw_dbg("Issuing a global reset to MAC\n");
- wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);
+ DEBUGOUT("Issuing a global reset to MAC\n");
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
- ret_val = igb_get_auto_rd_done(hw);
+ ret_val = e1000_get_auto_rd_done_generic(hw);
if (ret_val) {
- /* When auto config read does not complete, do not
+ /*
+ * When auto config read does not complete, do not
* return with an error. This can happen in situations
* where there is no eeprom and prevents getting link.
*/
- hw_dbg("Auto Read Done did not complete\n");
+ DEBUGOUT("Auto Read Done did not complete\n");
}
/* If EEPROM is not present, run manual init scripts */
- if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
- igb_reset_init_script_82575(hw);
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES))
+ e1000_reset_init_script_82575(hw);
/* Clear any pending interrupt events. */
- wr32(E1000_IMC, 0xffffffff);
- icr = rd32(E1000_ICR);
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
/* Install any alternate MAC address into RAR0 */
- ret_val = igb_check_alt_mac_addr(hw);
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
return ret_val;
}
/**
- * igb_init_hw_82575 - Initialize hardware
+ * e1000_init_hw_82575 - Initialize hardware
* @hw: pointer to the HW structure
*
* This inits the hardware readying it for operation.
**/
-static s32 igb_init_hw_82575(struct e1000_hw *hw)
+s32 e1000_init_hw_82575(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val;
u16 i, rar_count = mac->rar_entry_count;
+ DEBUGFUNC("e1000_init_hw_82575");
+
/* Initialize identification LED */
- ret_val = igb_id_led_init(hw);
+ ret_val = mac->ops.id_led_init(hw);
if (ret_val) {
- hw_dbg("Error initializing identification LED\n");
+ DEBUGOUT("Error initializing identification LED\n");
/* This is not fatal and we should not stop init due to this */
}
/* Disabling VLAN filtering */
- hw_dbg("Initializing the IEEE VLAN\n");
- if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
- igb_clear_vfta_i350(hw);
- else
- igb_clear_vfta(hw);
+ DEBUGOUT("Initializing the IEEE VLAN\n");
+ mac->ops.clear_vfta(hw);
/* Setup the receive address */
- igb_init_rx_addrs(hw, rar_count);
+ e1000_init_rx_addrs_generic(hw, rar_count);
/* Zero out the Multicast HASH table */
- hw_dbg("Zeroing the MTA\n");
+ DEBUGOUT("Zeroing the MTA\n");
for (i = 0; i < mac->mta_reg_count; i++)
- array_wr32(E1000_MTA, i, 0);
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
/* Zero out the Unicast HASH table */
- hw_dbg("Zeroing the UTA\n");
+ DEBUGOUT("Zeroing the UTA\n");
for (i = 0; i < mac->uta_reg_count; i++)
- array_wr32(E1000_UTA, i, 0);
+ E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
/* Setup link and flow control */
- ret_val = igb_setup_link(hw);
+ ret_val = mac->ops.setup_link(hw);
+
+ /* Set the default MTU size */
+ hw->dev_spec._82575.mtu = 1500;
- /* Clear all of the statistics registers (clear on read). It is
+ /*
+ * Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
* is no link.
*/
- igb_clear_hw_cntrs_82575(hw);
+ e1000_clear_hw_cntrs_82575(hw);
+
return ret_val;
}
/**
- * igb_setup_copper_link_82575 - Configure copper link settings
+ * e1000_setup_copper_link_82575 - Configure copper link settings
* @hw: pointer to the HW structure
*
* Configures the link for auto-neg or forced speed and duplex. Then we check
* for link; once link is established, calls to configure collision distance
* and flow control are made.
**/
-static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
+static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
{
u32 ctrl;
- s32 ret_val;
+ s32 ret_val;
u32 phpm_reg;
- ctrl = rd32(E1000_CTRL);
+ DEBUGFUNC("e1000_setup_copper_link_82575");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
ctrl |= E1000_CTRL_SLU;
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
- wr32(E1000_CTRL, ctrl);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
- /* Clear Go Link Disconnect bit */
- if (hw->mac.type >= e1000_82580) {
- phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
+ /* Clear Go Link Disconnect bit on supported devices */
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
phpm_reg &= ~E1000_82580_PM_GO_LINKD;
- wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ break;
+ default:
+ break;
}
- ret_val = igb_setup_serdes_link_82575(hw);
+ ret_val = e1000_setup_serdes_link_82575(hw);
if (ret_val)
goto out;
- if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
+ if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
/* allow time for SFP cage to power up phy */
- msleep(300);
+ msec_delay(300);
ret_val = hw->phy.ops.reset(hw);
if (ret_val) {
- hw_dbg("Error resetting the PHY.\n");
+ DEBUGOUT("Error resetting the PHY.\n");
goto out;
}
}
@@ -1360,20 +1585,22 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
switch (hw->phy.id) {
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
- case M88E1545_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
case I210_I_PHY_ID:
- ret_val = igb_copper_link_setup_m88_gen2(hw);
+ ret_val = e1000_copper_link_setup_m88_gen2(hw);
break;
default:
- ret_val = igb_copper_link_setup_m88(hw);
+ ret_val = e1000_copper_link_setup_m88(hw);
break;
}
break;
case e1000_phy_igp_3:
- ret_val = igb_copper_link_setup_igp(hw);
+ ret_val = e1000_copper_link_setup_igp(hw);
break;
case e1000_phy_82580:
- ret_val = igb_copper_link_setup_82580(hw);
+ ret_val = e1000_copper_link_setup_82577(hw);
break;
default:
ret_val = -E1000_ERR_PHY;
@@ -1383,13 +1610,13 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
if (ret_val)
goto out;
- ret_val = igb_setup_copper_link(hw);
+ ret_val = e1000_setup_copper_link_generic(hw);
out:
return ret_val;
}
/**
- * igb_setup_serdes_link_82575 - Setup link for serdes
+ * e1000_setup_serdes_link_82575 - Setup link for serdes
* @hw: pointer to the HW structure
*
* Configure the physical coding sub-layer (PCS) link. The PCS link is
@@ -1397,45 +1624,40 @@ out:
* interface (sgmii), or serdes fiber is being used. Configures the link
* for auto-negotiation or forces speed/duplex.
**/
-static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
+static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
{
u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
bool pcs_autoneg;
s32 ret_val = E1000_SUCCESS;
u16 data;
+ DEBUGFUNC("e1000_setup_serdes_link_82575");
+
if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
- !igb_sgmii_active_82575(hw))
+ !e1000_sgmii_active_82575(hw))
return ret_val;
-
- /* On the 82575, SerDes loopback mode persists until it is
+ /*
+ * On the 82575, SerDes loopback mode persists until it is
* explicitly turned off or a power cycle is performed. A read to
* the register does not indicate its status. Therefore, we ensure
* loopback mode is disabled during initialization.
*/
- wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+ E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
- /* power on the sfp cage if present and turn on I2C */
- ctrl_ext = rd32(E1000_CTRL_EXT);
+ /* power on the sfp cage if present */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
- ctrl_ext |= E1000_CTRL_I2C_ENA;
- wr32(E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
- ctrl_reg = rd32(E1000_CTRL);
+ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
ctrl_reg |= E1000_CTRL_SLU;
- if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
- /* set both sw defined pins */
+ /* set both sw defined pins on 82575/82576*/
+ if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576)
ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
- /* Set switch control to serdes energy detect */
- reg = rd32(E1000_CONNSW);
- reg |= E1000_CONNSW_ENRGSRC;
- wr32(E1000_CONNSW, reg);
- }
-
- reg = rd32(E1000_PCS_LCTL);
+ reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
/* default pcs_autoneg to the same setting as mac autoneg */
pcs_autoneg = hw->mac.autoneg;
@@ -1450,12 +1672,13 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
/* disable PCS autoneg and support parallel detect only */
pcs_autoneg = false;
+ /* fall through to default case */
default:
if (hw->mac.type == e1000_82575 ||
hw->mac.type == e1000_82576) {
ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
if (ret_val) {
- printk(KERN_DEBUG "NVM Read Error\n\n");
+ DEBUGOUT("NVM Read Error\n");
return ret_val;
}
@@ -1463,27 +1686,29 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
pcs_autoneg = false;
}
- /* non-SGMII modes only supports a speed of 1000/Full for the
+ /*
+ * non-SGMII modes only support a speed of 1000/Full for the
* link so it is best to just force the MAC and let the pcs
* link either autoneg or be forced to 1000/Full
*/
ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
- E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+ E1000_CTRL_FD | E1000_CTRL_FRCDPX;
/* set speed of 1000/Full if speed/duplex is forced */
reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
break;
}
- wr32(E1000_CTRL, ctrl_reg);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
- /* New SerDes mode allows for forcing speed or autonegotiating speed
+ /*
+ * New SerDes mode allows for forcing speed or autonegotiating speed
* at 1gb. Autoneg should be default set by most drivers. This is the
* mode that will be compatible with older link partners and switches.
* However, both are supported by the hardware and some drivers/tools.
*/
reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
- E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
+ E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
if (pcs_autoneg) {
/* Set PCS register for autoneg */
@@ -1494,8 +1719,9 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
/* Configure flow control advertisement for autoneg */
- anadv_reg = rd32(E1000_PCS_ANADV);
+ anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
+
switch (hw->fc.requested_mode) {
case e1000_fc_full:
case e1000_fc_rx_pause:
@@ -1508,251 +1734,480 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
default:
break;
}
- wr32(E1000_PCS_ANADV, anadv_reg);
- hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
+ E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg);
+
+ DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
} else {
/* Set PCS register for forced link */
- reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
+ reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
/* Force flow control for forced link */
reg |= E1000_PCS_LCTL_FORCE_FCTRL;
- hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
+ DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
}
- wr32(E1000_PCS_LCTL, reg);
+ E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
+
+ if (!pcs_autoneg && !e1000_sgmii_active_82575(hw))
+ e1000_force_mac_fc_generic(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_media_type_82575 - derives current media type.
+ * @hw: pointer to the HW structure
+ *
+ * The media type is chosen reflecting a few settings.
+ * The following are taken into account:
+ * - link mode set in the current port Init Control Word #3
+ * - current link mode settings in CSR register
+ * - MDIO vs. I2C PHY control interface chosen
+ * - SFP module media type
+ **/
+static s32 e1000_get_media_type_82575(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl_ext = 0;
+ u32 link_mode = 0;
+
+ /* Set internal phy as default */
+ dev_spec->sgmii_active = false;
+ dev_spec->module_plugged = false;
+
+ /* Get CSR setting */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
+ /* extract link mode setting */
+ link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
+
+ switch (link_mode) {
+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ break;
+ case E1000_CTRL_EXT_LINK_MODE_GMII:
+ hw->phy.media_type = e1000_media_type_copper;
+ break;
+ case E1000_CTRL_EXT_LINK_MODE_SGMII:
+ /* Get phy control interface type set (MDIO vs. I2C)*/
+ if (e1000_sgmii_uses_mdio_82575(hw)) {
+ hw->phy.media_type = e1000_media_type_copper;
+ dev_spec->sgmii_active = true;
+ break;
+ }
+ /* fall through for I2C based SGMII */
+ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+ /* read media type from SFP EEPROM */
+ ret_val = e1000_set_sfp_media_type_82575(hw);
+ if ((ret_val != E1000_SUCCESS) ||
+ (hw->phy.media_type == e1000_media_type_unknown)) {
+ /*
+ * If media type was not identified then return media
+ * type defined by the CTRL_EXT settings.
+ */
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+
+ if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
+ hw->phy.media_type = e1000_media_type_copper;
+ dev_spec->sgmii_active = true;
+ }
- if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
- igb_force_mac_fc(hw);
+ break;
+ }
+
+ /* do not change link mode for 100BaseFX */
+ if (dev_spec->eth_flags.e100_base_fx)
+ break;
+
+ /* change current link mode setting */
+ ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
+
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
+ else
+ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+ break;
+ }
return ret_val;
}
/**
- * igb_sgmii_active_82575 - Return sgmii state
+ * e1000_set_sfp_media_type_82575 - derives SFP module media type.
+ * @hw: pointer to the HW structure
+ *
+ * The media type is chosen based on the SFP module compatibility flags
+ * retrieved from the SFP ID EEPROM.
+ **/
+static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_ERR_CONFIG;
+ u32 ctrl_ext = 0;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags;
+ u8 tranceiver_type = 0;
+ s32 timeout = 3;
+
+ /* Turn I2C interface ON and power on sfp cage */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
+
+ E1000_WRITE_FLUSH(hw);
+
+ /* Read SFP module data */
+ while (timeout) {
+ ret_val = e1000_read_sfp_data_byte(hw,
+ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
+ &tranceiver_type);
+ if (ret_val == E1000_SUCCESS)
+ break;
+ msec_delay(100);
+ timeout--;
+ }
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+
+ ret_val = e1000_read_sfp_data_byte(hw,
+ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
+ (u8 *)eth_flags);
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+
+ /* Check if there is some SFP module plugged and powered */
+ if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
+ (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
+ dev_spec->module_plugged = true;
+ if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ } else if (eth_flags->e100_base_fx) {
+ dev_spec->sgmii_active = true;
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ } else if (eth_flags->e1000_base_t) {
+ dev_spec->sgmii_active = true;
+ hw->phy.media_type = e1000_media_type_copper;
+ } else {
+ hw->phy.media_type = e1000_media_type_unknown;
+ DEBUGOUT("PHY module has not been recognized\n");
+ goto out;
+ }
+ } else {
+ hw->phy.media_type = e1000_media_type_unknown;
+ }
+ ret_val = E1000_SUCCESS;
+out:
+ /* Restore I2C interface setting */
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ return ret_val;
+}
+
+/**
+ * e1000_valid_led_default_82575 - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_led_default_82575");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
+ switch (hw->phy.media_type) {
+ case e1000_media_type_internal_serdes:
+ *data = ID_LED_DEFAULT_82575_SERDES;
+ break;
+ case e1000_media_type_copper:
+ default:
+ *data = ID_LED_DEFAULT;
+ break;
+ }
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_sgmii_active_82575 - Return sgmii state
* @hw: pointer to the HW structure
*
* 82575 silicon has a serialized gigabit media independent interface (sgmii)
* which can be enabled for use in the embedded applications. Simply
* return the current state of the sgmii interface.
**/
-static bool igb_sgmii_active_82575(struct e1000_hw *hw)
+static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
{
struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
return dev_spec->sgmii_active;
}
/**
- * igb_reset_init_script_82575 - Inits HW defaults after reset
+ * e1000_reset_init_script_82575 - Inits HW defaults after reset
* @hw: pointer to the HW structure
*
* Inits recommended HW defaults after a reset when there is no EEPROM
* detected. This is only for the 82575.
**/
-static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
+static s32 e1000_reset_init_script_82575(struct e1000_hw *hw)
{
+ DEBUGFUNC("e1000_reset_init_script_82575");
+
if (hw->mac.type == e1000_82575) {
- hw_dbg("Running reset init script for 82575\n");
+ DEBUGOUT("Running reset init script for 82575\n");
/* SerDes configuration via SERDESCTRL */
- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
- igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15);
/* CCM configuration via CCMCTL register */
- igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
- igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00);
/* PCIe lanes configuration */
- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
- igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81);
/* PCIe PLL Configuration */
- igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
- igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
- igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00);
+ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00);
}
- return 0;
+ return E1000_SUCCESS;
}
/**
- * igb_read_mac_addr_82575 - Read device MAC address
+ * e1000_read_mac_addr_82575 - Read device MAC address
* @hw: pointer to the HW structure
**/
-static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
+static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
{
- s32 ret_val = 0;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_read_mac_addr_82575");
- /* If there's an alternate MAC address place it in RAR0
+ /*
+ * If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
* address.
*/
- ret_val = igb_check_alt_mac_addr(hw);
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
if (ret_val)
goto out;
- ret_val = igb_read_mac_addr(hw);
+ ret_val = e1000_read_mac_addr_generic(hw);
out:
return ret_val;
}
/**
- * igb_power_down_phy_copper_82575 - Remove link during PHY power down
+ * e1000_config_collision_dist_82575 - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup.
+ **/
+static void e1000_config_collision_dist_82575(struct e1000_hw *hw)
+{
+ u32 tctl_ext;
+
+ DEBUGFUNC("e1000_config_collision_dist_82575");
+
+ tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT);
+
+ tctl_ext &= ~E1000_TCTL_EXT_COLD;
+ tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT;
+
+ E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
* @hw: pointer to the HW structure
*
* In the case of a PHY power down to save power, or to turn off link during a
* driver unload, or wake on lan is not enabled, remove the link.
**/
-void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
+static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
{
+ struct e1000_phy_info *phy = &hw->phy;
+
+ if (!(phy->ops.check_reset_block))
+ return;
+
/* If the management interface is not enabled, then power down */
- if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
- igb_power_down_phy_copper(hw);
+ if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw)))
+ e1000_power_down_phy_copper(hw);
+
+ return;
}
/**
- * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
+ * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
* @hw: pointer to the HW structure
*
* Clears the hardware counters by reading the counter registers.
**/
-static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
-{
- igb_clear_hw_cntrs_base(hw);
-
- rd32(E1000_PRC64);
- rd32(E1000_PRC127);
- rd32(E1000_PRC255);
- rd32(E1000_PRC511);
- rd32(E1000_PRC1023);
- rd32(E1000_PRC1522);
- rd32(E1000_PTC64);
- rd32(E1000_PTC127);
- rd32(E1000_PTC255);
- rd32(E1000_PTC511);
- rd32(E1000_PTC1023);
- rd32(E1000_PTC1522);
-
- rd32(E1000_ALGNERRC);
- rd32(E1000_RXERRC);
- rd32(E1000_TNCRS);
- rd32(E1000_CEXTERR);
- rd32(E1000_TSCTC);
- rd32(E1000_TSCTFC);
-
- rd32(E1000_MGTPRC);
- rd32(E1000_MGTPDC);
- rd32(E1000_MGTPTC);
-
- rd32(E1000_IAC);
- rd32(E1000_ICRXOC);
-
- rd32(E1000_ICRXPTC);
- rd32(E1000_ICRXATC);
- rd32(E1000_ICTXPTC);
- rd32(E1000_ICTXATC);
- rd32(E1000_ICTXQEC);
- rd32(E1000_ICTXQMTC);
- rd32(E1000_ICRXDMTC);
-
- rd32(E1000_CBTMPC);
- rd32(E1000_HTDPMC);
- rd32(E1000_CBRMPC);
- rd32(E1000_RPTHC);
- rd32(E1000_HGPTC);
- rd32(E1000_HTCBDPC);
- rd32(E1000_HGORCL);
- rd32(E1000_HGORCH);
- rd32(E1000_HGOTCL);
- rd32(E1000_HGOTCH);
- rd32(E1000_LENERRS);
+static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_clear_hw_cntrs_82575");
+
+ e1000_clear_hw_cntrs_base_generic(hw);
+
+ E1000_READ_REG(hw, E1000_PRC64);
+ E1000_READ_REG(hw, E1000_PRC127);
+ E1000_READ_REG(hw, E1000_PRC255);
+ E1000_READ_REG(hw, E1000_PRC511);
+ E1000_READ_REG(hw, E1000_PRC1023);
+ E1000_READ_REG(hw, E1000_PRC1522);
+ E1000_READ_REG(hw, E1000_PTC64);
+ E1000_READ_REG(hw, E1000_PTC127);
+ E1000_READ_REG(hw, E1000_PTC255);
+ E1000_READ_REG(hw, E1000_PTC511);
+ E1000_READ_REG(hw, E1000_PTC1023);
+ E1000_READ_REG(hw, E1000_PTC1522);
+
+ E1000_READ_REG(hw, E1000_ALGNERRC);
+ E1000_READ_REG(hw, E1000_RXERRC);
+ E1000_READ_REG(hw, E1000_TNCRS);
+ E1000_READ_REG(hw, E1000_CEXTERR);
+ E1000_READ_REG(hw, E1000_TSCTC);
+ E1000_READ_REG(hw, E1000_TSCTFC);
+
+ E1000_READ_REG(hw, E1000_MGTPRC);
+ E1000_READ_REG(hw, E1000_MGTPDC);
+ E1000_READ_REG(hw, E1000_MGTPTC);
+
+ E1000_READ_REG(hw, E1000_IAC);
+ E1000_READ_REG(hw, E1000_ICRXOC);
+
+ E1000_READ_REG(hw, E1000_ICRXPTC);
+ E1000_READ_REG(hw, E1000_ICRXATC);
+ E1000_READ_REG(hw, E1000_ICTXPTC);
+ E1000_READ_REG(hw, E1000_ICTXATC);
+ E1000_READ_REG(hw, E1000_ICTXQEC);
+ E1000_READ_REG(hw, E1000_ICTXQMTC);
+ E1000_READ_REG(hw, E1000_ICRXDMTC);
+
+ E1000_READ_REG(hw, E1000_CBTMPC);
+ E1000_READ_REG(hw, E1000_HTDPMC);
+ E1000_READ_REG(hw, E1000_CBRMPC);
+ E1000_READ_REG(hw, E1000_RPTHC);
+ E1000_READ_REG(hw, E1000_HGPTC);
+ E1000_READ_REG(hw, E1000_HTCBDPC);
+ E1000_READ_REG(hw, E1000_HGORCL);
+ E1000_READ_REG(hw, E1000_HGORCH);
+ E1000_READ_REG(hw, E1000_HGOTCL);
+ E1000_READ_REG(hw, E1000_HGOTCH);
+ E1000_READ_REG(hw, E1000_LENERRS);
/* This register should not be read in copper configurations */
- if (hw->phy.media_type == e1000_media_type_internal_serdes ||
- igb_sgmii_active_82575(hw))
- rd32(E1000_SCVPC);
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
+ e1000_sgmii_active_82575(hw))
+ E1000_READ_REG(hw, E1000_SCVPC);
}
/**
- * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
+ * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable
* @hw: pointer to the HW structure
*
- * After rx enable if managability is enabled then there is likely some
+ * After Rx enable, if manageability is enabled then there is likely some
* bad data at the start of the fifo and possibly in the DMA fifo. This
* function clears the fifos and flushes any packets that came in as rx was
* being enabled.
**/
-void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
+void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
{
u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
int i, ms_wait;
+ DEBUGFUNC("e1000_rx_fifo_flush_82575");
+
+ /* disable IPv6 options as per hardware errata */
+ rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+ rfctl |= E1000_RFCTL_IPV6_EX_DIS;
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
+
if (hw->mac.type != e1000_82575 ||
- !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
+ !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
return;
- /* Disable all RX queues */
+ /* Disable all Rx queues */
for (i = 0; i < 4; i++) {
- rxdctl[i] = rd32(E1000_RXDCTL(i));
- wr32(E1000_RXDCTL(i),
- rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
+ rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
+ E1000_WRITE_REG(hw, E1000_RXDCTL(i),
+ rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
}
/* Poll all queues to verify they have shut down */
for (ms_wait = 0; ms_wait < 10; ms_wait++) {
- msleep(1);
+ msec_delay(1);
rx_enabled = 0;
for (i = 0; i < 4; i++)
- rx_enabled |= rd32(E1000_RXDCTL(i));
+ rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
break;
}
if (ms_wait == 10)
- hw_dbg("Queue disable timed out after 10ms\n");
+ DEBUGOUT("Queue disable timed out after 10ms\n");
/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
* incoming packets are rejected. Set enable and wait 2ms so that
* any packet that was coming in as RCTL.EN was set is flushed
*/
- rfctl = rd32(E1000_RFCTL);
- wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
- rlpml = rd32(E1000_RLPML);
- wr32(E1000_RLPML, 0);
+ rlpml = E1000_READ_REG(hw, E1000_RLPML);
+ E1000_WRITE_REG(hw, E1000_RLPML, 0);
- rctl = rd32(E1000_RCTL);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
temp_rctl |= E1000_RCTL_LPE;
- wr32(E1000_RCTL, temp_rctl);
- wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
- wrfl();
- msleep(2);
+ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(2);
- /* Enable RX queues that were previously enabled and restore our
+ /* Enable Rx queues that were previously enabled and restore our
* previous state
*/
for (i = 0; i < 4; i++)
- wr32(E1000_RXDCTL(i), rxdctl[i]);
- wr32(E1000_RCTL, rctl);
- wrfl();
+ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ E1000_WRITE_FLUSH(hw);
- wr32(E1000_RLPML, rlpml);
- wr32(E1000_RFCTL, rfctl);
+ E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
+ E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
/* Flush receive errors generated by workaround */
- rd32(E1000_ROC);
- rd32(E1000_RNBC);
- rd32(E1000_MPC);
+ E1000_READ_REG(hw, E1000_ROC);
+ E1000_READ_REG(hw, E1000_RNBC);
+ E1000_READ_REG(hw, E1000_MPC);
}
/**
- * igb_set_pcie_completion_timeout - set pci-e completion timeout
+ * e1000_set_pcie_completion_timeout - set pci-e completion timeout
* @hw: pointer to the HW structure
*
* The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
@@ -1761,17 +2216,18 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
* increase the value to either 10ms to 200ms for capability version 1 config,
* or 16ms to 55ms for version 2.
**/
-static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
+static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
{
- u32 gcr = rd32(E1000_GCR);
- s32 ret_val = 0;
+ u32 gcr = E1000_READ_REG(hw, E1000_GCR);
+ s32 ret_val = E1000_SUCCESS;
u16 pcie_devctl2;
/* only take action if timeout value is defaulted to 0 */
if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
goto out;
- /* if capabilities version is type 1 we can write the
+ /*
+ * if capabilities version is type 1 we can write the
* timeout of 10ms to 200ms through the GCR register
*/
if (!(gcr & E1000_GCR_CAP_VER2)) {
@@ -1779,36 +2235,37 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
goto out;
}
- /* for version 2 capabilities we need to write the config space
+ /*
+ * for version 2 capabilities we need to write the config space
* directly in order to set the completion timeout value for
* 16ms to 55ms
*/
- ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
+ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+ &pcie_devctl2);
if (ret_val)
goto out;
pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
- ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
+ ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+ &pcie_devctl2);
out:
/* disable completion timeout resend */
gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
- wr32(E1000_GCR, gcr);
+ E1000_WRITE_REG(hw, E1000_GCR, gcr);
return ret_val;
}
/**
- * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
+ * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
* @hw: pointer to the hardware struct
* @enable: state to enter, either enabled or disabled
* @pf: Physical Function pool - do not set anti-spoofing for the PF
*
* enables/disables L2 switch anti-spoofing functionality.
**/
-void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
{
u32 reg_val, reg_offset;
@@ -1824,7 +2281,7 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
return;
}
- reg_val = rd32(reg_offset);
+ reg_val = E1000_READ_REG(hw, reg_offset);
if (enable) {
reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
E1000_DTXSWC_VLAN_SPOOF_MASK);
@@ -1836,66 +2293,67 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
E1000_DTXSWC_VLAN_SPOOF_MASK);
}
- wr32(reg_offset, reg_val);
+ E1000_WRITE_REG(hw, reg_offset, reg_val);
}
/**
- * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
+ * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
* @hw: pointer to the hardware struct
* @enable: state to enter, either enabled or disabled
*
* enables/disables L2 switch loopback functionality.
**/
-void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
{
u32 dtxswc;
switch (hw->mac.type) {
case e1000_82576:
- dtxswc = rd32(E1000_DTXSWC);
+ dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
if (enable)
dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
else
dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
- wr32(E1000_DTXSWC, dtxswc);
+ E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
break;
- case e1000_i354:
case e1000_i350:
- dtxswc = rd32(E1000_TXSWC);
+ case e1000_i354:
+ dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
if (enable)
dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
else
dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
- wr32(E1000_TXSWC, dtxswc);
+ E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
break;
default:
/* Currently no other hardware supports loopback */
break;
}
+
}
/**
- * igb_vmdq_set_replication_pf - enable or disable vmdq replication
+ * e1000_vmdq_set_replication_pf - enable or disable vmdq replication
* @hw: pointer to the hardware struct
* @enable: state to enter, either enabled or disabled
*
* enables/disables replication of packets across multiple pools.
**/
-void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
{
- u32 vt_ctl = rd32(E1000_VT_CTL);
+ u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
if (enable)
vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
else
vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
- wr32(E1000_VT_CTL, vt_ctl);
+ E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
}
/**
- * igb_read_phy_reg_82580 - Read 82580 MDI control register
+ * e1000_read_phy_reg_82580 - Read 82580 MDI control register
* @hw: pointer to the HW structure
* @offset: register offset to be read
* @data: pointer to the read data
@@ -1903,15 +2361,17 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
* Reads the MDI control register in the PHY at offset and stores the
* information read to data.
**/
-static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
+static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
{
s32 ret_val;
+ DEBUGFUNC("e1000_read_phy_reg_82580");
+
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
goto out;
- ret_val = igb_read_phy_reg_mdic(hw, offset, data);
+ ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
hw->phy.ops.release(hw);
@@ -1920,23 +2380,24 @@ out:
}
/**
- * igb_write_phy_reg_82580 - Write 82580 MDI control register
+ * e1000_write_phy_reg_82580 - Write 82580 MDI control register
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write to register at offset
*
* Writes data to MDI control register in the PHY at offset.
**/
-static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
+static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
{
s32 ret_val;
+ DEBUGFUNC("e1000_write_phy_reg_82580");
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
goto out;
- ret_val = igb_write_phy_reg_mdic(hw, offset, data);
+ ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
hw->phy.ops.release(hw);
@@ -1945,124 +2406,133 @@ out:
}
/**
- * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
+ * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
* @hw: pointer to the HW structure
*
* This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
* the values found in the EEPROM. This addresses an issue in which these
* bits are not restored from EEPROM after reset.
**/
-static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
+static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw)
{
- s32 ret_val = 0;
+ s32 ret_val = E1000_SUCCESS;
u32 mdicnfg;
u16 nvm_data = 0;
+ DEBUGFUNC("e1000_reset_mdicnfg_82580");
+
if (hw->mac.type != e1000_82580)
goto out;
- if (!igb_sgmii_active_82575(hw))
+ if (!e1000_sgmii_active_82575(hw))
goto out;
ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
&nvm_data);
if (ret_val) {
- hw_dbg("NVM Read Error\n");
+ DEBUGOUT("NVM Read Error\n");
goto out;
}
- mdicnfg = rd32(E1000_MDICNFG);
+ mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
if (nvm_data & NVM_WORD24_EXT_MDIO)
mdicnfg |= E1000_MDICNFG_EXT_MDIO;
if (nvm_data & NVM_WORD24_COM_MDIO)
mdicnfg |= E1000_MDICNFG_COM_MDIO;
- wr32(E1000_MDICNFG, mdicnfg);
+ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
out:
return ret_val;
}
/**
- * igb_reset_hw_82580 - Reset hardware
+ * e1000_reset_hw_82580 - Reset hardware
* @hw: pointer to the HW structure
*
* This resets the function or the entire device (all ports, etc.)
* to a known state.
**/
-static s32 igb_reset_hw_82580(struct e1000_hw *hw)
+static s32 e1000_reset_hw_82580(struct e1000_hw *hw)
{
- s32 ret_val = 0;
+ s32 ret_val = E1000_SUCCESS;
/* BH SW mailbox bit in SW_FW_SYNC */
u16 swmbsw_mask = E1000_SW_SYNCH_MB;
- u32 ctrl, icr;
+ u32 ctrl;
bool global_device_reset = hw->dev_spec._82575.global_device_reset;
+ DEBUGFUNC("e1000_reset_hw_82580");
hw->dev_spec._82575.global_device_reset = false;
- /* due to hw errata, global device reset doesn't always
- * work on 82580
- */
+ /* 82580 does not reliably do global_device_reset due to hw errata */
if (hw->mac.type == e1000_82580)
global_device_reset = false;
/* Get current control state. */
- ctrl = rd32(E1000_CTRL);
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
- /* Prevent the PCI-E bus from sticking if there is no TLP connection
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
- ret_val = igb_disable_pcie_master(hw);
+ ret_val = e1000_disable_pcie_master_generic(hw);
if (ret_val)
- hw_dbg("PCI-E Master disable polling has failed.\n");
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
- hw_dbg("Masking off all interrupts\n");
- wr32(E1000_IMC, 0xffffffff);
- wr32(E1000_RCTL, 0);
- wr32(E1000_TCTL, E1000_TCTL_PSP);
- wrfl();
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
- msleep(10);
+ msec_delay(10);
/* Determine whether or not a global dev reset is requested */
- if (global_device_reset &&
- hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
+ if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw,
+ swmbsw_mask))
global_device_reset = false;
- if (global_device_reset &&
- !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
+ if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) &
+ E1000_STAT_DEV_RST_SET))
ctrl |= E1000_CTRL_DEV_RST;
else
ctrl |= E1000_CTRL_RST;
- wr32(E1000_CTRL, ctrl);
- wrfl();
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
- /* Add delay to insure DEV_RST has time to complete */
- if (global_device_reset)
- msleep(5);
+ switch (hw->device_id) {
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ break;
+ default:
+ E1000_WRITE_FLUSH(hw);
+ break;
+ }
+
+ /* Add delay to ensure DEV_RST or RST has time to complete */
+ msec_delay(5);
- ret_val = igb_get_auto_rd_done(hw);
+ ret_val = e1000_get_auto_rd_done_generic(hw);
if (ret_val) {
- /* When auto config read does not complete, do not
+ /*
+ * When auto config read does not complete, do not
* return with an error. This can happen in situations
* where there is no eeprom and prevents getting link.
*/
- hw_dbg("Auto Read Done did not complete\n");
+ DEBUGOUT("Auto Read Done did not complete\n");
}
/* clear global device reset status bit */
- wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
+ E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);
/* Clear any pending interrupt events. */
- wr32(E1000_IMC, 0xffffffff);
- icr = rd32(E1000_ICR);
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+ E1000_READ_REG(hw, E1000_ICR);
- ret_val = igb_reset_mdicnfg_82580(hw);
+ ret_val = e1000_reset_mdicnfg_82580(hw);
if (ret_val)
- hw_dbg("Could not reset MDICNFG based on EEPROM\n");
+ DEBUGOUT("Could not reset MDICNFG based on EEPROM\n");
/* Install any alternate MAC address into RAR0 */
- ret_val = igb_check_alt_mac_addr(hw);
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
/* Release semaphore */
if (global_device_reset)
@@ -2072,7 +2542,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
}
/**
- * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
+ * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size
* @data: data received by reading RXPBS register
*
* The 82580 uses a table based approach for packet buffer allocation sizes.
@@ -2081,7 +2551,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
* 0x0 36 72 144 1 2 4 8 16
* 0x8 35 70 140 rsv rsv rsv rsv rsv
*/
-u16 igb_rxpbs_adjust_82580(u32 data)
+u16 e1000_rxpbs_adjust_82580(u32 data)
{
u16 ret_val = 0;
@@ -2092,7 +2562,7 @@ u16 igb_rxpbs_adjust_82580(u32 data)
}
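The body of the adjustment helper is elided by the hunk, but the table in the comment above fully describes the mapping from the RXPBS encoding to the actual packet buffer size. A self-contained sketch of that table-based lookup (the array and function names here are illustrative, not the driver's):

	/* illustrative sketch of the table described in the comment above */
	static const u16 rxpbs_table_82580_sketch[] = {
		36, 72, 144, 1, 2, 4, 8, 16,	/* encodings 0x0 through 0x7 */
		35, 70, 140			/* encodings 0x8 through 0xA */
	};

	static u16 rxpbs_adjust_sketch(u32 data)
	{
		if (data < ARRAY_SIZE(rxpbs_table_82580_sketch))
			return rxpbs_table_82580_sketch[data];
		return 0;	/* reserved encodings map to 0 */
	}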
/**
- * igb_validate_nvm_checksum_with_offset - Validate EEPROM
+ * e1000_validate_nvm_checksum_with_offset - Validate EEPROM
* checksum
* @hw: pointer to the HW structure
* @offset: offset in words of the checksum protected region
@@ -2100,24 +2570,25 @@ u16 igb_rxpbs_adjust_82580(u32 data)
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
**/
-static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
- u16 offset)
+s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
{
- s32 ret_val = 0;
+ s32 ret_val = E1000_SUCCESS;
u16 checksum = 0;
u16 i, nvm_data;
+ DEBUGFUNC("e1000_validate_nvm_checksum_with_offset");
+
for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
if (ret_val) {
- hw_dbg("NVM Read Error\n");
+ DEBUGOUT("NVM Read Error\n");
goto out;
}
checksum += nvm_data;
}
if (checksum != (u16) NVM_SUM) {
- hw_dbg("NVM Checksum Invalid\n");
+ DEBUGOUT("NVM Checksum Invalid\n");
ret_val = -E1000_ERR_NVM;
goto out;
}
@@ -2127,7 +2598,7 @@ out:
}
/**
- * igb_update_nvm_checksum_with_offset - Update EEPROM
+ * e1000_update_nvm_checksum_with_offset - Update EEPROM
* checksum
* @hw: pointer to the HW structure
* @offset: offset in words of the checksum protected region
@@ -2136,63 +2607,66 @@ out:
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM.
**/
-static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
{
s32 ret_val;
u16 checksum = 0;
u16 i, nvm_data;
+ DEBUGFUNC("e1000_update_nvm_checksum_with_offset");
+
for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
if (ret_val) {
- hw_dbg("NVM Read Error while updating checksum.\n");
+ DEBUGOUT("NVM Read Error while updating checksum.\n");
goto out;
}
checksum += nvm_data;
}
checksum = (u16) NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
- &checksum);
+ &checksum);
if (ret_val)
- hw_dbg("NVM Write Error while updating checksum.\n");
+ DEBUGOUT("NVM Write Error while updating checksum.\n");
out:
return ret_val;
}
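Both the validate and update helpers above rely on the same invariant: the 16-bit words of the protected region, including the checksum word itself, must sum to NVM_SUM (0xBABA). The update path therefore stores 0xBABA minus the running sum. A small stand-alone illustration of that arithmetic (not driver code):

	/* illustration only: value to store in the checksum word so that
	 * the whole region, checksum included, sums to 0xBABA */
	static u16 nvm_checksum_sketch(const u16 *words, int nwords_before_csum)
	{
		u16 sum = 0;
		int i;

		for (i = 0; i < nwords_before_csum; i++)
			sum += words[i];

		return (u16)(0xBABA - sum);
	}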
/**
- * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
+ * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
* Calculates the EEPROM section checksum by reading/adding each word of
* the EEPROM and then verifies that the sum of the EEPROM is
* equal to 0xBABA.
**/
-static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
+static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw)
{
- s32 ret_val = 0;
+ s32 ret_val;
u16 eeprom_regions_count = 1;
u16 j, nvm_data;
u16 nvm_offset;
+ DEBUGFUNC("e1000_validate_nvm_checksum_82580");
+
ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
if (ret_val) {
- hw_dbg("NVM Read Error\n");
+ DEBUGOUT("NVM Read Error\n");
goto out;
}
if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
- /* if checksums compatibility bit is set validate checksums
- * for all 4 ports.
- */
+ /* if checksums compatibility bit is set validate checksums
+ * for all 4 ports. */
eeprom_regions_count = 4;
}
for (j = 0; j < eeprom_regions_count; j++) {
nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
- ret_val = igb_validate_nvm_checksum_with_offset(hw,
- nvm_offset);
- if (ret_val != 0)
+ ret_val = e1000_validate_nvm_checksum_with_offset(hw,
+ nvm_offset);
+ if (ret_val != E1000_SUCCESS)
goto out;
}
@@ -2201,41 +2675,41 @@ out:
}
/**
- * igb_update_nvm_checksum_82580 - Update EEPROM checksum
+ * e1000_update_nvm_checksum_82580 - Update EEPROM checksum
* @hw: pointer to the HW structure
*
* Updates the EEPROM section checksums for all 4 ports by reading/adding
* each word of the EEPROM up to the checksum. Then calculates the EEPROM
* checksum and writes the value to the EEPROM.
**/
-static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
+static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw)
{
s32 ret_val;
u16 j, nvm_data;
u16 nvm_offset;
+ DEBUGFUNC("e1000_update_nvm_checksum_82580");
+
ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
if (ret_val) {
- hw_dbg("NVM Read Error while updating checksum"
- " compatibility bit.\n");
+ DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n");
goto out;
}
- if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
+ if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) {
/* set compatibility bit to validate checksums appropriately */
nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
- &nvm_data);
+ &nvm_data);
if (ret_val) {
- hw_dbg("NVM Write Error while updating checksum"
- " compatibility bit.\n");
+ DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n");
goto out;
}
}
for (j = 0; j < 4; j++) {
nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
- ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
+ ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
if (ret_val)
goto out;
}
@@ -2245,24 +2719,26 @@ out:
}
/**
- * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
+ * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
* Calculates the EEPROM section checksum by reading/adding each word of
* the EEPROM and then verifies that the sum of the EEPROM is
* equal to 0xBABA.
**/
-static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
+static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw)
{
- s32 ret_val = 0;
+ s32 ret_val = E1000_SUCCESS;
u16 j;
u16 nvm_offset;
+ DEBUGFUNC("e1000_validate_nvm_checksum_i350");
+
for (j = 0; j < 4; j++) {
nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
- ret_val = igb_validate_nvm_checksum_with_offset(hw,
- nvm_offset);
- if (ret_val != 0)
+ ret_val = e1000_validate_nvm_checksum_with_offset(hw,
+ nvm_offset);
+ if (ret_val != E1000_SUCCESS)
goto out;
}
@@ -2271,23 +2747,25 @@ out:
}
/**
- * igb_update_nvm_checksum_i350 - Update EEPROM checksum
+ * e1000_update_nvm_checksum_i350 - Update EEPROM checksum
* @hw: pointer to the HW structure
*
* Updates the EEPROM section checksums for all 4 ports by reading/adding
* each word of the EEPROM up to the checksum. Then calculates the EEPROM
* checksum and writes the value to the EEPROM.
**/
-static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
+static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
{
- s32 ret_val = 0;
+ s32 ret_val = E1000_SUCCESS;
u16 j;
u16 nvm_offset;
+ DEBUGFUNC("e1000_update_nvm_checksum_i350");
+
for (j = 0; j < 4; j++) {
nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
- ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
- if (ret_val != 0)
+ ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
+ if (ret_val != E1000_SUCCESS)
goto out;
}
@@ -2296,16 +2774,18 @@ out:
}
/**
- * __igb_access_emi_reg - Read/write EMI register
+ * __e1000_access_emi_reg - Read/write EMI register
* @hw: pointer to the HW structure
* @addr: EMI address to program
* @data: pointer to value to read/write from/to the EMI address
* @read: boolean flag to indicate read or write
**/
-static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
+static s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address,
u16 *data, bool read)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val;
+
+ DEBUGFUNC("__e1000_access_emi_reg");
ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
if (ret_val)
@@ -2320,126 +2800,338 @@ static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
}
/**
- * igb_read_emi_reg - Read Extended Management Interface register
+ * e1000_read_emi_reg - Read Extended Management Interface register
* @hw: pointer to the HW structure
* @addr: EMI address to program
* @data: value to be read from the EMI address
**/
-s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
+s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
{
- return __igb_access_emi_reg(hw, addr, data, true);
+ DEBUGFUNC("e1000_read_emi_reg");
+
+ return __e1000_access_emi_reg(hw, addr, data, true);
}
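Most of __e1000_access_emi_reg() is hidden by the hunk above; only the initial write of the EMI address is visible. On these parts the EMI is normally driven as a two-step sequence through the PHY register ops: program the address register, then read or write the data register. A sketch under that assumption (E1000_EMIDATA is assumed here, it is not shown in the hunk):

	/* sketch of the assumed EMI access sequence, not the exact body */
	static s32 emi_access_sketch(struct e1000_hw *hw, u16 address,
				     u16 *data, bool read)
	{
		s32 ret_val;

		/* program the EMI address register first */
		ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
		if (ret_val)
			return ret_val;

		/* then access the EMI data register */
		if (read)
			ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
		else
			ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);

		return ret_val;
	}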
/**
- * igb_set_eee_i350 - Enable/disable EEE support
+ * e1000_initialize_M88E1512_phy - Initialize M88E1512 PHY
* @hw: pointer to the HW structure
*
+ * Initialize Marvell 1512 to work correctly with Avoton.
+ **/
+s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_initialize_M88E1512_phy");
+
+ /* Check if this is correct PHY. */
+ if (phy->id != M88E1512_E_PHY_ID)
+ goto out;
+
+ /* Switch to PHY page 0xFF. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0xFB. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0x12. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
+ if (ret_val)
+ goto out;
+
+ /* Change mode to SGMII-to-Copper */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
+ if (ret_val)
+ goto out;
+
+ /* Return the PHY to page 0. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+
+ msec_delay(1000);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_initialize_M88E1543_phy - Initialize M88E1543 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initialize Marvell 1543 to work correctly with Avoton.
+ **/
+s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_initialize_M88E1543_phy");
+
+ /* Check if this is correct PHY. */
+ if (phy->id != M88E1543_E_PHY_ID)
+ goto out;
+
+ /* Switch to PHY page 0xFF. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0xFB. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0xC00D);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 0x12. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
+ if (ret_val)
+ goto out;
+
+ /* Change mode to SGMII-to-Copper */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
+ if (ret_val)
+ goto out;
+
+ /* Switch to PHY page 1. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1);
+ if (ret_val)
+ goto out;
+
+ /* Change mode to 1000BASE-X/SGMII and autoneg enable; reset */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140);
+ if (ret_val)
+ goto out;
+
+ /* Return the PHY to page 0. */
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+ if (ret_val)
+ goto out;
+
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+
+ msec_delay(1000);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_eee_i350 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @adv1G: boolean flag enabling 1G EEE advertisement
+ * @adv100M: boolean flag enabling 100M EEE advertisement
+ *
* Enable/disable EEE based on setting in dev_spec structure.
*
**/
-s32 igb_set_eee_i350(struct e1000_hw *hw)
+s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
{
- s32 ret_val = 0;
u32 ipcnfg, eeer;
+ DEBUGFUNC("e1000_set_eee_i350");
+
if ((hw->mac.type < e1000_i350) ||
(hw->phy.media_type != e1000_media_type_copper))
goto out;
- ipcnfg = rd32(E1000_IPCNFG);
- eeer = rd32(E1000_EEER);
+ ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
+ eeer = E1000_READ_REG(hw, E1000_EEER);
/* enable or disable per user setting */
if (!(hw->dev_spec._82575.eee_disable)) {
- u32 eee_su = rd32(E1000_EEE_SU);
+ u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU);
+
+ if (adv100M)
+ ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
+ else
+ ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;
+
+ if (adv1G)
+ ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
+ else
+ ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;
- ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
- E1000_EEER_LPI_FC);
+ E1000_EEER_LPI_FC);
/* This bit should not be set in normal operation. */
if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
- hw_dbg("LPI Clock Stop Bit should not be set!\n");
-
+ DEBUGOUT("LPI Clock Stop Bit should not be set!\n");
} else {
- ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
- E1000_IPCNFG_EEE_100M_AN);
- eeer &= ~(E1000_EEER_TX_LPI_EN |
- E1000_EEER_RX_LPI_EN |
- E1000_EEER_LPI_FC);
- }
- wr32(E1000_IPCNFG, ipcnfg);
- wr32(E1000_EEER, eeer);
- rd32(E1000_IPCNFG);
- rd32(E1000_EEER);
+ ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
+ eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
+ E1000_EEER_LPI_FC);
+ }
+ E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
+ E1000_WRITE_REG(hw, E1000_EEER, eeer);
+ E1000_READ_REG(hw, E1000_IPCNFG);
+ E1000_READ_REG(hw, E1000_EEER);
out:
- return ret_val;
+ return E1000_SUCCESS;
}
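Note the signature change here: e1000_set_eee_i350() now takes the 1G and 100M advertisement flags from the caller instead of always advertising both. A hypothetical caller, purely for illustration (the wrapper name is invented):

	/* hypothetical example, not part of this patch */
	static void example_configure_eee(struct e1000_hw *hw)
	{
		/* advertise EEE at 1G only, keep 100M advertisement off */
		e1000_set_eee_i350(hw, true, false);
	}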
/**
- * igb_set_eee_i354 - Enable/disable EEE support
+ * e1000_set_eee_i354 - Enable/disable EEE support
* @hw: pointer to the HW structure
+ * @adv1G: boolean flag enabling 1G EEE advertisement
+ * @adv100M: boolean flag enabling 100M EEE advertisement
*
* Enable/disable EEE legacy mode based on setting in dev_spec structure.
*
**/
-s32 igb_set_eee_i354(struct e1000_hw *hw)
+s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
+ s32 ret_val = E1000_SUCCESS;
u16 phy_data;
+ DEBUGFUNC("e1000_set_eee_i354");
+
if ((hw->phy.media_type != e1000_media_type_copper) ||
- (phy->id != M88E1545_E_PHY_ID))
+ ((phy->id != M88E1543_E_PHY_ID) &&
+ (phy->id != M88E1512_E_PHY_ID)))
goto out;
if (!hw->dev_spec._82575.eee_disable) {
/* Switch to PHY page 18. */
- ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 18);
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
if (ret_val)
goto out;
- ret_val = phy->ops.read_reg(hw, E1000_M88E1545_EEE_CTRL_1,
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
&phy_data);
if (ret_val)
goto out;
- phy_data |= E1000_M88E1545_EEE_CTRL_1_MS;
- ret_val = phy->ops.write_reg(hw, E1000_M88E1545_EEE_CTRL_1,
+ phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
phy_data);
if (ret_val)
goto out;
/* Return the PHY to page 0. */
- ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 0);
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
if (ret_val)
goto out;
/* Turn on EEE advertisement. */
- ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
- E1000_EEE_ADV_DEV_I354,
- &phy_data);
+ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ &phy_data);
if (ret_val)
goto out;
- phy_data |= E1000_EEE_ADV_100_SUPPORTED |
- E1000_EEE_ADV_1000_SUPPORTED;
- ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ if (adv100M)
+ phy_data |= E1000_EEE_ADV_100_SUPPORTED;
+ else
+ phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;
+
+ if (adv1G)
+ phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
+ else
+ phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;
+
+ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
E1000_EEE_ADV_DEV_I354,
phy_data);
} else {
/* Turn off EEE advertisement. */
- ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
- E1000_EEE_ADV_DEV_I354,
- &phy_data);
+ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ &phy_data);
if (ret_val)
goto out;
phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
E1000_EEE_ADV_1000_SUPPORTED);
- ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
- E1000_EEE_ADV_DEV_I354,
- phy_data);
+ ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+ E1000_EEE_ADV_DEV_I354,
+ phy_data);
}
out:
@@ -2447,27 +3139,30 @@ out:
}
/**
- * igb_get_eee_status_i354 - Get EEE status
+ * e1000_get_eee_status_i354 - Get EEE status
* @hw: pointer to the HW structure
* @status: EEE status
*
* Get EEE status by guessing based on whether Tx or Rx LPI indications have
* been received.
**/
-s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
+s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
+ s32 ret_val = E1000_SUCCESS;
u16 phy_data;
+ DEBUGFUNC("e1000_get_eee_status_i354");
+
/* Check if EEE is supported on this device. */
if ((hw->phy.media_type != e1000_media_type_copper) ||
- (phy->id != M88E1545_E_PHY_ID))
+ ((phy->id != M88E1543_E_PHY_ID) &&
+ (phy->id != M88E1512_E_PHY_ID)))
goto out;
- ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
- E1000_PCS_STATUS_DEV_I354,
- &phy_data);
+ ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
+ E1000_PCS_STATUS_DEV_I354,
+ &phy_data);
if (ret_val)
goto out;
@@ -2478,6 +3173,602 @@ out:
return ret_val;
}
+/* Due to a hw errata, if the host tries to configure the VFTA register
+ * while performing queries from the BMC or DMA, then the VFTA in some
+ * cases won't be written.
+ */
+
+/**
+ * e1000_clear_vfta_i350 - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void e1000_clear_vfta_i350(struct e1000_hw *hw)
+{
+ u32 offset;
+ int i;
+
+ DEBUGFUNC("e1000_clear_vfta_i350");
+
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ for (i = 0; i < 10; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
+/**
+ * e1000_write_vfta_i350 - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ int i;
+
+ DEBUGFUNC("e1000_write_vfta_i350");
+
+ for (i = 0; i < 10; i++)
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+
+ E1000_WRITE_FLUSH(hw);
+}
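
The repeated writes above are the whole errata workaround; the caller still has to pick the right VFTA word and bit for a given VLAN ID. A minimal sketch, not part of this patch and assuming the matching E1000_READ_REG_ARRAY() accessor exists alongside the write macro used above:

	/* Hypothetical helper: mark one VLAN ID as allowed using the i350-safe
	 * writer above. Each of the 128 VFTA words covers 32 consecutive IDs.
	 */
	static void example_vfta_set_vid_i350(struct e1000_hw *hw, u16 vid)
	{
		u32 offset = (vid >> 5) & (E1000_VLAN_FILTER_TBL_SIZE - 1);
		u32 mask = 1u << (vid & 0x1F);
		u32 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, offset);

		e1000_write_vfta_i350(hw, offset, vfta | mask);
	}
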
+
+/**
+ * e1000_set_i2c_bb - Enable I2C bit-bang
+ * @hw: pointer to the HW structure
+ *
+ * Enable I2C bit-bang interface
+ *
+ **/
+s32 e1000_set_i2c_bb(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl_ext, i2cparams;
+
+ DEBUGFUNC("e1000_set_i2c_bb");
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+
+ i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ i2cparams |= E1000_I2CBB_EN;
+ i2cparams |= E1000_I2C_DATA_OE_N;
+ i2cparams |= E1000_I2C_CLK_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams);
+ E1000_WRITE_FLUSH(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_i2c_byte_generic - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: device address
+ * @data: value read
+ *
+ * Performs byte read operation over I2C interface at
+ * a specified device address.
+ **/
+s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u32 max_retry = 10;
+ u32 retry = 1;
+ u16 swfw_mask = 0;
+
+ bool nack = true;
+
+ DEBUGFUNC("e1000_read_i2c_byte_generic");
+
+ swfw_mask = E1000_SWFW_PHY0_SM;
+
+ do {
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
+ != E1000_SUCCESS) {
+ status = E1000_ERR_SWFW_SYNC;
+ goto read_byte_out;
+ }
+
+ e1000_i2c_start(hw);
+
+ /* Device Address and write indication */
+ status = e1000_clock_out_i2c_byte(hw, dev_addr);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_out_i2c_byte(hw, byte_offset);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ e1000_i2c_start(hw);
+
+ /* Device Address and read indication */
+ status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1));
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_in_i2c_byte(hw, data);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_out_i2c_bit(hw, nack);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ e1000_i2c_stop(hw);
+ break;
+
+fail:
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ msec_delay(100);
+ e1000_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte read error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte read error.\n");
+
+ } while (retry < max_retry);
+
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+read_byte_out:
+
+ return status;
+}
+
+/**
+ * e1000_write_i2c_byte_generic - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+ * @data: value to write
+ *
+ * Performs byte write operation over I2C interface at
+ * a specified device address.
+ **/
+s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ s32 status = E1000_SUCCESS;
+ u32 max_retry = 1;
+ u32 retry = 0;
+ u16 swfw_mask = 0;
+
+ DEBUGFUNC("e1000_write_i2c_byte_generic");
+
+ swfw_mask = E1000_SWFW_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) {
+ status = E1000_ERR_SWFW_SYNC;
+ goto write_byte_out;
+ }
+
+ do {
+ e1000_i2c_start(hw);
+
+ status = e1000_clock_out_i2c_byte(hw, dev_addr);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_out_i2c_byte(hw, byte_offset);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_clock_out_i2c_byte(hw, data);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ status = e1000_get_i2c_ack(hw);
+ if (status != E1000_SUCCESS)
+ goto fail;
+
+ e1000_i2c_stop(hw);
+ break;
+
+fail:
+ e1000_i2c_bus_clear(hw);
+ retry++;
+ if (retry < max_retry)
+ DEBUGOUT("I2C byte write error - Retrying.\n");
+ else
+ DEBUGOUT("I2C byte write error.\n");
+ } while (retry < max_retry);
+
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+write_byte_out:
+
+ return status;
+}
+
+/**
+ * e1000_i2c_start - Sets I2C start condition
+ * @hw: pointer to hardware structure
+ *
+ * Sets I2C start condition (High -> Low on SDA while SCL is High)
+ **/
+static void e1000_i2c_start(struct e1000_hw *hw)
+{
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ DEBUGFUNC("e1000_i2c_start");
+
+ /* Start condition must begin with data and clock high */
+ e1000_set_i2c_data(hw, &i2cctl, 1);
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Setup time for start condition (4.7us) */
+ usec_delay(E1000_I2C_T_SU_STA);
+
+ e1000_set_i2c_data(hw, &i2cctl, 0);
+
+ /* Hold time for start condition (4us) */
+ usec_delay(E1000_I2C_T_HD_STA);
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(E1000_I2C_T_LOW);
+
+}
+
+/**
+ * e1000_i2c_stop - Sets I2C stop condition
+ * @hw: pointer to hardware structure
+ *
+ * Sets I2C stop condition (Low -> High on SDA while SCL is High)
+ **/
+static void e1000_i2c_stop(struct e1000_hw *hw)
+{
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ DEBUGFUNC("e1000_i2c_stop");
+
+ /* Stop condition must begin with data low and clock high */
+ e1000_set_i2c_data(hw, &i2cctl, 0);
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Setup time for stop condition (4us) */
+ usec_delay(E1000_I2C_T_SU_STO);
+
+ e1000_set_i2c_data(hw, &i2cctl, 1);
+
+ /* bus free time between stop and start (4.7us)*/
+ usec_delay(E1000_I2C_T_BUF);
+}
+
+/**
+ * e1000_clock_in_i2c_byte - Clocks in one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: pointer to store the byte clocked in
+ *
+ * Clocks in one byte data via I2C data/clock
+ **/
+static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data)
+{
+ s32 i;
+ bool bit = 0;
+
+ DEBUGFUNC("e1000_clock_in_i2c_byte");
+
+ *data = 0;
+ for (i = 7; i >= 0; i--) {
+ e1000_clock_in_i2c_bit(hw, &bit);
+ *data |= bit << i;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_clock_out_i2c_byte - Clocks out one byte via I2C
+ * @hw: pointer to hardware structure
+ * @data: data byte to clock out
+ *
+ * Clocks out one byte data via I2C data/clock
+ **/
+static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data)
+{
+ s32 status = E1000_SUCCESS;
+ s32 i;
+ u32 i2cctl;
+ bool bit = 0;
+
+ DEBUGFUNC("e1000_clock_out_i2c_byte");
+
+ for (i = 7; i >= 0; i--) {
+ bit = (data >> i) & 0x1;
+ status = e1000_clock_out_i2c_bit(hw, bit);
+
+ if (status != E1000_SUCCESS)
+ break;
+ }
+
+ /* Release SDA line (set high) */
+ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ i2cctl |= E1000_I2C_DATA_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+ return status;
+}
+
+/**
+ * e1000_get_i2c_ack - Polls for I2C ACK
+ * @hw: pointer to hardware structure
+ *
+ * Clocks in/out one bit via I2C data/clock
+ **/
+static s32 e1000_get_i2c_ack(struct e1000_hw *hw)
+{
+ s32 status = E1000_SUCCESS;
+ u32 i = 0;
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ u32 timeout = 10;
+ bool ack = true;
+
+ DEBUGFUNC("e1000_get_i2c_ack");
+
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(E1000_I2C_T_HIGH);
+
+ /* Wait until SCL returns high */
+ for (i = 0; i < timeout; i++) {
+ usec_delay(1);
+ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ if (i2cctl & E1000_I2C_CLK_IN)
+ break;
+ }
+ if (!(i2cctl & E1000_I2C_CLK_IN))
+ return E1000_ERR_I2C;
+
+ ack = e1000_get_i2c_data(&i2cctl);
+ if (ack) {
+ DEBUGOUT("I2C ack was not received.\n");
+ status = E1000_ERR_I2C;
+ }
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(E1000_I2C_T_LOW);
+
+ return status;
+}
+
+/**
+ * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: read data value
+ *
+ * Clocks in one bit via I2C data/clock
+ **/
+static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data)
+{
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ DEBUGFUNC("e1000_clock_in_i2c_bit");
+
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(E1000_I2C_T_HIGH);
+
+ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ *data = e1000_get_i2c_data(&i2cctl);
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us */
+ usec_delay(E1000_I2C_T_LOW);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: data value to write
+ *
+ * Clocks out one bit via I2C data/clock
+ **/
+static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data)
+{
+ s32 status;
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ DEBUGFUNC("e1000_clock_out_i2c_bit");
+
+ status = e1000_set_i2c_data(hw, &i2cctl, data);
+ if (status == E1000_SUCCESS) {
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Minimum high period of clock is 4us */
+ usec_delay(E1000_I2C_T_HIGH);
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Minimum low period of clock is 4.7 us.
+ * This also takes care of the data hold time.
+ */
+ usec_delay(E1000_I2C_T_LOW);
+ } else {
+ status = E1000_ERR_I2C;
+ DEBUGOUT1("I2C data was not set to %X\n", data);
+ }
+
+ return status;
+}
+/**
+ * e1000_raise_i2c_clk - Raises the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Raises the I2C clock line '0'->'1'
+ **/
+static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
+{
+ DEBUGFUNC("e1000_raise_i2c_clk");
+
+ *i2cctl |= E1000_I2C_CLK_OUT;
+ *i2cctl &= ~E1000_I2C_CLK_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+ /* SCL rise time (1000ns) */
+ usec_delay(E1000_I2C_T_RISE);
+}
+
+/**
+ * e1000_lower_i2c_clk - Lowers the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Lowers the I2C clock line '1'->'0'
+ **/
+static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
+{
+
+ DEBUGFUNC("e1000_lower_i2c_clk");
+
+ *i2cctl &= ~E1000_I2C_CLK_OUT;
+ *i2cctl &= ~E1000_I2C_CLK_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+ /* SCL fall time (300ns) */
+ usec_delay(E1000_I2C_T_FALL);
+}
+
+/**
+ * e1000_set_i2c_data - Sets the I2C data bit
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ * @data: I2C data value (0 or 1) to set
+ *
+ * Sets the I2C data bit
+ **/
+static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data)
+{
+ s32 status = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_set_i2c_data");
+
+ if (data)
+ *i2cctl |= E1000_I2C_DATA_OUT;
+ else
+ *i2cctl &= ~E1000_I2C_DATA_OUT;
+
+ *i2cctl &= ~E1000_I2C_DATA_OE_N;
+ *i2cctl |= E1000_I2C_CLK_OE_N;
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+ /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
+ usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA);
+
+ *i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ if (data != e1000_get_i2c_data(i2cctl)) {
+ status = E1000_ERR_I2C;
+ DEBUGOUT1("Error - I2C data was not set to %X.\n", data);
+ }
+
+ return status;
+}
+
+/**
+ * e1000_get_i2c_data - Reads the I2C SDA data bit
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Returns the I2C data bit value
+ **/
+static bool e1000_get_i2c_data(u32 *i2cctl)
+{
+ bool data;
+
+ DEBUGFUNC("e1000_get_i2c_data");
+
+ if (*i2cctl & E1000_I2C_DATA_IN)
+ data = 1;
+ else
+ data = 0;
+
+ return data;
+}
+
+/**
+ * e1000_i2c_bus_clear - Clears the I2C bus
+ * @hw: pointer to hardware structure
+ *
+ * Clears the I2C bus by sending nine clock pulses.
+ * Used when data line is stuck low.
+ **/
+void e1000_i2c_bus_clear(struct e1000_hw *hw)
+{
+ u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+ u32 i;
+
+ DEBUGFUNC("e1000_i2c_bus_clear");
+
+ e1000_i2c_start(hw);
+
+ e1000_set_i2c_data(hw, &i2cctl, 1);
+
+ for (i = 0; i < 9; i++) {
+ e1000_raise_i2c_clk(hw, &i2cctl);
+
+ /* Min high period of clock is 4us */
+ usec_delay(E1000_I2C_T_HIGH);
+
+ e1000_lower_i2c_clk(hw, &i2cctl);
+
+ /* Min low period of clock is 4.7us*/
+ usec_delay(E1000_I2C_T_LOW);
+ }
+
+ e1000_i2c_start(hw);
+
+ /* Put the i2c bus back to default state */
+ e1000_i2c_stop(hw);
+}
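
Taken together, e1000_set_i2c_bb() plus the byte helpers above form a complete bit-banged transaction path. A minimal usage sketch (illustrative only; the 0xA0 module address and the zero offset are the conventional SFF EEPROM values, not constants defined by this patch):

	/* Hypothetical usage: enable bit-banging, then read one byte (the SFF
	 * identifier at offset 0) from a pluggable module at I2C address 0xA0.
	 */
	static s32 example_read_module_id(struct e1000_hw *hw, u8 *id)
	{
		s32 ret_val = e1000_set_i2c_bb(hw);

		if (ret_val != E1000_SUCCESS)
			return ret_val;

		return e1000_read_i2c_byte_generic(hw, 0, 0xA0, id);
	}
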
+
static const u8 e1000_emc_temp_data[4] = {
E1000_EMC_INTERNAL_DATA,
E1000_EMC_DIODE1_DATA,
@@ -2492,14 +3783,13 @@ static const u8 e1000_emc_therm_limit[4] = {
};
/**
- * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
+ * e1000_get_thermal_sensor_data_generic - Gathers thermal sensor data
* @hw: pointer to hardware structure
*
* Updates the temperatures in mac.thermal_sensor_data
**/
-s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw)
{
- s32 status = E1000_SUCCESS;
u16 ets_offset;
u16 ets_cfg;
u16 ets_sensor;
@@ -2509,17 +3799,19 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
u8 i;
struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+ DEBUGFUNC("e1000_get_thermal_sensor_data_generic");
+
if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
return E1000_NOT_IMPLEMENTED;
- data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF);
+ data->sensor[0].temp = (E1000_READ_REG(hw, E1000_THMJT) & 0xFF);
/* Return the internal sensor only if ETS is unsupported */
- hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
+ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset);
if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
- return status;
+ return E1000_SUCCESS;
- hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
+ e1000_read_nvm(hw, ets_offset, 1, &ets_cfg);
if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
!= NVM_ETS_TYPE_EMC)
return E1000_NOT_IMPLEMENTED;
@@ -2529,7 +3821,7 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
num_sensors = E1000_MAX_SENSORS;
for (i = 1; i < num_sensors; i++) {
- hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
+ e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor);
sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
NVM_ETS_DATA_INDEX_SHIFT);
sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
@@ -2541,19 +3833,18 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
E1000_I2C_THERMAL_SENSOR_ADDR,
&data->sensor[i].temp);
}
- return status;
+ return E1000_SUCCESS;
}
/**
- * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
+ * e1000_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
* @hw: pointer to hardware structure
*
* Sets the thermal sensor thresholds according to the NVM map
* and save off the threshold and location values into mac.thermal_sensor_data
**/
-s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
+s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
{
- s32 status = E1000_SUCCESS;
u16 ets_offset;
u16 ets_cfg;
u16 ets_sensor;
@@ -2565,6 +3856,8 @@ s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
u8 i;
struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+ DEBUGFUNC("e1000_init_thermal_sensor_thresh_generic");
+
if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
return E1000_NOT_IMPLEMENTED;
@@ -2572,16 +3865,16 @@ s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
data->sensor[0].location = 0x1;
data->sensor[0].caution_thresh =
- (rd32(E1000_THHIGHTC) & 0xFF);
+ (E1000_READ_REG(hw, E1000_THHIGHTC) & 0xFF);
data->sensor[0].max_op_thresh =
- (rd32(E1000_THLOWTC) & 0xFF);
+ (E1000_READ_REG(hw, E1000_THLOWTC) & 0xFF);
/* Return the internal sensor only if ETS is unsupported */
- hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
+ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset);
if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
- return status;
+ return E1000_SUCCESS;
- hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
+ e1000_read_nvm(hw, ets_offset, 1, &ets_cfg);
if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
!= NVM_ETS_TYPE_EMC)
return E1000_NOT_IMPLEMENTED;
@@ -2591,7 +3884,7 @@ s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
for (i = 1; i <= num_sensors; i++) {
- hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
+ e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor);
sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
NVM_ETS_DATA_INDEX_SHIFT);
sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
@@ -2610,40 +3903,5 @@ s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
low_thresh_delta;
}
}
- return status;
+ return E1000_SUCCESS;
}
-
-static struct e1000_mac_operations e1000_mac_ops_82575 = {
- .init_hw = igb_init_hw_82575,
- .check_for_link = igb_check_for_link_82575,
- .rar_set = igb_rar_set,
- .read_mac_addr = igb_read_mac_addr_82575,
- .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
-#ifdef CONFIG_IGB_HWMON
- .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
- .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
-#endif
-};
-
-static struct e1000_phy_operations e1000_phy_ops_82575 = {
- .acquire = igb_acquire_phy_82575,
- .get_cfg_done = igb_get_cfg_done_82575,
- .release = igb_release_phy_82575,
- .write_i2c_byte = igb_write_i2c_byte,
- .read_i2c_byte = igb_read_i2c_byte,
-};
-
-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
- .acquire = igb_acquire_nvm_82575,
- .read = igb_read_nvm_eerd,
- .release = igb_release_nvm_82575,
- .write = igb_write_nvm_spi,
-};
-
-const struct e1000_info e1000_82575_info = {
- .get_invariants = igb_get_invariants_82575,
- .mac_ops = &e1000_mac_ops_82575,
- .phy_ops = &e1000_phy_ops_82575,
- .nvm_ops = &e1000_nvm_ops_82575,
-};
-
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 74a1506b4235..94dd928c27b2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,14 +12,11 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -28,94 +25,246 @@
#ifndef _E1000_82575_H_
#define _E1000_82575_H_
-extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
-extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
-extern s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data);
-extern s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data);
-
-#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
- (ID_LED_DEF1_DEF2 << 8) | \
- (ID_LED_DEF1_DEF2 << 4) | \
- (ID_LED_OFF1_ON2))
-
-#define E1000_RAR_ENTRIES_82575 16
-#define E1000_RAR_ENTRIES_82576 24
-#define E1000_RAR_ENTRIES_82580 24
-#define E1000_RAR_ENTRIES_I350 32
+#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_DEF1_DEF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_ON2))
+/*
+ * Receive Address Register Count
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * These entries are also used for MAC-based filtering.
+ */
+/*
+ * For 82576, there are an additional set of RARs that begin at an offset
+ * separate from the first set of RARs.
+ */
+#define E1000_RAR_ENTRIES_82575 16
+#define E1000_RAR_ENTRIES_82576 24
+#define E1000_RAR_ENTRIES_82580 24
+#define E1000_RAR_ENTRIES_I350 32
+#define E1000_SW_SYNCH_MB 0x00000100
+#define E1000_STAT_DEV_RST_SET 0x00100000
+#define E1000_CTRL_DEV_RST 0x20000000
+
+struct e1000_adv_data_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ u32 data;
+ struct {
+ u32 datalen:16; /* Data buffer length */
+ u32 rsvd:4;
+ u32 dtyp:4; /* Descriptor type */
+ u32 dcmd:8; /* Descriptor command */
+ } config;
+ } lower;
+ union {
+ u32 data;
+ struct {
+ u32 status:4; /* Descriptor status */
+ u32 idx:4;
+ u32 popts:6; /* Packet Options */
+ u32 paylen:18; /* Payload length */
+ } options;
+ } upper;
+};
-#define E1000_SW_SYNCH_MB 0x00000100
-#define E1000_STAT_DEV_RST_SET 0x00100000
-#define E1000_CTRL_DEV_RST 0x20000000
+#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */
+#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */
+#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */
+#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */
+#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */
+#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */
+#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */
+#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */
+#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */
+#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADV_DCMD_RS 0x8 /* Report Status */
+#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */
+#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */
+/* Extended Device Control */
+#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */
+
+struct e1000_adv_context_desc {
+ union {
+ u32 ip_config;
+ struct {
+ u32 iplen:9;
+ u32 maclen:7;
+ u32 vlan_tag:16;
+ } fields;
+ } ip_setup;
+ u32 seq_num;
+ union {
+ u64 l4_config;
+ struct {
+ u32 mkrloc:9;
+ u32 tucmd:11;
+ u32 dtyp:4;
+ u32 adv:8;
+ u32 rsvd:4;
+ u32 idx:4;
+ u32 l4len:8;
+ u32 mss:16;
+ } fields;
+ } l4_setup;
+};
/* SRRCTL bit definitions */
-#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
-#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
-#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
-#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
-#define E1000_SRRCTL_DROP_EN 0x80000000
-#define E1000_SRRCTL_TIMESTAMP 0x40000000
-
-
-#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
-#define E1000_MRQC_ENABLE_VMDQ 0x00000003
-#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
-#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
-#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
-#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
+#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
+#define E1000_SRRCTL_TIMESTAMP 0x40000000
+#define E1000_SRRCTL_DROP_EN 0x80000000
+
+#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
+
+#define E1000_TX_HEAD_WB_ENABLE 0x1
+#define E1000_TX_SEQNUM_WB_ENABLE 0x2
+
+#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
+#define E1000_MRQC_ENABLE_VMDQ 0x00000003
+#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
+#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
+#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002
+
+#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8
+#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \
+ E1000_VMRCTL_MIRROR_PORT_SHIFT)
+#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0)
+#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1)
+#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2)
#define E1000_EICR_TX_QUEUE ( \
- E1000_EICR_TX_QUEUE0 | \
- E1000_EICR_TX_QUEUE1 | \
- E1000_EICR_TX_QUEUE2 | \
- E1000_EICR_TX_QUEUE3)
+ E1000_EICR_TX_QUEUE0 | \
+ E1000_EICR_TX_QUEUE1 | \
+ E1000_EICR_TX_QUEUE2 | \
+ E1000_EICR_TX_QUEUE3)
#define E1000_EICR_RX_QUEUE ( \
- E1000_EICR_RX_QUEUE0 | \
- E1000_EICR_RX_QUEUE1 | \
- E1000_EICR_RX_QUEUE2 | \
- E1000_EICR_RX_QUEUE3)
+ E1000_EICR_RX_QUEUE0 | \
+ E1000_EICR_RX_QUEUE1 | \
+ E1000_EICR_RX_QUEUE2 | \
+ E1000_EICR_RX_QUEUE3)
+
+#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
+#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
+
+#define EIMS_ENABLE_MASK ( \
+ E1000_EIMS_RX_QUEUE | \
+ E1000_EIMS_TX_QUEUE | \
+ E1000_EIMS_TCP_TIMER | \
+ E1000_EIMS_OTHER)
/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
-#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
-#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
+#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
+#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
+#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
+#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
+#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
+#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
+#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
+#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
+#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
+#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
/* Receive Descriptor - Advanced */
union e1000_adv_rx_desc {
struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
} read;
struct {
struct {
- struct {
- __le16 pkt_info; /* RSS type, Packet type */
- __le16 hdr_info; /* Split Header,
- * header buffer length */
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /*RSS type, Pkt type*/
+ /* Split Header, header buffer len */
+ __le16 hdr_info;
+ } hs_rss;
} lo_dword;
union {
- __le32 rss; /* RSS Hash */
+ __le32 rss; /* RSS Hash */
struct {
- __le16 ip_id; /* IP id */
- __le16 csum; /* Packet Checksum */
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
} csum_ip;
} hi_dword;
} lower;
struct {
- __le32 status_error; /* ext status/error */
- __le16 length; /* Packet length */
- __le16 vlan; /* VLAN tag */
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
} upper;
} wb; /* writeback */
};
-#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
-#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
-#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
-#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
+#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F
+#define E1000_RXDADV_RSSTYPE_SHIFT 12
+#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
+#define E1000_RXDADV_SPLITHEADER_EN 0x00001000
+#define E1000_RXDADV_SPH 0x8000
+#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
+#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
+#define E1000_RXDADV_ERR_HBO 0x00800000
+
+/* RSS Hash results */
+#define E1000_RXDADV_RSSTYPE_NONE 0x00000000
+#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
+#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002
+#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
+#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004
+#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005
+#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
+#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
+#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
+/* RSS Packet Types as indicated in the receive descriptor */
+#define E1000_RXDADV_PKTTYPE_ILMASK 0x000000F0
+#define E1000_RXDADV_PKTTYPE_TLMASK 0x00000F00
+#define E1000_RXDADV_PKTTYPE_NONE 0x00000000
+#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
+#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */
+#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */
+#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */
+#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
+#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
+#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
+#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+
+#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
+#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
+#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
+#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
+#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
+#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
+
+/* LinkSec results */
+/* Security Processing bit Indication */
+#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000
+#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
+#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
+#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
+#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
+
+#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000
+#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
+#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
+#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
+#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000
/* Transmit Descriptor - Advanced */
union e1000_adv_tx_desc {
@@ -132,16 +281,26 @@ union e1000_adv_tx_desc {
};
/* Adv Transmit Descriptor Config Masks */
-#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */
-#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
-#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
-#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
-#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
-#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
-#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
-#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
-#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
+#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */
+#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */
+#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */
+#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+/* 1st & Last TSO-full iSCSI PDU*/
+#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800
+#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
+#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
/* Context descriptors */
struct e1000_adv_tx_context_desc {
@@ -151,124 +310,174 @@ struct e1000_adv_tx_context_desc {
__le32 mss_l4len_idx;
};
-#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
-#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
-#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
-#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */
+#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
/* IPSec Encrypt Enable for ESP */
-#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
-#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
+/* Req requires Markers and CRC */
+#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000
+#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
/* Adv ctxt IPSec SA IDX mask */
+#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF
/* Adv ctxt IPSec ESP len mask */
+#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF
/* Additional Transmit Descriptor Control definitions */
-#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */
+#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
+#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */
/* Tx Queue Arbitration Priority 0=low, 1=high */
+#define E1000_TXDCTL_PRIORITY 0x08000000
/* Additional Receive Descriptor Control definitions */
-#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
+#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. wbk flushing */
/* Direct Cache Access (DCA) definitions */
-#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */
-#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
-
-#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
-#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
-
-#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
-#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
-
-/* Additional DCA related definitions, note change in position of CPUID */
-#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
-#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
-#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
-#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
+#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
+#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+
+#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+
+#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */
+
+#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+
+#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
+#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
+#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */
+#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */
+
+/* Additional interrupt register bit definitions */
+#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */
+#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
+#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
/* ETQF register bit definitions */
-#define E1000_ETQF_FILTER_ENABLE (1 << 26)
-#define E1000_ETQF_1588 (1 << 30)
-
-/* FTQF register bit definitions */
-#define E1000_FTQF_VF_BP 0x00008000
-#define E1000_FTQF_1588_TIME_STAMP 0x08000000
-#define E1000_FTQF_MASK 0xF0000000
-#define E1000_FTQF_MASK_PROTO_BP 0x10000000
-#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
-
-#define E1000_NVM_APME_82575 0x0400
-#define MAX_NUM_VFS 8
-
-#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */
-#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */
-#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
-#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
-#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
+#define E1000_ETQF_FILTER_ENABLE (1 << 26)
+#define E1000_ETQF_IMM_INT (1 << 29)
+#define E1000_ETQF_1588 (1 << 30)
+#define E1000_ETQF_QUEUE_ENABLE (1 << 31)
+/*
+ * ETQF filter list: one static filter per filter consumer. This is
+ * to avoid filter collisions later. Add new filters
+ * here!!
+ *
+ * Current filters:
+ * EAPOL 802.1x (0x888e): Filter 0
+ */
+#define E1000_ETQF_FILTER_EAPOL 0
+
+#define E1000_FTQF_VF_BP 0x00008000
+#define E1000_FTQF_1588_TIME_STAMP 0x08000000
+#define E1000_FTQF_MASK 0xF0000000
+#define E1000_FTQF_MASK_PROTO_BP 0x10000000
+#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000
+#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000
+#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
+
+#define E1000_NVM_APME_82575 0x0400
+#define MAX_NUM_VFS 7
+
+#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */
+#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */
+#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
+#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
+#define E1000_DTXSWC_LLE_SHIFT 16
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
/* Easy defines for setting default pool, would normally be left a zero */
-#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
-#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
+#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
+#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
/* Other useful VMD_CTL register defines */
-#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
-#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
-#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
+#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
+#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
/* Per VM Offload register setup */
-#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
-#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
-#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
-#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
-#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
-#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
-#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
-#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
-#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
-#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
-
-#define E1000_VLVF_ARRAY_SIZE 32
-#define E1000_VLVF_VLANID_MASK 0x00000FFF
-#define E1000_VLVF_POOLSEL_SHIFT 12
-#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
-#define E1000_VLVF_LVLAN 0x00100000
-#define E1000_VLVF_VLANID_ENABLE 0x80000000
-
-#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
-#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
-
-#define E1000_IOVCTL 0x05BBC
-#define E1000_IOVCTL_REUSE_VFQ 0x00000001
-
-#define E1000_RPLOLR_STRVLAN 0x40000000
-#define E1000_RPLOLR_STRCRC 0x80000000
-
-#define E1000_DTXCTL_8023LL 0x0004
-#define E1000_DTXCTL_VLAN_ADDED 0x0008
-#define E1000_DTXCTL_OOS_ENABLE 0x0010
-#define E1000_DTXCTL_MDP_EN 0x0020
-#define E1000_DTXCTL_SPOOF_INT 0x0040
+#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
+#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
+#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
+#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
+#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
+#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
+#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
+#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
+#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+
+#define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */
+#define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous enable */
+#define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */
+#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+
+#define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */
+#define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */
+
+#define E1000_VLVF_ARRAY_SIZE 32
+#define E1000_VLVF_VLANID_MASK 0x00000FFF
+#define E1000_VLVF_POOLSEL_SHIFT 12
+#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
+#define E1000_VLVF_LVLAN 0x00100000
+#define E1000_VLVF_VLANID_ENABLE 0x80000000
+
+#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
+#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+#define E1000_IOVCTL 0x05BBC
+#define E1000_IOVCTL_REUSE_VFQ 0x00000001
+
+#define E1000_RPLOLR_STRVLAN 0x40000000
+#define E1000_RPLOLR_STRCRC 0x80000000
+
+#define E1000_TCTL_EXT_COLD 0x000FFC00
+#define E1000_TCTL_EXT_COLD_SHIFT 10
+
+#define E1000_DTXCTL_8023LL 0x0004
+#define E1000_DTXCTL_VLAN_ADDED 0x0008
+#define E1000_DTXCTL_OOS_ENABLE 0x0010
+#define E1000_DTXCTL_MDP_EN 0x0020
+#define E1000_DTXCTL_SPOOF_INT 0x0040
#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14)
-#define ALL_QUEUES 0xFFFF
-
-/* RX packet buffer size defines */
-#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
-void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
-void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
-void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
-u16 igb_rxpbs_adjust_82580(u32 data);
-s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);
-s32 igb_set_eee_i350(struct e1000_hw *);
-s32 igb_set_eee_i354(struct e1000_hw *);
-s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *);
-s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw);
-
+#define ALL_QUEUES 0xFFFF
+
+/* Rx packet buffer size defines */
+#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf);
+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
+s32 e1000_init_hw_82575(struct e1000_hw *hw);
+
+void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
+u16 e1000_rxpbs_adjust_82580(u32 data);
+s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data);
+s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M);
+s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M);
+s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *);
+s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw);
+s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw);
#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
#define E1000_EMC_INTERNAL_DATA 0x00
#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20
@@ -278,4 +487,26 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw);
#define E1000_EMC_DIODE2_THERM_LIMIT 0x1A
#define E1000_EMC_DIODE3_DATA 0x2A
#define E1000_EMC_DIODE3_THERM_LIMIT 0x30
-#endif
+
+s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw);
+s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw);
+
+/* I2C SDA and SCL timing parameters for standard mode */
+#define E1000_I2C_T_HD_STA 4
+#define E1000_I2C_T_LOW 5
+#define E1000_I2C_T_HIGH 4
+#define E1000_I2C_T_SU_STA 5
+#define E1000_I2C_T_HD_DATA 5
+#define E1000_I2C_T_SU_DATA 1
+#define E1000_I2C_T_RISE 1
+#define E1000_I2C_T_FALL 1
+#define E1000_I2C_T_SU_STO 4
+#define E1000_I2C_T_BUF 5
+
+s32 e1000_set_i2c_bb(struct e1000_hw *hw);
+s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+void e1000_i2c_bus_clear(struct e1000_hw *hw);
+#endif /* _E1000_82575_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_api.c b/drivers/net/ethernet/intel/igb/e1000_api.c
new file mode 100644
index 000000000000..a74ab59aafb8
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_api.c
@@ -0,0 +1,1161 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2015 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+
+/**
+ * e1000_init_mac_params - Initialize MAC function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the MAC
+ * set of functions. Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_mac_params(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ if (hw->mac.ops.init_params) {
+ ret_val = hw->mac.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("MAC Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("mac.init_mac_params was NULL\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the NVM
+ * set of functions. Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_nvm_params(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ if (hw->nvm.ops.init_params) {
+ ret_val = hw->nvm.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("NVM Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("nvm.init_nvm_params was NULL\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_phy_params - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the PHY
+ * set of functions. Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_phy_params(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ if (hw->phy.ops.init_params) {
+ ret_val = hw->phy.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("PHY Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("phy.init_phy_params was NULL\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_mbx_params - Initialize mailbox function pointers
+ * @hw: pointer to the HW structure
+ *
+ * This function initializes the function pointers for the mailbox
+ * set of functions. Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_mbx_params(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ if (hw->mbx.ops.init_params) {
+ ret_val = hw->mbx.ops.init_params(hw);
+ if (ret_val) {
+ DEBUGOUT("Mailbox Initialization Error\n");
+ goto out;
+ }
+ } else {
+ DEBUGOUT("mbx.init_mbx_params was NULL\n");
+ ret_val = -E1000_ERR_CONFIG;
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * device ID stored in the hw structure.
+ * MUST BE FIRST FUNCTION CALLED (explicitly or through
+ * e1000_setup_init_funcs()).
+ **/
+s32 e1000_set_mac_type(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_set_mac_type");
+
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82575EB_COPPER:
+ case E1000_DEV_ID_82575EB_FIBER_SERDES:
+ case E1000_DEV_ID_82575GB_QUAD_COPPER:
+ mac->type = e1000_82575;
+ break;
+ case E1000_DEV_ID_82576:
+ case E1000_DEV_ID_82576_FIBER:
+ case E1000_DEV_ID_82576_SERDES:
+ case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+ case E1000_DEV_ID_82576_NS:
+ case E1000_DEV_ID_82576_NS_SERDES:
+ case E1000_DEV_ID_82576_SERDES_QUAD:
+ mac->type = e1000_82576;
+ break;
+ case E1000_DEV_ID_82580_COPPER:
+ case E1000_DEV_ID_82580_FIBER:
+ case E1000_DEV_ID_82580_SERDES:
+ case E1000_DEV_ID_82580_SGMII:
+ case E1000_DEV_ID_82580_COPPER_DUAL:
+ case E1000_DEV_ID_82580_QUAD_FIBER:
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ case E1000_DEV_ID_DH89XXCC_SERDES:
+ case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+ case E1000_DEV_ID_DH89XXCC_SFP:
+ mac->type = e1000_82580;
+ break;
+ case E1000_DEV_ID_I350_COPPER:
+ case E1000_DEV_ID_I350_FIBER:
+ case E1000_DEV_ID_I350_SERDES:
+ case E1000_DEV_ID_I350_SGMII:
+ case E1000_DEV_ID_I350_DA4:
+ mac->type = e1000_i350;
+ break;
+ case E1000_DEV_ID_I210_COPPER_FLASHLESS:
+ case E1000_DEV_ID_I210_SERDES_FLASHLESS:
+ case E1000_DEV_ID_I210_TOOLS_ONLY:
+ case E1000_DEV_ID_I210_COPPER:
+ case E1000_DEV_ID_I210_COPPER_OEM1:
+ case E1000_DEV_ID_I210_COPPER_IT:
+ case E1000_DEV_ID_I210_FIBER:
+ case E1000_DEV_ID_I210_SERDES:
+ case E1000_DEV_ID_I210_SGMII:
+ mac->type = e1000_i210;
+ break;
+ case E1000_DEV_ID_I211_TOOLS_ONLY:
+ case E1000_DEV_ID_I211_COPPER:
+ mac->type = e1000_i211;
+ break;
+
+ case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
+ case E1000_DEV_ID_I354_SGMII:
+ case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
+ mac->type = e1000_i354;
+ break;
+ default:
+ /* Should never have loaded on this device */
+ ret_val = -E1000_ERR_MAC_INIT;
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_setup_init_funcs - Initializes function pointers
+ * @hw: pointer to the HW structure
+ * @init_device: true will initialize the rest of the function pointers
+ * getting the device ready for use. false will only set
+ * MAC type and the function pointers for the other init
+ * functions. Passing false will not generate any hardware
+ * reads or writes.
+ *
+ * This function must be called by a driver in order to use the rest
+ * of the 'shared' code files. Called by drivers only.
+ **/
+s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
+{
+ s32 ret_val;
+
+ /* Can't do much good without knowing the MAC type. */
+ ret_val = e1000_set_mac_type(hw);
+ if (ret_val) {
+ DEBUGOUT("ERROR: MAC type could not be set properly.\n");
+ goto out;
+ }
+
+ if (!hw->hw_addr) {
+ DEBUGOUT("ERROR: Registers not mapped\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ /*
+ * Init function pointers to generic implementations. We do this first
+ * allowing a driver module to override it afterward.
+ */
+ e1000_init_mac_ops_generic(hw);
+ e1000_init_phy_ops_generic(hw);
+ e1000_init_nvm_ops_generic(hw);
+ e1000_init_mbx_ops_generic(hw);
+
+ /*
+ * Set up the init function pointers. These are functions within the
+ * adapter family file that sets up function pointers for the rest of
+ * the functions in that family.
+ */
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ e1000_init_function_pointers_82575(hw);
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ e1000_init_function_pointers_i210(hw);
+ break;
+ default:
+ DEBUGOUT("Hardware not supported\n");
+ ret_val = -E1000_ERR_CONFIG;
+ break;
+ }
+
+ /*
+ * Initialize the rest of the function pointers. These require some
+ * register reads/writes in some cases.
+ */
+ if (!(ret_val) && init_device) {
+ ret_val = e1000_init_mac_params(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_init_nvm_params(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_init_phy_params(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_init_mbx_params(hw);
+ if (ret_val)
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
+
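+/*
+ * Illustrative usage sketch (an assumption, not code from this driver): a
+ * probe path typically maps the registers, fills in hw->device_id, and then
+ * calls e1000_setup_init_funcs(). Passing false resolves only the MAC type
+ * and the per-family init pointers without touching hardware; passing true
+ * additionally reads the MAC/NVM/PHY/mailbox parameters:
+ *
+ *	hw->hw_addr = mapped_bar0;
+ *	hw->device_id = pci_device_id;
+ *	if (e1000_setup_init_funcs(hw, true))
+ *		return -EIO;
+ *	e1000_get_bus_info(hw);
+ */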
+/**
+ * e1000_get_bus_info - Obtain bus information for adapter
+ * @hw: pointer to the HW structure
+ *
+ * This obtains information about the HW bus to which the adapter is
+ * attached and stores it in the hw structure. This is a
+ * function pointer entry point called by drivers.
+ **/
+s32 e1000_get_bus_info(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.get_bus_info)
+ return hw->mac.ops.get_bus_info(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_clear_vfta - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * This clears the VLAN filter table on the adapter. This is a function
+ * pointer entry point called by drivers.
+ **/
+void e1000_clear_vfta(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.clear_vfta)
+ hw->mac.ops.clear_vfta(hw);
+}
+
+/**
+ * e1000_write_vfta - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: the 32-bit offset at which to write the value
+ * @value: the 32-bit value to write at the given offset
+ *
+ * This writes a 32-bit value to a 32-bit offset in the VLAN filter
+ * table. This is a function pointer entry point called by drivers.
+ **/
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+ if (hw->mac.ops.write_vfta)
+ hw->mac.ops.write_vfta(hw, offset, value);
+}
+
+/**
+ * e1000_update_mc_addr_list - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ *
+ * Updates the Multicast Table Array.
+ * The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count)
+{
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, mc_addr_list,
+ mc_addr_count);
+}
+
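+/*
+ * Illustrative sketch of the "packed" list expected above (the buffer and
+ * addresses are assumptions for illustration only): entries are stored back
+ * to back, 6 bytes each, with no gaps between them:
+ *
+ *	u8 mta_list[2 * 6];
+ *
+ *	memcpy(&mta_list[0], addr0, 6);
+ *	memcpy(&mta_list[6], addr1, 6);
+ *	e1000_update_mc_addr_list(hw, mta_list, 2);
+ */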
+/**
+ * e1000_force_mac_fc - Force MAC flow control
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings. Currently no func pointer exists
+ * and all implementations are handled in the generic version of this
+ * function.
+ **/
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+ return e1000_force_mac_fc_generic(hw);
+}
+
+/**
+ * e1000_check_for_link - Check/Store link connection
+ * @hw: pointer to the HW structure
+ *
+ * This checks the link condition of the adapter and stores the
+ * results in the hw->mac structure. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 e1000_check_for_link(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.check_for_link)
+ return hw->mac.ops.check_for_link(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_check_mng_mode - Check management mode
+ * @hw: pointer to the HW structure
+ *
+ * This checks if the adapter has manageability enabled.
+ * This is a function pointer entry point called by drivers.
+ **/
+bool e1000_check_mng_mode(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.check_mng_mode)
+ return hw->mac.ops.check_mng_mode(hw);
+
+ return false;
+}
+
+/**
+ * e1000_mng_write_dhcp_info - Writes DHCP info to host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface
+ * @length: size of the buffer
+ *
+ * Writes the DHCP information to the host interface.
+ **/
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+{
+ return e1000_mng_write_dhcp_info_generic(hw, buffer, length);
+}
+
+/**
+ * e1000_reset_hw - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state. This is a function pointer
+ * entry point called by drivers.
+ **/
+s32 e1000_reset_hw(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.reset_hw)
+ return hw->mac.ops.reset_hw(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_init_hw - Initialize hardware
+ * @hw: pointer to the HW structure
+ *
+ * This inits the hardware readying it for operation. This is a function
+ * pointer entry point called by drivers.
+ **/
+s32 e1000_init_hw(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.init_hw)
+ return hw->mac.ops.init_hw(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_setup_link - Configures link and flow control
+ * @hw: pointer to the HW structure
+ *
+ * This configures link and flow control settings for the adapter. This
+ * is a function pointer entry point called by drivers. While modules can
+ * also call this, they probably call their own version of this function.
+ **/
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.setup_link)
+ return hw->mac.ops.setup_link(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_get_speed_and_duplex - Returns current speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to a 16-bit value to store the speed
+ * @duplex: pointer to a 16-bit value to store the duplex.
+ *
+ * This returns the speed and duplex of the adapter in the two 'out'
+ * variables passed in. This is a function pointer entry point called
+ * by drivers.
+ **/
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+ if (hw->mac.ops.get_link_up_info)
+ return hw->mac.ops.get_link_up_info(hw, speed, duplex);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_setup_led - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use and saves the current state
+ * of the LED so it can be later restored. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 e1000_setup_led(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.setup_led)
+ return hw->mac.ops.setup_led(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_cleanup_led - Restores SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This restores the SW controllable LED to the value saved off by
+ * e1000_setup_led. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_cleanup_led(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.cleanup_led)
+ return hw->mac.ops.cleanup_led(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_blink_led - Blink SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This starts the adapter LED blinking. Request the LED to be setup first
+ * and cleaned up after. This is a function pointer entry point called by
+ * drivers.
+ **/
+s32 e1000_blink_led(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.blink_led)
+ return hw->mac.ops.blink_led(hw);
+
+ return E1000_SUCCESS;
+}
+
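+/*
+ * Illustrative LED identify sketch following the ordering described above
+ * (set up first, clean up after); this mirrors an ethtool-style "identify"
+ * sequence and is for illustration only:
+ *
+ *	e1000_setup_led(hw);
+ *	e1000_blink_led(hw);
+ *	...			(blink for as long as identification runs)
+ *	e1000_cleanup_led(hw);
+ */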
+/**
+ * e1000_id_led_init - store LED configurations in SW
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the LED config in SW. This is a function pointer entry point
+ * called by drivers.
+ **/
+s32 e1000_id_led_init(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.id_led_init)
+ return hw->mac.ops.id_led_init(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_on - Turn on SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * Turns the SW defined LED on. This is a function pointer entry point
+ * called by drivers.
+ **/
+s32 e1000_led_on(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.led_on)
+ return hw->mac.ops.led_on(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off - Turn off SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * Turns the SW defined LED off. This is a function pointer entry point
+ * called by drivers.
+ **/
+s32 e1000_led_off(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.led_off)
+ return hw->mac.ops.led_off(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_reset_adaptive - Reset adaptive IFS
+ * @hw: pointer to the HW structure
+ *
+ * Resets the adaptive IFS. Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+void e1000_reset_adaptive(struct e1000_hw *hw)
+{
+ e1000_reset_adaptive_generic(hw);
+}
+
+/**
+ * e1000_update_adaptive - Update adaptive IFS
+ * @hw: pointer to the HW structure
+ *
+ * Updates adapter IFS. Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+void e1000_update_adaptive(struct e1000_hw *hw)
+{
+ e1000_update_adaptive_generic(hw);
+}
+
+/**
+ * e1000_disable_pcie_master - Disable PCI-Express master access
+ * @hw: pointer to the HW structure
+ *
+ * Disables PCI-Express master access and verifies there are no pending
+ * requests. Currently no func pointer exists and all implementations are
+ * handled in the generic version of this function.
+ **/
+s32 e1000_disable_pcie_master(struct e1000_hw *hw)
+{
+ return e1000_disable_pcie_master_generic(hw);
+}
+
+/**
+ * e1000_config_collision_dist - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup.
+ **/
+void e1000_config_collision_dist(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.config_collision_dist)
+ hw->mac.ops.config_collision_dist(hw);
+}
+
+/**
+ * e1000_rar_set - Sets a receive address register
+ * @hw: pointer to the HW structure
+ * @addr: address to set the RAR to
+ * @index: the RAR to set
+ *
+ * Sets a Receive Address Register (RAR) to the specified address.
+ **/
+int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+ if (hw->mac.ops.rar_set)
+ return hw->mac.ops.rar_set(hw, addr, index);
+
+ return E1000_SUCCESS;
+}
+
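+/*
+ * Illustrative sketch (for illustration only): the station address held in
+ * hw->mac.addr is normally programmed into receive address register 0, with
+ * additional unicast filters placed at higher indices:
+ *
+ *	e1000_rar_set(hw, hw->mac.addr, 0);
+ */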
+/**
+ * e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
+ * @hw: pointer to the HW structure
+ *
+ * Ensures that the MDI/MDIX SW state is valid.
+ **/
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.validate_mdi_setting)
+ return hw->mac.ops.validate_mdi_setting(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_hash_mc_addr - Determines address location in multicast table
+ * @hw: pointer to the HW structure
+ * @mc_addr: Multicast address to hash.
+ *
+ * This hashes an address to determine its location in the multicast
+ * table. Currently no func pointer exists and all implementations
+ * are handled in the generic version of this function.
+ **/
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+ return e1000_hash_mc_addr_generic(hw, mc_addr);
+}
+
+/**
+ * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX
+ * @hw: pointer to the HW structure
+ *
+ * Enables packet filtering on transmit packets if manageability is enabled
+ * and host interface is enabled.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+ return e1000_enable_tx_pkt_filtering_generic(hw);
+}
+
+/**
+ * e1000_mng_host_if_write - Writes to the manageability host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface buffer
+ * @length: size of the buffer
+ * @offset: location in the buffer to write to
+ * @sum: sum of the data (not checksum)
+ *
+ * This function writes the buffer content at the given offset on the host
+ * interface. It handles alignment so the writes are done in the most
+ * efficient way, and it accumulates the sum of the written data in the *sum
+ * parameter.
+ **/
+s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+ u16 offset, u8 *sum)
+{
+ return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum);
+}
+
+/**
+ * e1000_mng_write_cmd_header - Writes manageability command header
+ * @hw: pointer to the HW structure
+ * @hdr: pointer to the host interface command header
+ *
+ * Writes the command header after performing the checksum calculation.
+ **/
+s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr)
+{
+ return e1000_mng_write_cmd_header_generic(hw, hdr);
+}
+
+/**
+ * e1000_mng_enable_host_if - Checks host interface is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ * This function checks whether the HOST IF is enabled for command operation
+ * and also checks whether the previous command has completed. It busy waits
+ * if the previous command has not yet completed.
+ **/
+s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
+{
+ return e1000_mng_enable_host_if_generic(hw);
+}
+
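+/*
+ * Illustrative ordering sketch for the manageability host interface helpers
+ * above (buffer, length and header values are assumptions for illustration
+ * only): the interface is claimed first, the payload is written next, and
+ * the command header with its checksum is written last:
+ *
+ *	u8 sum = 0;
+ *
+ *	if (e1000_mng_enable_host_if(hw) == E1000_SUCCESS) {
+ *		e1000_mng_host_if_write(hw, buf, len, sizeof(hdr), &sum);
+ *		e1000_mng_write_cmd_header(hw, &hdr);
+ *	}
+ */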
+/**
+ * e1000_check_reset_block - Verifies PHY can be reset
+ * @hw: pointer to the HW structure
+ *
+ * Checks if the PHY is in a state that can be reset or if manageability
+ * has it tied up. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_check_reset_block(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.check_reset_block)
+ return hw->phy.ops.check_reset_block(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_phy_reg - Reads PHY register
+ * @hw: pointer to the HW structure
+ * @offset: the register to read
+ * @data: the buffer to store the 16-bit read.
+ *
+ * Reads the PHY register and returns the value in data.
+ * This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ if (hw->phy.ops.read_reg)
+ return hw->phy.ops.read_reg(hw, offset, data);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg - Writes PHY register
+ * @hw: pointer to the HW structure
+ * @offset: the register to write
+ * @data: the value to write.
+ *
+ * Writes the PHY register at offset with the value in data.
+ * This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ if (hw->phy.ops.write_reg)
+ return hw->phy.ops.write_reg(hw, offset, data);
+
+ return E1000_SUCCESS;
+}
+
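+/*
+ * Illustrative read-modify-write sketch using the two PHY register wrappers
+ * above. PHY_CONTROL and MII_CR_AUTO_NEG_EN are the usual MII register 0
+ * names in this code base, but treat them as assumptions here:
+ *
+ *	u16 phy_ctrl;
+ *
+ *	if (!e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
+ *		phy_ctrl |= MII_CR_AUTO_NEG_EN;
+ *		e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
+ *	}
+ */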
+/**
+ * e1000_release_phy - Generic release PHY
+ * @hw: pointer to the HW structure
+ *
+ * Returns immediately if the silicon family does not require a semaphore
+ * when accessing the PHY.
+ **/
+void e1000_release_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.release)
+ hw->phy.ops.release(hw);
+}
+
+/**
+ * e1000_acquire_phy - Generic acquire PHY
+ * @hw: pointer to the HW structure
+ *
+ * Returns success if the silicon family does not require a semaphore when
+ * accessing the PHY.
+ **/
+s32 e1000_acquire_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.acquire)
+ return hw->phy.ops.acquire(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_kmrn_reg - Reads register using Kumeran interface
+ * @hw: pointer to the HW structure
+ * @offset: the register to read
+ * @data: the location to store the 16-bit value read.
+ *
+ * Reads a register out of the Kumeran interface. Currently no func pointer
+ * exists and all implementations are handled in the generic version of
+ * this function.
+ **/
+s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return e1000_read_kmrn_reg_generic(hw, offset, data);
+}
+
+/**
+ * e1000_write_kmrn_reg - Writes register using Kumeran interface
+ * @hw: pointer to the HW structure
+ * @offset: the register to write
+ * @data: the value to write.
+ *
+ * Writes a register to the Kumeran interface. Currently no func pointer
+ * exists and all implementations are handled in the generic version of
+ * this function.
+ **/
+s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return e1000_write_kmrn_reg_generic(hw, offset, data);
+}
+
+/**
+ * e1000_get_cable_length - Retrieves cable length estimation
+ * @hw: pointer to the HW structure
+ *
+ * This function estimates the cable length and stores the estimates in
+ * hw->phy.min_length and hw->phy.max_length. This is a function pointer
+ * entry point called by drivers.
+ **/
+s32 e1000_get_cable_length(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.get_cable_length)
+ return hw->phy.ops.get_cable_length(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_info - Retrieves PHY information from registers
+ * @hw: pointer to the HW structure
+ *
+ * This function gets some information from various PHY registers and
+ * populates hw->phy values with it. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 e1000_get_phy_info(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.get_info)
+ return hw->phy.ops.get_info(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_hw_reset - Hard PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Performs a hard PHY reset. This is a function pointer entry point called
+ * by drivers.
+ **/
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.reset)
+ return hw->phy.ops.reset(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_commit - Soft PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Performs a soft PHY reset on those that apply. This is a function pointer
+ * entry point called by drivers.
+ **/
+s32 e1000_phy_commit(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.commit)
+ return hw->phy.ops.commit(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d0_lplu_state - Sets low power link up state for D0
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (LPLU) state is set to the power management level
+ * D0 and SmartSpeed is disabled when active is true; otherwise LPLU is
+ * cleared for D0 and SmartSpeed is enabled. LPLU and SmartSpeed are
+ * mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+{
+ if (hw->phy.ops.set_d0_lplu_state)
+ return hw->phy.ops.set_d0_lplu_state(hw, active);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d3_lplu_state - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns 1
+ *
+ * The low power link up (LPLU) state is set to the power management level
+ * D3 and SmartSpeed is disabled when active is true; otherwise LPLU is
+ * cleared for D3 and SmartSpeed is enabled. LPLU and SmartSpeed are
+ * mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+ if (hw->phy.ops.set_d3_lplu_state)
+ return hw->phy.ops.set_d3_lplu_state(hw, active);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_mac_addr - Reads MAC address
+ * @hw: pointer to the HW structure
+ *
+ * Reads the MAC address out of the adapter and stores it in the HW structure.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 e1000_read_mac_addr(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.read_mac_addr)
+ return hw->mac.ops.read_mac_addr(hw);
+
+ return e1000_read_mac_addr_generic(hw);
+}
+
+/**
+ * e1000_read_pba_string - Read device part number string
+ * @hw: pointer to the HW structure
+ * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number from the EEPROM and stores
+ * the value in pba_num.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+ return e1000_read_pba_string_generic(hw, pba_num, pba_num_size);
+}
+
+/**
+ * e1000_read_pba_length - Read device part number string length
+ * @hw: pointer to the HW structure
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number length from the EEPROM and
+ * stores the value in pba_num_size.
+ * Currently no func pointer exists and all implementations are handled in the
+ * generic version of this function.
+ **/
+s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size)
+{
+ return e1000_read_pba_length_generic(hw, pba_num_size);
+}
+
+/**
+ * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
+ * @hw: pointer to the HW structure
+ *
+ * Validates the NVM checksum is correct. This is a function pointer entry
+ * point called by drivers.
+ **/
+s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
+{
+ if (hw->nvm.ops.validate)
+ return hw->nvm.ops.validate(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the NVM checksum. Currently no func pointer exists and all
+ * implementations are handled in the generic version of this function.
+ **/
+s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
+{
+ if (hw->nvm.ops.update)
+ return hw->nvm.ops.update(hw);
+
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_reload_nvm - Reloads EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ * extended control register.
+ **/
+void e1000_reload_nvm(struct e1000_hw *hw)
+{
+ if (hw->nvm.ops.reload)
+ hw->nvm.ops.reload(hw);
+}
+
+/**
+ * e1000_read_nvm - Reads NVM (EEPROM)
+ * @hw: pointer to the HW structure
+ * @offset: the word offset to read
+ * @words: number of 16-bit words to read
+ * @data: pointer to the properly sized buffer for the data.
+ *
+ * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function
+ * pointer entry point called by drivers.
+ **/
+s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ if (hw->nvm.ops.read)
+ return hw->nvm.ops.read(hw, offset, words, data);
+
+ return -E1000_ERR_CONFIG;
+}
+
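+/*
+ * Illustrative sketch (offset chosen for illustration only): reading a
+ * single 16-bit word from the NVM, here word 0 of the image:
+ *
+ *	u16 word;
+ *
+ *	if (e1000_read_nvm(hw, 0, 1, &word))
+ *		DEBUGOUT("NVM read failed\n");
+ */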
+/**
+ * e1000_write_nvm - Writes to NVM (EEPROM)
+ * @hw: pointer to the HW structure
+ * @offset: the word offset to write to
+ * @words: number of 16-bit words to write
+ * @data: pointer to the properly sized buffer for the data.
+ *
+ * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function
+ * pointer entry point called by drivers.
+ **/
+s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+ if (hw->nvm.ops.write)
+ return hw->nvm.ops.write(hw, offset, words, data);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_8bit_ctrl_reg - Writes 8bit Control register
+ * @hw: pointer to the HW structure
+ * @reg: 32bit register offset
+ * @offset: the register to write
+ * @data: the value to write.
+ *
+ * Writes the 8-bit data value at the given offset of control register reg.
+ * This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
+ u8 data)
+{
+ return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data);
+}
+
+/**
+ * e1000_power_up_phy - Restores link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * The PHY may be powered down to save power, to turn off the link when the
+ * driver is unloaded, or when wake on LAN is not enabled (among other
+ * reasons).
+ **/
+void e1000_power_up_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.power_up)
+ hw->phy.ops.power_up(hw);
+
+ e1000_setup_link(hw);
+}
+
+/**
+ * e1000_power_down_phy - Power down PHY
+ * @hw: pointer to the HW structure
+ *
+ * The PHY may be powered down to save power, to turn off the link when the
+ * driver is unloaded, or when wake on LAN is not enabled (among other
+ * reasons).
+ **/
+void e1000_power_down_phy(struct e1000_hw *hw)
+{
+ if (hw->phy.ops.power_down)
+ hw->phy.ops.power_down(hw);
+}
+
+/**
+ * e1000_power_up_fiber_serdes_link - Power up serdes link
+ * @hw: pointer to the HW structure
+ *
+ * Power on the optics and PCS.
+ **/
+void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.power_up_serdes)
+ hw->mac.ops.power_up_serdes(hw);
+}
+
+/**
+ * e1000_shutdown_fiber_serdes_link - Remove link during power down
+ * @hw: pointer to the HW structure
+ *
+ * Shutdown the optics and PCS on driver unload.
+ **/
+void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.shutdown_serdes)
+ hw->mac.ops.shutdown_serdes(hw);
+}
+
+/**
+ * e1000_get_thermal_sensor_data - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Updates the temperatures in mac.thermal_sensor_data
+ **/
+s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.get_thermal_sensor_data)
+ return hw->mac.ops.get_thermal_sensor_data(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_thermal_sensor_thresh - Sets thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Sets the thermal sensor thresholds according to the NVM map
+ **/
+s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.init_thermal_sensor_thresh)
+ return hw->mac.ops.init_thermal_sensor_thresh(hw);
+
+ return E1000_SUCCESS;
+}
+
diff --git a/drivers/net/ethernet/intel/igb/e1000_api.h b/drivers/net/ethernet/intel/igb/e1000_api.h
new file mode 100644
index 000000000000..32fce254685a
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_api.h
@@ -0,0 +1,152 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2015 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_API_H_
+#define _E1000_API_H_
+
+#include "e1000_hw.h"
+
+extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_vf(struct e1000_hw *hw);
+extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw);
+extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
+extern void e1000_init_function_pointers_i210(struct e1000_hw *hw);
+
+s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr);
+s32 e1000_set_mac_type(struct e1000_hw *hw);
+s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
+s32 e1000_init_mac_params(struct e1000_hw *hw);
+s32 e1000_init_nvm_params(struct e1000_hw *hw);
+s32 e1000_init_phy_params(struct e1000_hw *hw);
+s32 e1000_init_mbx_params(struct e1000_hw *hw);
+s32 e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_clear_vfta(struct e1000_hw *hw);
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+s32 e1000_force_mac_fc(struct e1000_hw *hw);
+s32 e1000_check_for_link(struct e1000_hw *hw);
+s32 e1000_reset_hw(struct e1000_hw *hw);
+s32 e1000_init_hw(struct e1000_hw *hw);
+s32 e1000_setup_link(struct e1000_hw *hw);
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+s32 e1000_disable_pcie_master(struct e1000_hw *hw);
+void e1000_config_collision_dist(struct e1000_hw *hw);
+int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
+void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count);
+s32 e1000_setup_led(struct e1000_hw *hw);
+s32 e1000_cleanup_led(struct e1000_hw *hw);
+s32 e1000_check_reset_block(struct e1000_hw *hw);
+s32 e1000_blink_led(struct e1000_hw *hw);
+s32 e1000_led_on(struct e1000_hw *hw);
+s32 e1000_led_off(struct e1000_hw *hw);
+s32 e1000_id_led_init(struct e1000_hw *hw);
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+s32 e1000_get_cable_length(struct e1000_hw *hw);
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
+ u8 data);
+s32 e1000_get_phy_info(struct e1000_hw *hw);
+void e1000_release_phy(struct e1000_hw *hw);
+s32 e1000_acquire_phy(struct e1000_hw *hw);
+s32 e1000_phy_hw_reset(struct e1000_hw *hw);
+s32 e1000_phy_commit(struct e1000_hw *hw);
+void e1000_power_up_phy(struct e1000_hw *hw);
+void e1000_power_down_phy(struct e1000_hw *hw);
+s32 e1000_read_mac_addr(struct e1000_hw *hw);
+s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size);
+s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size);
+void e1000_reload_nvm(struct e1000_hw *hw);
+s32 e1000_update_nvm_checksum(struct e1000_hw *hw);
+s32 e1000_validate_nvm_checksum(struct e1000_hw *hw);
+s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+bool e1000_check_mng_mode(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
+s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+ u16 offset, u8 *sum);
+s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr);
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
+s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw);
+s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw);
+
+/*
+ * TBI_ACCEPT macro definition:
+ *
+ * This macro requires:
+ * a = a pointer to struct e1000_hw
+ * status = the 8 bit status field of the Rx descriptor with EOP set
+ * errors = the 8 bit error field of the Rx descriptor with EOP set
+ * length = the sum of all the length fields of the Rx descriptors that
+ * make up the current frame
+ * last_byte = the last byte of the frame DMAed by the hardware
+ * min_frame_size = the minimum frame length we want to accept.
+ * max_frame_size = the maximum frame length we want to accept.
+ *
+ * This macro is a conditional that should be used in the interrupt
+ * handler's Rx processing routine when RxErrors have been detected.
+ *
+ * Typical use:
+ * ...
+ * if (TBI_ACCEPT) {
+ * accept_frame = true;
+ * e1000_tbi_adjust_stats(adapter, MacAddress);
+ * frame_length--;
+ * } else {
+ * accept_frame = false;
+ * }
+ * ...
+ */
+
+/* The carrier extension symbol, as received by the NIC. */
+#define CARRIER_EXTENSION 0x0F
+
+#define TBI_ACCEPT(a, status, errors, length, last_byte, \
+ min_frame_size, max_frame_size) \
+ (e1000_tbi_sbp_enabled_82543(a) && \
+ (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+ ((last_byte) == CARRIER_EXTENSION) && \
+ (((status) & E1000_RXD_STAT_VP) ? \
+ (((length) > ((min_frame_size) - VLAN_TAG_SIZE)) && \
+ ((length) <= ((max_frame_size) + 1))) : \
+ (((length) > (min_frame_size)) && \
+ ((length) <= ((max_frame_size) + VLAN_TAG_SIZE + 1)))))
+
+#ifndef E1000_MAX
+#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b))
+#endif
+#ifndef E1000_DIVIDE_ROUND_UP
+#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */
+#endif
+#endif /* _E1000_API_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 31a0f82cc650..6de398868abf 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,14 +12,11 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -34,355 +31,507 @@
/* Definitions for power management and wakeup registers */
/* Wake Up Control */
-#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
+#define E1000_WUC_APME 0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */
/* Wake Up Filter Control */
-#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
-#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
-#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
-#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
-#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC E1000_WUFC_LNKC
+#define E1000_WUS_MAG E1000_WUFC_MAG
+#define E1000_WUS_EX E1000_WUFC_EX
+#define E1000_WUS_MC E1000_WUFC_MC
+#define E1000_WUS_BC E1000_WUFC_BC
/* Extended Device Control */
-#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */
+#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */
+#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable Pin 3 data */
+#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */
+#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */
+#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
/* Physical Func Reset Done Indication */
-#define E1000_CTRL_EXT_PFRSTD 0x00004000
-#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
-#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
-#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
-#define E1000_CTRL_EXT_EIAME 0x01000000
-#define E1000_CTRL_EXT_IRCA 0x00000001
-/* Interrupt delay cancellation */
-/* Driver loaded bit for FW */
-#define E1000_CTRL_EXT_DRV_LOAD 0x10000000
-/* Interrupt acknowledge Auto-mask */
-/* Clear Interrupt timers after IMS clear */
-/* packet buffer parity error detection enabled */
-/* descriptor FIFO parity error detection enable */
-#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
-#define E1000_I2CCMD_REG_ADDR_SHIFT 16
-#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
-#define E1000_I2CCMD_OPCODE_READ 0x08000000
-#define E1000_I2CCMD_OPCODE_WRITE 0x00000000
-#define E1000_I2CCMD_READY 0x20000000
-#define E1000_I2CCMD_ERROR 0x80000000
-#define E1000_MAX_SGMII_PHY_REG_ADDR 255
-#define E1000_I2CCMD_PHY_TIMEOUT 200
-#define E1000_IVAR_VALID 0x80
-#define E1000_GPIE_NSICR 0x00000001
-#define E1000_GPIE_MSIX_MODE 0x00000010
-#define E1000_GPIE_EIAME 0x40000000
-#define E1000_GPIE_PBA 0x80000000
+#define E1000_CTRL_EXT_PFRSTD 0x00004000
+#define E1000_CTRL_EXT_SDLPE 0x00040000 /* SerDes Low Power Enable */
+#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+/* Offset of the link mode field in Ctrl Ext register */
+#define E1000_CTRL_EXT_LINK_MODE_OFFSET 22
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
+#define E1000_CTRL_EXT_EIAME 0x01000000
+#define E1000_CTRL_EXT_IRCA 0x00000001
+#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */
+#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
+#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_PHYPDEN 0x00100000
+#define E1000_I2CCMD_REG_ADDR_SHIFT 16
+#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
+#define E1000_I2CCMD_OPCODE_READ 0x08000000
+#define E1000_I2CCMD_OPCODE_WRITE 0x00000000
+#define E1000_I2CCMD_READY 0x20000000
+#define E1000_I2CCMD_ERROR 0x80000000
+#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a))
+#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a))
+#define E1000_MAX_SGMII_PHY_REG_ADDR 255
+#define E1000_I2CCMD_PHY_TIMEOUT 200
+#define E1000_IVAR_VALID 0x80
+#define E1000_GPIE_NSICR 0x00000001
+#define E1000_GPIE_MSIX_MODE 0x00000010
+#define E1000_GPIE_EIAME 0x40000000
+#define E1000_GPIE_PBA 0x80000000
/* Receive Descriptor bit definitions */
-#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
-#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
-#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
-#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
-#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
-#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
-#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */
-
-#define E1000_RXDEXT_STATERR_LB 0x00040000
-#define E1000_RXDEXT_STATERR_CE 0x01000000
-#define E1000_RXDEXT_STATERR_SE 0x02000000
-#define E1000_RXDEXT_STATERR_SEQ 0x04000000
-#define E1000_RXDEXT_STATERR_CXE 0x10000000
-#define E1000_RXDEXT_STATERR_TCPE 0x20000000
-#define E1000_RXDEXT_STATERR_IPE 0x40000000
-#define E1000_RXDEXT_STATERR_RXE 0x80000000
+#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
+#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
+#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */
+#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
+#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
+#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
+#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+
+#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */
+#define E1000_RXDEXT_STATERR_LB 0x00040000
+#define E1000_RXDEXT_STATERR_CE 0x01000000
+#define E1000_RXDEXT_STATERR_SE 0x02000000
+#define E1000_RXDEXT_STATERR_SEQ 0x04000000
+#define E1000_RXDEXT_STATERR_CXE 0x10000000
+#define E1000_RXDEXT_STATERR_TCPE 0x20000000
+#define E1000_RXDEXT_STATERR_IPE 0x40000000
+#define E1000_RXDEXT_STATERR_RXE 0x80000000
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+ E1000_RXD_ERR_CE | \
+ E1000_RXD_ERR_SE | \
+ E1000_RXD_ERR_SEQ | \
+ E1000_RXD_ERR_CXE | \
+ E1000_RXD_ERR_RXE)
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
- E1000_RXDEXT_STATERR_CE | \
- E1000_RXDEXT_STATERR_SE | \
- E1000_RXDEXT_STATERR_SEQ | \
- E1000_RXDEXT_STATERR_CXE | \
- E1000_RXDEXT_STATERR_RXE)
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
-#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
-#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
-#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
-#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
-#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000
/* Management Control */
-#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
-#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
-#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */
-/* Enable Neighbor Discovery Filtering */
-#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
-#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
+#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
/* Enable MAC address filtering */
-#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
+#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
+/* Enable MNG packets to host memory */
+#define E1000_MANC_EN_MNG2HOST 0x00200000
+
+#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
+#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
+#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */
+#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */
/* Receive Control */
-#define E1000_RCTL_EN 0x00000002 /* enable */
-#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
-#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
-#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */
-#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
-#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
-#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
-#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
-#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
-#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
-#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
-#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
-#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
-#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
-#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */
-#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
-#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
+#define E1000_RCTL_RST 0x00000001 /* Software reset */
+#define E1000_RCTL_EN 0x00000002 /* enable */
+#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
+#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */
+#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */
+#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
+#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
+#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
+#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
+#define E1000_RCTL_RDMTS_HEX 0x00010000
+#define E1000_RCTL_RDMTS1_HEX E1000_RCTL_RDMTS_HEX
+#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
+#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
+#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */
+#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */
+#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */
+#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
+#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
+#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
+#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */
+#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */
+#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
+#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
/* Use byte values for the following shift parameters
* Usage:
* psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
- * E1000_PSRCTL_BSIZE0_MASK) |
- * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
- * E1000_PSRCTL_BSIZE1_MASK) |
- * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
- * E1000_PSRCTL_BSIZE2_MASK) |
- * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |;
- * E1000_PSRCTL_BSIZE3_MASK))
+ * E1000_PSRCTL_BSIZE0_MASK) |
+ * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ * E1000_PSRCTL_BSIZE1_MASK) |
+ * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ * E1000_PSRCTL_BSIZE2_MASK) |
+ * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ * E1000_PSRCTL_BSIZE3_MASK))
* where value0 = [128..16256], default=256
* value1 = [1024..64512], default=4096
* value2 = [0..64512], default=4096
* value3 = [0..64512], default=0
*/
-#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
-#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
-#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
-#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
+#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000
-#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
-#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
-#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
-#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
+#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */
/* SWFW_SYNC Definitions */
-#define E1000_SWFW_EEP_SM 0x1
-#define E1000_SWFW_PHY0_SM 0x2
-#define E1000_SWFW_PHY1_SM 0x4
-#define E1000_SWFW_PHY2_SM 0x20
-#define E1000_SWFW_PHY3_SM 0x40
+#define E1000_SWFW_EEP_SM 0x01
+#define E1000_SWFW_PHY0_SM 0x02
+#define E1000_SWFW_PHY1_SM 0x04
+#define E1000_SWFW_CSR_SM 0x08
+#define E1000_SWFW_PHY2_SM 0x20
+#define E1000_SWFW_PHY3_SM 0x40
+#define E1000_SWFW_SW_MNG_SM 0x400
-/* FACTPS Definitions */
/* Device Control */
-#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
-#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
-#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
-#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
-#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
-#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
-#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
-#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
-#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
-#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
-#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
-/* Defined polarity of Dock/Undock indication in SDP[0] */
-/* Reset both PHY ports, through PHYRST_N pin */
-/* enable link status from external LINK_0 and LINK_1 pins */
-#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
-#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
-#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
-#define E1000_CTRL_RST 0x04000000 /* Global reset */
-#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
-#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
-#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
-#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
-/* Initiate an interrupt to manageability engine */
-#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */
-
-/* Bit definitions for the Management Data IO (MDIO) and Management Data
- * Clock (MDC) pins in the Device Control Register.
- */
-
-#define E1000_CONNSW_ENRGSRC 0x4
-#define E1000_PCS_CFG_PCS_EN 8
-#define E1000_PCS_LCTL_FLV_LINK_UP 1
-#define E1000_PCS_LCTL_FSV_100 2
-#define E1000_PCS_LCTL_FSV_1000 4
-#define E1000_PCS_LCTL_FDV_FULL 8
-#define E1000_PCS_LCTL_FSD 0x10
-#define E1000_PCS_LCTL_FORCE_LINK 0x20
-#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
-#define E1000_PCS_LCTL_AN_ENABLE 0x10000
-#define E1000_PCS_LCTL_AN_RESTART 0x20000
-#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
-#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
-
-#define E1000_PCS_LSTS_LINK_OK 1
-#define E1000_PCS_LSTS_SPEED_100 2
-#define E1000_PCS_LSTS_SPEED_1000 4
-#define E1000_PCS_LSTS_DUPLEX_FULL 8
-#define E1000_PCS_LSTS_SYNK_OK 0x10
+#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */
+#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */
+#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */
+#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */
+#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
+#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
+#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_RST 0x04000000 /* Global reset */
+#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
+#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
+#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */
+
+#define E1000_CONNSW_ENRGSRC 0x4
+#define E1000_CONNSW_PHYSD 0x400
+#define E1000_CONNSW_PHY_PDN 0x800
+#define E1000_CONNSW_SERDESD 0x200
+#define E1000_CONNSW_AUTOSENSE_CONF 0x2
+#define E1000_CONNSW_AUTOSENSE_EN 0x1
+#define E1000_PCS_CFG_PCS_EN 8
+#define E1000_PCS_LCTL_FLV_LINK_UP 1
+#define E1000_PCS_LCTL_FSV_10 0
+#define E1000_PCS_LCTL_FSV_100 2
+#define E1000_PCS_LCTL_FSV_1000 4
+#define E1000_PCS_LCTL_FDV_FULL 8
+#define E1000_PCS_LCTL_FSD 0x10
+#define E1000_PCS_LCTL_FORCE_LINK 0x20
+#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
+#define E1000_PCS_LCTL_AN_ENABLE 0x10000
+#define E1000_PCS_LCTL_AN_RESTART 0x20000
+#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
+#define E1000_ENABLE_SERDES_LOOPBACK 0x0410
+
+#define E1000_PCS_LSTS_LINK_OK 1
+#define E1000_PCS_LSTS_SPEED_100 2
+#define E1000_PCS_LSTS_SPEED_1000 4
+#define E1000_PCS_LSTS_DUPLEX_FULL 8
+#define E1000_PCS_LSTS_SYNK_OK 0x10
+#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
/* Device Status */
-#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
-#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
-#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
-#define E1000_STATUS_FUNC_SHIFT 2
-#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
-#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
-#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
-#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
-/* Change in Dock/Undock state. Clear on write '0'. */
-/* Status of Master requests. */
-#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000
-/* BMC external code execution disabled */
-
+#define E1000_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */
+#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
+#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */
+#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */
#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */
#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */
-/* Constants used to intrepret the masked PCI-X bus speed. */
-
-#define SPEED_10 10
-#define SPEED_100 100
-#define SPEED_1000 1000
-#define SPEED_2500 2500
-#define HALF_DUPLEX 1
-#define FULL_DUPLEX 2
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_2500 2500
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
-#define ADVERTISE_10_HALF 0x0001
-#define ADVERTISE_10_FULL 0x0002
-#define ADVERTISE_100_HALF 0x0004
-#define ADVERTISE_100_FULL 0x0008
-#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
-#define ADVERTISE_1000_FULL 0x0020
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL 0x0020
/* 1000/H is not supported, nor spec-compliant. */
-#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
- ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
- ADVERTISE_1000_FULL)
-#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
- ADVERTISE_100_HALF | ADVERTISE_100_FULL)
-#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
-#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
-#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \
- ADVERTISE_1000_FULL)
-#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
-
-#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
+#define E1000_ALL_SPEED_DUPLEX ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL | ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
+#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
/* LED Control */
-#define E1000_LEDCTL_LED0_MODE_SHIFT 0
-#define E1000_LEDCTL_LED0_BLINK 0x00000080
+#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT 0
+#define E1000_LEDCTL_LED0_IVRT 0x00000040
+#define E1000_LEDCTL_LED0_BLINK 0x00000080
-#define E1000_LEDCTL_MODE_LED_ON 0xE
-#define E1000_LEDCTL_MODE_LED_OFF 0xF
+#define E1000_LEDCTL_MODE_LED_ON 0xE
+#define E1000_LEDCTL_MODE_LED_OFF 0xF
/* Transmit Descriptor bit definitions */
-#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
-#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
-#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
-#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
-#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
-#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
-/* Extended desc bits for Linksec and timesync */
+#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
+#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
/* Transmit Control */
-#define E1000_TCTL_EN 0x00000002 /* enable tx */
-#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
-#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
-#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
-#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
-
-/* DMA Coalescing register fields */
-#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing
- * Watchdog Timer */
-#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
- * Threshold */
-#define E1000_DMACR_DMACTHR_SHIFT 16
-#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe
- * transactions */
-#define E1000_DMACR_DMAC_LX_SHIFT 28
-#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
-/* DMA Coalescing BMC-to-OS Watchdog Enable */
-#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
-
-#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit
- * Threshold */
-
-#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
-
-#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate
- * Threshold */
-#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
- * current window */
-
-#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic
- * Current Cnt */
-
-#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold
- * High val */
-#define E1000_FCRTC_RTH_COAL_SHIFT 4
-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
+#define E1000_TCTL_EN 0x00000002 /* enable Tx */
+#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
+#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
+#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
+#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
+#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
-/* Timestamp in Rx buffer */
-#define E1000_RXPBS_CFG_TS_EN 0x80000000
+/* Transmit Arbitration Count */
+#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */
/* SerDes Control */
-#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410
/* Receive Checksum Control */
-#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
-#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
-#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
-#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
+#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
/* Header split receive */
-#define E1000_RFCTL_LEF 0x00040000
+#define E1000_RFCTL_NFSW_DIS 0x00000040
+#define E1000_RFCTL_NFSR_DIS 0x00000080
+#define E1000_RFCTL_ACK_DIS 0x00001000
+#define E1000_RFCTL_EXTEN 0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+#define E1000_RFCTL_LEF 0x00040000
/* Collision related configuration parameters */
-#define E1000_COLLISION_THRESHOLD 15
-#define E1000_CT_SHIFT 4
-#define E1000_COLLISION_DISTANCE 63
-#define E1000_COLD_SHIFT 12
+#define E1000_COLLISION_THRESHOLD 15
+#define E1000_CT_SHIFT 4
+#define E1000_COLLISION_DISTANCE 63
+#define E1000_COLD_SHIFT 12
-/* Ethertype field values */
-#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
+/* Default values for the transmit IPG register */
+#define DEFAULT_82543_TIPG_IPGT_FIBER 9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK 0x000003FF
+
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT 10
-#define MAX_JUMBO_FRAME_SIZE 0x3F00
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT 20
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
+
+#define ETHERNET_FCS_SIZE 4
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
+/* The datasheet maximum supported RX size is 9.5KB (9728 bytes) */
+#define MAX_RX_JUMBO_FRAME_SIZE 0x2600
+#define E1000_TX_PTR_GAP 0x1F
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
+#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
+#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16
+
+#define E1000_PHY_CTRL_D0A_LPLU 0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
+
+#define E1000_KABGTXD_BGSQLBIAS 0x00050000
/* PBA constants */
-#define E1000_PBA_34K 0x0022
-#define E1000_PBA_64K 0x0040 /* 64KB */
+#define E1000_PBA_8K 0x0008 /* 8KB */
+#define E1000_PBA_10K 0x000A /* 10KB */
+#define E1000_PBA_12K 0x000C /* 12KB */
+#define E1000_PBA_14K 0x000E /* 14KB */
+#define E1000_PBA_16K 0x0010 /* 16KB */
+#define E1000_PBA_18K 0x0012
+#define E1000_PBA_20K 0x0014
+#define E1000_PBA_22K 0x0016
+#define E1000_PBA_24K 0x0018
+#define E1000_PBA_26K 0x001A
+#define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_35K 0x0023
+#define E1000_PBA_38K 0x0026
+#define E1000_PBA_40K 0x0028
+#define E1000_PBA_48K 0x0030 /* 48KB */
+#define E1000_PBA_64K 0x0040 /* 64KB */
+
+#define E1000_PBA_RXA_MASK 0xFFFF
+
+#define E1000_PBS_16K E1000_PBA_16K
+
+/* Uncorrectable/correctable ECC Error counts and enable bits */
+#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8
+#define E1000_PBECCSTS_ECC_ENABLE 0x00010000
+
+#define IFS_MAX 80
+#define IFS_MIN 40
+#define IFS_RATIO 4
+#define IFS_STEP 10
+#define MIN_NUM_XMITS 1000
/* SW Semaphore Register */
-#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
-#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */
+
+#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */
/* Interrupt Cause Read */
-#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
-#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
-#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
-#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
-#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
-#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
-#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */
-#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
+#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
+#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
+#define E1000_ICR_RXO 0x00000040 /* Rx overrun */
+#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
+#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */
+#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW 0x00008000
+#define E1000_ICR_MNG 0x00040000 /* Manageability event */
+#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
+#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */
+#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
/* If this bit asserted, the driver should claim the interrupt */
-#define E1000_ICR_INT_ASSERTED 0x80000000
-/* LAN connected device generates an interrupt */
-#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
+#define E1000_ICR_INT_ASSERTED 0x80000000
+#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
+#define E1000_ICR_FER 0x00400000 /* Fatal Error */
+
+#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/
+#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */
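As the context comment notes, E1000_ICR_INT_ASSERTED tells a driver on a shared line whether it should claim the interrupt at all. An illustrative sketch under that rule, assuming <linux/interrupt.h> types and hypothetical sketch_read_icr()/sketch_schedule_napi() helpers rather than the driver's real accessors:

static irqreturn_t sketch_isr(int irq, void *data)
{
	u32 icr = sketch_read_icr(data);	/* hypothetical ICR read */

	/* On a shared line, claim the interrupt only if the device raised it. */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & (E1000_ICR_LSC | E1000_ICR_RXT0 | E1000_ICR_TXDW))
		sketch_schedule_napi(data);	/* hypothetical bottom half */

	return IRQ_HANDLED;
}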
/* Extended Interrupt Cause Read */
-#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
-#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
-#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
-#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
-#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
-#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
-#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
-#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
-#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
+#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
+#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */
+#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */
+#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */
+#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
+#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
+#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
+#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
+#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
/* TCP Timer */
+#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */
+#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */
+#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */
+#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */
/* This defines the bits that are set in the Interrupt Mask
* Set/Read Register. Each bit is documented below:
@@ -393,238 +542,426 @@
* o LSC = Link Status Change
*/
#define IMS_ENABLE_MASK ( \
- E1000_IMS_RXT0 | \
- E1000_IMS_TXDW | \
- E1000_IMS_RXDMT0 | \
- E1000_IMS_RXSEQ | \
- E1000_IMS_LSC | \
- E1000_IMS_DOUTSYNC)
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC)
/* Interrupt Mask Set */
-#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
-#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
-#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
-#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */
-#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
-#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
-#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
-#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
-#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
-
+#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */
+#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */
+#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
+#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
+#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
+#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */
+#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
+#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW
+#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */
+#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */
+#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
+#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
+#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */
+
+#define E1000_IMS_THS E1000_ICR_THS /* ICR.TS: Thermal Sensor Event*/
+#define E1000_IMS_MDDET E1000_ICR_MDDET /* Malicious Driver Detect */
/* Extended Interrupt Mask Set */
-#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
+#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
/* Interrupt Cause Set */
-#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
-#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
-#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */
+#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
+#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
/* Extended Interrupt Cause Set */
+#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
+
+#define E1000_EITR_ITR_INT_MASK 0x0000FFFF
/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
-#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
-
+#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
+#define E1000_EITR_INTERVAL 0x00007FFC
/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
/* Enable the counting of descriptors still to be processed. */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000
/* Flow Control Constants */
-#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
-#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
-#define FLOW_CONTROL_TYPE 0x8808
-
-/* Transmit Config Word */
-#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
-#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
+#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE 0x8808
/* 802.1q VLAN Packet Size */
-#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
-#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
-/* Receive Address */
-/* Number of high/low register pairs in the RAR. The RAR (Receive Address
+/* Receive Address
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor.
* Technically, we have 16 spots. However, we reserve one of these spots
* (RAR[15]) for our directed address used by controllers with
* manageability enabled, allowing us room for 15 multicast addresses.
*/
-#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
-#define E1000_RAL_MAC_ADDR_LEN 4
-#define E1000_RAH_MAC_ADDR_LEN 2
-#define E1000_RAH_POOL_MASK 0x03FC0000
-#define E1000_RAH_POOL_1 0x00040000
+#define E1000_RAR_ENTRIES 15
+#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
+#define E1000_RAH_QUEUE_MASK_82575 0x000C0000
+#define E1000_RAH_POOL_1 0x00040000
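To make the Receive Address comment above concrete: each RAR entry is a RAL/RAH register pair, with the low four MAC bytes in RAL, the remaining two in RAH, and E1000_RAH_AV marking the entry valid. A small sketch of packing such a pair (pure bit assembly, no register access; the helper name is made up):

static void sketch_pack_rar(const u8 addr[6], u32 *ral, u32 *rah)
{
	*ral = (u32)addr[0] | ((u32)addr[1] << 8) |
	       ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
	*rah = (u32)addr[4] | ((u32)addr[5] << 8);
	*rah |= E1000_RAH_AV;		/* entry holds a valid address */
}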
/* Error Codes */
-#define E1000_SUCCESS 0
-#define E1000_ERR_NVM 1
-#define E1000_ERR_PHY 2
-#define E1000_ERR_CONFIG 3
-#define E1000_ERR_PARAM 4
-#define E1000_ERR_MAC_INIT 5
-#define E1000_ERR_RESET 9
-#define E1000_ERR_MASTER_REQUESTS_PENDING 10
-#define E1000_BLK_PHY_RESET 12
-#define E1000_ERR_SWFW_SYNC 13
-#define E1000_NOT_IMPLEMENTED 14
-#define E1000_ERR_MBX 15
-#define E1000_ERR_INVALID_ARGUMENT 16
-#define E1000_ERR_NO_SPACE 17
-#define E1000_ERR_NVM_PBA_SECTION 18
-#define E1000_ERR_INVM_VALUE_NOT_FOUND 19
-#define E1000_ERR_I2C 20
+#define E1000_SUCCESS 0
+#define E1000_ERR_NVM 1
+#define E1000_ERR_PHY 2
+#define E1000_ERR_CONFIG 3
+#define E1000_ERR_PARAM 4
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET 9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET 12
+#define E1000_ERR_SWFW_SYNC 13
+#define E1000_NOT_IMPLEMENTED 14
+#define E1000_ERR_MBX 15
+#define E1000_ERR_INVALID_ARGUMENT 16
+#define E1000_ERR_NO_SPACE 17
+#define E1000_ERR_NVM_PBA_SECTION 18
+#define E1000_ERR_I2C 19
+#define E1000_ERR_INVM_VALUE_NOT_FOUND 20
/* Loop limit on how long we wait for auto-negotiation to complete */
-#define COPPER_LINK_UP_LIMIT 10
-#define PHY_AUTO_NEG_LIMIT 45
-#define PHY_FORCE_LIMIT 20
+#define FIBER_LINK_UP_LIMIT 50
+#define COPPER_LINK_UP_LIMIT 10
+#define PHY_AUTO_NEG_LIMIT 45
+#define PHY_FORCE_LIMIT 20
/* Number of 100 microseconds we wait for PCI Express master disable */
-#define MASTER_DISABLE_TIMEOUT 800
+#define MASTER_DISABLE_TIMEOUT 800
/* Number of milliseconds we wait for PHY configuration done after MAC reset */
-#define PHY_CFG_TIMEOUT 100
+#define PHY_CFG_TIMEOUT 100
/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */
+#define MDIO_OWNERSHIP_TIMEOUT 10
/* Number of milliseconds for NVM auto read done after MAC reset. */
-#define AUTO_READ_DONE_TIMEOUT 10
+#define AUTO_READ_DONE_TIMEOUT 10
/* Flow Control */
-#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
-
-#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */
-#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */
-
-#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */
-#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */
-#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
-#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
-#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
-#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
-#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
-#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestampping */
-
-#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
-#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
-#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
-#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
-#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
-#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
-
-#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
-#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
-#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
-#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
-#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
-#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
-#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
-#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
-#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
-
-#define E1000_TIMINCA_16NS_SHIFT 24
-
-#define E1000_TSICR_TXTS 0x00000002
-#define E1000_TSIM_TXTS 0x00000002
-
-#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
-#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
-#define E1000_MDICNFG_PHY_MASK 0x03E00000
-#define E1000_MDICNFG_PHY_SHIFT 21
+#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */
+#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */
+#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
+#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
+#define E1000_RXCW_C 0x20000000 /* Receive config */
+#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
+
+#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
+
+#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
+#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
+
+#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
+#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
+#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
+#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
+
+#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
+#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
+#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
+#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
+#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
+#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
+
+#define E1000_TIMINCA_16NS_SHIFT 24
+#define E1000_TIMINCA_INCPERIOD_SHIFT 24
+#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF
+
+#define E1000_TSICR_TXTS 0x00000002
+#define E1000_TSIM_TXTS 0x00000002
+/* TUPLE Filtering Configuration */
+#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */
+#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */
+#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */
+/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_TCP 0x0
+/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_UDP 0x1
+/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_SCTP 0x2
+#define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */
+#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shift */
+#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */
+#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */
+#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */
+#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */
+#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */
+#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */
+
+#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
+#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
+#define E1000_MDICNFG_PHY_MASK 0x03E00000
+#define E1000_MDICNFG_PHY_SHIFT 21
+
+#define E1000_MEDIA_PORT_COPPER 1
+#define E1000_MEDIA_PORT_OTHER 2
+#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2
+#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3
+#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */
+#define E1000_M88E1112_MAC_CTRL_1 0x10
+#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
+#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7
+#define E1000_M88E1112_PAGE_ADDR 0x16
+#define E1000_M88E1112_STATUS 0x01
+
+#define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */
+#define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */
+#define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */
+#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */
+#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */
+
+/* I350 EEE defines */
+#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */
+#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */
+#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */
+#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */
+#define E1000_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */
+/* EEE status */
+#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
+#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */
+#define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */
+#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */
+#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */
+#define E1000_M88E1543_EEE_CTRL_1 0x0
+#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
+#define E1000_M88E1543_FIBER_CTRL 0x0 /* Fiber Control Register */
+#define E1000_EEE_ADV_DEV_I354 7
+#define E1000_EEE_ADV_ADDR_I354 60
+#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
+#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */
+#define E1000_PCS_STATUS_DEV_I354 3
+#define E1000_PCS_STATUS_ADDR_I354 1
+#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400
+#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800
+#define E1000_M88E1512_CFG_REG_1 0x0010
+#define E1000_M88E1512_CFG_REG_2 0x0011
+#define E1000_M88E1512_CFG_REG_3 0x0007
+#define E1000_M88E1512_MODE 0x0014
+#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */
+#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */
+#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */
/* PCI Express Control */
-#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
-#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
-#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
-#define E1000_GCR_CAP_VER2 0x00040000
-
-/* mPHY Address Control and Data Registers */
-#define E1000_MPHY_ADDR_CTL 0x0024 /* mPHY Address Control Register */
-#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
-#define E1000_MPHY_DATA 0x0E10 /* mPHY Data Register */
-
-/* mPHY PCS CLK Register */
-#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* mPHY PCS CLK AFE CSR Offset */
-/* mPHY Near End Digital Loopback Override Bit */
-#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
-
-#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
-#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
+#define E1000_GCR_RXD_NO_SNOOP 0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
+#define E1000_GCR_TXD_NO_SNOOP 0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
+#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
+#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
+#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000
+#define E1000_GCR_CAP_VER2 0x00040000
+
+#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
+ E1000_GCR_RXDSCW_NO_SNOOP | \
+ E1000_GCR_RXDSCR_NO_SNOOP | \
+ E1000_GCR_TXD_NO_SNOOP | \
+ E1000_GCR_TXDSCW_NO_SNOOP | \
+ E1000_GCR_TXDSCR_NO_SNOOP)
+
+#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
+
+/* mPHY address control and data registers */
+#define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */
+#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
+#define E1000_MPHY_DATA 0x0E10 /* Data Register */
+
+/* AFE CSR Offset for PCS CLK */
+#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004
+/* Override for near end digital loopback. */
+#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
/* PHY Control Register */
-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
-#define MII_CR_POWER_DOWN 0x0800 /* Power down */
-#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
-#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_1000 0x0040
-#define MII_CR_SPEED_100 0x2000
-#define MII_CR_SPEED_10 0x0000
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
/* PHY Status Register */
-#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
-#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
/* Autoneg Advertisement Register */
-#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
-#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
-#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
-#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
-#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
-#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
+#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
+#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
/* Link Partner Ability Register (Base Page) */
-#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
-#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */
+#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */
+#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */
+#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */
+#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */
/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD 0x0002 /* New page received */
+#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel Detection Fault */
/* 1000BASE-T Control Register */
-#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
-#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
-#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
- /* 0=Configure PHY as Slave */
-#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
- /* 0=Automatic Master/Slave config */
+#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
+/* 1=Repeater/switch device port 0=DTE device */
+#define CR_1000T_REPEATER_DTE 0x0400
+/* 1=Configure PHY as Master 0=Configure PHY as Slave */
+#define CR_1000T_MS_VALUE 0x0800
+/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */
+#define CR_1000T_MS_ENABLE 0x1000
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
/* 1000BASE-T Status Register */
-#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
-#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */
+#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */
+#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
/* PHY 1000 MII Register/Bit Definitions */
/* PHY Registers defined by IEEE */
-#define PHY_CONTROL 0x00 /* Control Register */
-#define PHY_STATUS 0x01 /* Status Register */
-#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
-#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
-#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
-#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
-#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
-#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_CONTROL 0x00 /* Control Register */
+#define PHY_STATUS 0x01 /* Status Register */
+#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
+
+#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
/* NVM Control */
-#define E1000_EECD_SK 0x00000001 /* NVM Clock */
-#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
-#define E1000_EECD_DI 0x00000004 /* NVM Data In */
-#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
-#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
-#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
-#define E1000_EECD_PRES 0x00000100 /* NVM Present */
+#define E1000_EECD_SK 0x00000001 /* NVM Clock */
+#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
+#define E1000_EECD_DI 0x00000004 /* NVM Data In */
+#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
+#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
+#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
+#define E1000_EECD_PRES 0x00000100 /* NVM Present */
+#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
+#define E1000_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */
+#define E1000_EECD_ABORT 0x00010000 /* NVM operation aborted flag */
+#define E1000_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */
+#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */
/* NVM Addressing bits based on type 0=small, 1=large */
-#define E1000_EECD_ADDR_BITS 0x00000400
-#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
-#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
-#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
-#define E1000_EECD_SIZE_EX_SHIFT 11
+#define E1000_EECD_ADDR_BITS 0x00000400
+#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
+#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
+#define E1000_EECD_SIZE_EX_SHIFT 11
+#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */
+#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
+#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
-#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
-#define E1000_FLUDONE_ATTEMPTS 20000
-#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
-#define E1000_I210_FIFO_SEL_RX 0x00
-#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i))
-#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0)
-#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06
-#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01
-#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
-#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
+#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */
+#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */
+#define E1000_EECD_SEC1VAL_I210 0x02000000 /* Sector One Valid */
#define E1000_FLUDONE_ATTEMPTS 20000
#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
#define E1000_I210_FIFO_SEL_RX 0x00
@@ -633,48 +970,72 @@
#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06
#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01
+#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */
+/* Secure FLASH mode requires removing MSb */
+#define E1000_I210_FW_PTR_MASK 0x7FFF
+/* Firmware code revision field word offset*/
+#define E1000_I210_FW_VER_OFFSET 328
-/* Offset to data in NVM read/write registers */
-#define E1000_NVM_RW_REG_DATA 16
-#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
-#define E1000_NVM_RW_REG_START 1 /* Start operation */
-#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
-#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
+#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */
+#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START 1 /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
+#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
+#define E1000_FLASH_UPDATES 2000
/* NVM Word Offsets */
-#define NVM_COMPAT 0x0003
-#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */
-#define NVM_VERSION 0x0005
-#define NVM_INIT_CONTROL2_REG 0x000F
-#define NVM_INIT_CONTROL3_PORT_B 0x0014
-#define NVM_INIT_CONTROL3_PORT_A 0x0024
-#define NVM_ALT_MAC_ADDR_PTR 0x0037
-#define NVM_CHECKSUM_REG 0x003F
-#define NVM_COMPATIBILITY_REG_3 0x0003
-#define NVM_COMPATIBILITY_BIT_MASK 0x8000
-#define NVM_MAC_ADDR 0x0000
-#define NVM_SUB_DEV_ID 0x000B
-#define NVM_SUB_VEN_ID 0x000C
-#define NVM_DEV_ID 0x000D
-#define NVM_VEN_ID 0x000E
-#define NVM_INIT_CTRL_2 0x000F
-#define NVM_INIT_CTRL_4 0x0013
-#define NVM_LED_1_CFG 0x001C
-#define NVM_LED_0_2_CFG 0x001F
+#define NVM_COMPAT 0x0003
+#define NVM_ID_LED_SETTINGS 0x0004
+#define NVM_VERSION 0x0005
+#define E1000_I210_NVM_FW_MODULE_PTR 0x0010
+#define E1000_I350_NVM_FW_MODULE_PTR 0x0051
+#define NVM_FUTURE_INIT_WORD1 0x0019
+#define NVM_ETRACK_WORD 0x0042
+#define NVM_ETRACK_HIWORD 0x0043
+#define NVM_COMB_VER_OFF 0x0083
+#define NVM_COMB_VER_PTR 0x003d
/* NVM version defines */
-#define NVM_ETRACK_WORD 0x0042
-#define NVM_COMB_VER_OFF 0x0083
-#define NVM_COMB_VER_PTR 0x003d
-#define NVM_MAJOR_MASK 0xF000
-#define NVM_MINOR_MASK 0x0FF0
-#define NVM_BUILD_MASK 0x000F
-#define NVM_COMB_VER_MASK 0x00FF
-#define NVM_MAJOR_SHIFT 12
-#define NVM_MINOR_SHIFT 4
-#define NVM_COMB_VER_SHFT 8
-#define NVM_VER_INVALID 0xFFFF
-#define NVM_ETRACK_SHIFT 16
+#define NVM_MAJOR_MASK 0xF000
+#define NVM_MINOR_MASK 0x0FF0
+#define NVM_IMAGE_ID_MASK 0x000F
+#define NVM_COMB_VER_MASK 0x00FF
+#define NVM_MAJOR_SHIFT 12
+#define NVM_MINOR_SHIFT 4
+#define NVM_COMB_VER_SHFT 8
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETRACK_SHIFT 16
+#define NVM_ETRACK_VALID 0x8000
+#define NVM_NEW_DEC_MASK 0x0F00
+#define NVM_HEX_CONV 16
+#define NVM_HEX_TENS 10
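The version masks above carve the NVM_VERSION word into major, minor and image-ID fields; a hedged sketch of the decode (helper name is illustrative, kernel u16 type assumed):

static void sketch_nvm_version(u16 fw_version, u16 *major, u16 *minor,
			       u16 *image_id)
{
	*major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
	*minor = (fw_version & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
	*image_id = fw_version & NVM_IMAGE_ID_MASK;	/* low nibble */
}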
+
+/* FW version defines */
+/* Offset of "Loader patch ptr" in Firmware Header */
+#define E1000_I350_NVM_FW_LOADER_PATCH_PTR_OFFSET 0x01
+/* Patch generation hour & minutes */
+#define E1000_I350_NVM_FW_VER_WORD1_OFFSET 0x04
+/* Patch generation month & day */
+#define E1000_I350_NVM_FW_VER_WORD2_OFFSET 0x05
+/* Patch generation year */
+#define E1000_I350_NVM_FW_VER_WORD3_OFFSET 0x06
+/* Patch major & minor numbers */
+#define E1000_I350_NVM_FW_VER_WORD4_OFFSET 0x07
+
+#define NVM_MAC_ADDR 0x0000
+#define NVM_SUB_DEV_ID 0x000B
+#define NVM_SUB_VEN_ID 0x000C
+#define NVM_DEV_ID 0x000D
+#define NVM_VEN_ID 0x000E
+#define NVM_INIT_CTRL_2 0x000F
+#define NVM_INIT_CTRL_4 0x0013
+#define NVM_LED_1_CFG 0x001C
+#define NVM_LED_0_2_CFG 0x001F
+
+#define NVM_COMPAT_VALID_CSUM 0x0001
+#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040
+
#define NVM_ETS_CFG 0x003E
#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0
#define NVM_ETS_LTHRES_DELTA_SHIFT 6
@@ -687,237 +1048,292 @@
#define NVM_ETS_DATA_INDEX_MASK 0x0300
#define NVM_ETS_DATA_INDEX_SHIFT 8
#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF
-
-#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
-#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
-#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */
-#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */
-
-#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0)
+#define NVM_INIT_CONTROL2_REG 0x000F
+#define NVM_INIT_CONTROL3_PORT_B 0x0014
+#define NVM_INIT_3GIO_3 0x001A
+#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define NVM_INIT_CONTROL3_PORT_A 0x0024
+#define NVM_CFG 0x0012
+#define NVM_ALT_MAC_ADDR_PTR 0x0037
+#define NVM_CHECKSUM_REG 0x003F
+#define NVM_COMPATIBILITY_REG_3 0x0003
+#define NVM_COMPATIBILITY_BIT_MASK 0x8000
+
+#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
+#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */
+#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */
+
+#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? (0x40 + (0x40 * (a))) : 0)
/* Mask bits for fields in Word 0x24 of the NVM */
-#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */
-#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */
+#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */
+#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed extrnl */
+/* Offset of Link Mode bits for 82575/82576 */
+#define NVM_WORD24_LNK_MODE_OFFSET 8
+/* Offset of Link Mode bits for 82580 up */
+#define NVM_WORD24_82580_LNK_MODE_OFFSET 4
/* Mask bits for fields in Word 0x0f of the NVM */
-#define NVM_WORD0F_PAUSE_MASK 0x3000
-#define NVM_WORD0F_ASM_DIR 0x2000
+#define NVM_WORD0F_PAUSE_MASK 0x3000
+#define NVM_WORD0F_PAUSE 0x1000
+#define NVM_WORD0F_ASM_DIR 0x2000
/* Mask bits for fields in Word 0x1a of the NVM */
+#define NVM_WORD1A_ASPM_MASK 0x000C
+
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM 0x0800
-/* length of string needed to store part num */
-#define E1000_PBANUM_LENGTH 11
+/* length of string needed to store PBA number */
+#define E1000_PBANUM_LENGTH 11
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
-#define NVM_SUM 0xBABA
+#define NVM_SUM 0xBABA
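A sketch of the checksum rule stated above: the words from offset 0x0000 through NVM_CHECKSUM_REG must sum to NVM_SUM modulo 2^16. Here read_nvm_word() stands in for whatever NVM read op the part uses and is not a real driver function; struct e1000_hw is the driver's hardware handle:

static s32 sketch_validate_nvm_checksum(struct e1000_hw *hw)
{
	u16 checksum = 0;
	u16 i, word;

	for (i = 0; i <= NVM_CHECKSUM_REG; i++) {
		if (read_nvm_word(hw, i, &word))	/* hypothetical read op */
			return -E1000_ERR_NVM;
		checksum += word;	/* u16 arithmetic wraps mod 2^16 */
	}

	return (checksum == NVM_SUM) ? E1000_SUCCESS : -E1000_ERR_NVM;
}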
-#define NVM_PBA_OFFSET_0 8
-#define NVM_PBA_OFFSET_1 9
+/* PBA (printed board assembly) number words */
+#define NVM_PBA_OFFSET_0 8
+#define NVM_PBA_OFFSET_1 9
+#define NVM_PBA_PTR_GUARD 0xFAFA
#define NVM_RESERVED_WORD 0xFFFF
-#define NVM_PBA_PTR_GUARD 0xFAFA
-#define NVM_WORD_SIZE_BASE_SHIFT 6
-
-/* NVM Commands - Microwire */
+#define NVM_WORD_SIZE_BASE_SHIFT 6
/* NVM Commands - SPI */
-#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
-#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
-#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
-#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
-#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
-#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
+#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */
+#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */
/* SPI NVM Status Register */
-#define NVM_STATUS_RDY_SPI 0x01
+#define NVM_STATUS_RDY_SPI 0x01
/* Word definitions for ID LED Settings */
-#define ID_LED_RESERVED_0000 0x0000
-#define ID_LED_RESERVED_FFFF 0xFFFF
-#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
- (ID_LED_OFF1_OFF2 << 8) | \
- (ID_LED_DEF1_DEF2 << 4) | \
- (ID_LED_DEF1_DEF2))
-#define ID_LED_DEF1_DEF2 0x1
-#define ID_LED_DEF1_ON2 0x2
-#define ID_LED_DEF1_OFF2 0x3
-#define ID_LED_ON1_DEF2 0x4
-#define ID_LED_ON1_ON2 0x5
-#define ID_LED_ON1_OFF2 0x6
-#define ID_LED_OFF1_DEF2 0x7
-#define ID_LED_OFF1_ON2 0x8
-#define ID_LED_OFF1_OFF2 0x9
-
-#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
-#define IGP_ACTIVITY_LED_ENABLE 0x0300
-#define IGP_LED3_MODE 0x07000000
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2 0x1
+#define ID_LED_DEF1_ON2 0x2
+#define ID_LED_DEF1_OFF2 0x3
+#define ID_LED_ON1_DEF2 0x4
+#define ID_LED_ON1_ON2 0x5
+#define ID_LED_ON1_OFF2 0x6
+#define ID_LED_OFF1_DEF2 0x7
+#define ID_LED_OFF1_ON2 0x8
+#define ID_LED_OFF1_OFF2 0x9
+
+#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE 0x07000000
/* PCI/PCI-X/PCI-EX Config space */
-#define PCIE_DEVICE_CONTROL2 0x28
-#define PCIE_DEVICE_CONTROL2_16ms 0x0005
+#define PCIX_COMMAND_REGISTER 0xE6
+#define PCIX_STATUS_REGISTER_LO 0xE8
+#define PCIX_STATUS_REGISTER_HI 0xEA
+#define PCI_HEADER_TYPE_REGISTER 0x0E
+#define PCIE_LINK_STATUS 0x12
+#define PCIE_DEVICE_CONTROL2 0x28
+
+#define PCIX_COMMAND_MMRBC_MASK 0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT 0x2
+#define PCIX_STATUS_HI_MMRBC_MASK 0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5
+#define PCIX_STATUS_HI_MMRBC_4K 0x3
+#define PCIX_STATUS_HI_MMRBC_2K 0x2
+#define PCIX_STATUS_LO_FUNC_MASK 0x7
+#define PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define PCIE_LINK_WIDTH_MASK 0x3F0
+#define PCIE_LINK_WIDTH_SHIFT 4
+#define PCIE_LINK_SPEED_MASK 0x0F
+#define PCIE_LINK_SPEED_2500 0x01
+#define PCIE_LINK_SPEED_5000 0x02
+#define PCIE_DEVICE_CONTROL2_16ms 0x0005
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN 6
+#endif
-#define PHY_REVISION_MASK 0xFFFFFFF0
-#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
-#define MAX_PHY_MULTI_PAGE_REG 0xF
+#define PHY_REVISION_MASK 0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
-/* Bit definitions for valid PHY IDs. */
-/* I = Integrated
+/* Bit definitions for valid PHY IDs.
+ * I = Integrated
* E = External
*/
-#define M88E1111_I_PHY_ID 0x01410CC0
-#define M88E1112_E_PHY_ID 0x01410C90
-#define I347AT4_E_PHY_ID 0x01410DC0
-#define IGP03E1000_E_PHY_ID 0x02A80390
-#define I82580_I_PHY_ID 0x015403A0
-#define I350_I_PHY_ID 0x015403B0
-#define M88_VENDOR 0x0141
-#define I210_I_PHY_ID 0x01410C00
-#define M88E1545_E_PHY_ID 0x01410EA0
+#define M88E1000_E_PHY_ID 0x01410C50
+#define M88E1000_I_PHY_ID 0x01410C30
+#define M88E1011_I_PHY_ID 0x01410C20
+#define IGP01E1000_I_PHY_ID 0x02A80380
+#define M88E1111_I_PHY_ID 0x01410CC0
+#define M88E1543_E_PHY_ID 0x01410EA0
+#define M88E1512_E_PHY_ID 0x01410DD0
+#define M88E1112_E_PHY_ID 0x01410C90
+#define I347AT4_E_PHY_ID 0x01410DC0
+#define M88E1340M_E_PHY_ID 0x01410DF0
+#define GG82563_E_PHY_ID 0x01410CA0
+#define IGP03E1000_E_PHY_ID 0x02A80390
+#define IFE_E_PHY_ID 0x02A80330
+#define IFE_PLUS_E_PHY_ID 0x02A80320
+#define IFE_C_E_PHY_ID 0x02A80310
+#define I82580_I_PHY_ID 0x015403A0
+#define I350_I_PHY_ID 0x015403B0
+#define I210_I_PHY_ID 0x01410C00
+#define IGP04E1000_E_PHY_ID 0x02A80391
+#define M88_VENDOR 0x0141
/* M88E1000 Specific Registers */
-#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
-#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */
-#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */
+#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */
+#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */
+#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */
-#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */
-#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */
+#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */
+#define M88E1000_PHY_GEN_CONTROL 0x1E /* meaning depends on reg 29 */
/* M88E1000 PHY Specific Control Register */
-#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
-/* 1=CLK125 low, 0=CLK125 toggling */
-#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
- /* Manual MDI configuration */
-#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */
+/* MDI Crossover Mode bits 6:5 Manual MDI configuration */
+#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
-#define M88E1000_PSCR_AUTO_X_1000T 0x0040
+#define M88E1000_PSCR_AUTO_X_1000T 0x0040
/* Auto crossover enabled all speeds */
-#define M88E1000_PSCR_AUTO_X_MODE 0x0060
-/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold
- * 0=Normal 10BASE-T Rx Threshold
- */
-/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
-#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
+#define M88E1000_PSCR_AUTO_X_MODE 0x0060
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */
/* M88E1000 PHY Specific Status Register */
-#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
-#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
-#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
+#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
/* 0 = <50M
* 1 = 50-80M
* 2 = 80-110M
* 3 = 110-140M
* 4 = >140M
*/
-#define M88E1000_PSSR_CABLE_LENGTH 0x0380
-#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
-#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
+#define M88E1000_PSSR_CABLE_LENGTH 0x0380
+#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
-#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
-/* M88E1000 Extended PHY Specific Control Register */
-/* 1 = Lost lock detect enabled.
- * Will assert lost lock and bring
- * link down if idle not seen
- * within 1ms in 1000BASE-T
- */
/* Number of times we will attempt to autonegotiate before downshifting if we
* are the master
*/
-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
-#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
/* Number of times we will attempt to autonegotiate before downshifting if we
* are the slave
*/
-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
-#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
-#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
+#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
-/* Intel i347-AT4 Registers */
+/* Intel I347AT4 Registers */
+#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */
+#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */
+#define I347AT4_PAGE_SELECT 0x16
-#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */
-#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */
-#define I347AT4_PAGE_SELECT 0x16
+/* I347AT4 Extended PHY Specific Control Register */
-/* i347-AT4 Extended PHY Specific Control Register */
-
-/* Number of times we will attempt to autonegotiate before downshifting if we
- * are the master
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
*/
-#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
-#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000
-#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000
-#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000
-#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000
-#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000
-#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000
-#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000
-#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000
-#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000
-
-/* i347-AT4 PHY Cable Diagnostics Control */
-#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */
-
-/* Marvell 1112 only registers */
-#define M88E1112_VCT_DSP_DISTANCE 0x001A
+#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
+#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000
+#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000
+#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000
+#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000
+#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000
+#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000
+#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000
+#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000
+#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000
+
+/* I347AT4 PHY Cable Diagnostics Control */
+#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */
+
+/* M88E1112 only registers */
+#define M88E1112_VCT_DSP_DISTANCE 0x001A
/* M88EC018 Rev 2 specific DownShift settings */
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
-#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
-/* MDI Control */
-#define E1000_MDIC_DATA_MASK 0x0000FFFF
-#define E1000_MDIC_REG_MASK 0x001F0000
-#define E1000_MDIC_REG_SHIFT 16
-#define E1000_MDIC_PHY_MASK 0x03E00000
-#define E1000_MDIC_PHY_SHIFT 21
-#define E1000_MDIC_OP_WRITE 0x04000000
-#define E1000_MDIC_OP_READ 0x08000000
-#define E1000_MDIC_READY 0x10000000
-#define E1000_MDIC_INT_EN 0x20000000
-#define E1000_MDIC_ERROR 0x40000000
-#define E1000_MDIC_DEST 0x80000000
-
-/* Thermal Sensor */
-#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */
-#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */
-
-/* Energy Efficient Ethernet */
-#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */
-#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */
-#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */
-#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */
-#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */
-#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
-#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */
-#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
-#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */
-#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */
-#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */
-#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
-#define E1000_M88E1545_PAGE_ADDR 0x16 /* Page Offset Register */
-#define E1000_M88E1545_EEE_CTRL_1 0x0
-#define E1000_M88E1545_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
-#define E1000_EEE_ADV_DEV_I354 7
-#define E1000_EEE_ADV_ADDR_I354 60
-#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
-#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */
-#define E1000_PCS_STATUS_DEV_I354 3
-#define E1000_PCS_STATUS_ADDR_I354 1
-#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */
-#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400
-#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT 5
+#define GG82563_REG(page, reg) \
+ (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG 30
-/* SerDes Control */
-#define E1000_GEN_CTL_READY 0x80000000
-#define E1000_GEN_CTL_ADDRESS_SHIFT 8
-#define E1000_GEN_POLL_TIMEOUT 640
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */
+#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */
+#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */
-#define E1000_VFTA_ENTRY_SHIFT 5
-#define E1000_VFTA_ENTRY_MASK 0x7F
-#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+/* MAC Specific Control Register */
+#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21)
-/* DMA Coalescing register fields */
-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
- on DMA coal */
+#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+/* Kumeran Mode Control */
+#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16)
+#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */
+
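
For reference, a minimal standalone sketch of how the GG82563_REG() macro above packs a page number into bits 15-5 and a register offset into bits 4-0. This is an illustration, not part of the patch; MAX_PHY_REG_ADDRESS is defined outside this hunk and is assumed here to be the usual 5-bit mask 0x1F.

/* Illustrative sketch only -- not from the patch. */
#include <stdio.h>

#define MAX_PHY_REG_ADDRESS	0x1F	/* assumed: 5-bit register offset mask */
#define GG82563_PAGE_SHIFT	5
#define GG82563_REG(page, reg) \
	(((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))

int main(void)
{
	/* Page 193, register 16: bits 15-5 hold the page, bits 4-0 the offset */
	printf("KMRN_MODE_CTRL = 0x%04x\n", GG82563_REG(193, 16)); /* 0x1830 */
	return 0;
}
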
+/* MDI Control */
+#define E1000_MDIC_REG_MASK 0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK 0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE 0x04000000
+#define E1000_MDIC_OP_READ 0x08000000
+#define E1000_MDIC_READY 0x10000000
+#define E1000_MDIC_ERROR 0x40000000
+#define E1000_MDIC_DEST 0x80000000
+
+/* SerDes Control */
+#define E1000_GEN_CTL_READY 0x80000000
+#define E1000_GEN_CTL_ADDRESS_SHIFT 8
+#define E1000_GEN_POLL_TIMEOUT 640
+
+/* LinkSec register fields */
+#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000
+#define E1000_LSECTXCAP_SUM_SHIFT 16
+#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000
+#define E1000_LSECRXCAP_SUM_SHIFT 16
+
+#define E1000_LSECTXCTRL_EN_MASK 0x00000003
+#define E1000_LSECTXCTRL_DISABLE 0x0
+#define E1000_LSECTXCTRL_AUTH 0x1
+#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2
+#define E1000_LSECTXCTRL_AISCI 0x00000020
+#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00
+#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8
+
+#define E1000_LSECRXCTRL_EN_MASK 0x0000000C
+#define E1000_LSECRXCTRL_EN_SHIFT 2
+#define E1000_LSECRXCTRL_DISABLE 0x0
+#define E1000_LSECRXCTRL_CHECK 0x1
+#define E1000_LSECRXCTRL_STRICT 0x2
+#define E1000_LSECRXCTRL_DROP 0x3
+#define E1000_LSECRXCTRL_PLSH 0x00000040
+#define E1000_LSECRXCTRL_RP 0x00000080
+#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33
/* Tx Rate-Scheduler Config fields */
#define E1000_RTTBCNRC_RS_ENA 0x80000000
@@ -926,4 +1342,70 @@
#define E1000_RTTBCNRC_RF_INT_MASK \
(E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
-#endif
+/* DMA Coalescing register fields */
+/* DMA Coalescing Watchdog Timer */
+#define E1000_DMACR_DMACWT_MASK 0x00003FFF
+/* DMA Coalescing Rx Threshold */
+#define E1000_DMACR_DMACTHR_MASK 0x00FF0000
+#define E1000_DMACR_DMACTHR_SHIFT 16
+/* Lx when no PCIe transactions */
+#define E1000_DMACR_DMAC_LX_MASK 0x30000000
+#define E1000_DMACR_DMAC_LX_SHIFT 28
+#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
+/* DMA Coalescing BMC-to-OS Watchdog Enable */
+#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
+
+/* DMA Coalescing Transmit Threshold */
+#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF
+
+#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
+
+/* Rx Traffic Rate Threshold */
+#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF
+/* Rx packet rate in current window */
+#define E1000_DMCRTRH_LRPRCW 0x80000000
+
+/* DMA Coal Rx Traffic Current Count */
+#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF
+
+/* Flow ctrl Rx Threshold High val */
+#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0
+#define E1000_FCRTC_RTH_COAL_SHIFT 4
+/* Lx power decision based on DMA coal */
+#define E1000_PCIEMISC_LX_DECISION 0x00000080
+
+#define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */
+#define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */
+#define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */
+#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
+#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+
+/* Proxy Filter Control */
+#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */
+#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */
+#define E1000_PROXYFC_MC 0x00000008 /* Directed MC Proxy */
+#define E1000_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */
+#define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */
+#define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */
+#define E1000_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */
+#define E1000_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */
+#define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */
+/* Proxy Status */
+#define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */
+
+/* Firmware Status */
+#define E1000_FWSTS_FWRI 0x80000000 /* FW Reset Indication */
+/* VF Control */
+#define E1000_VTCTRL_RST 0x04000000 /* Reset VF */
+
+#define E1000_STATUS_LAN_ID_MASK 0x0000000C /* Mask for Lan ID field */
+/* Lan ID bit field offset in status register */
+#define E1000_STATUS_LAN_ID_OFFSET 2
+#define E1000_VFTA_ENTRIES 128
+#ifndef E1000_UNUSEDARG
+#define E1000_UNUSEDARG
+#endif /* E1000_UNUSEDARG */
+#ifndef ERROR_REPORT
+#define ERROR_REPORT(fmt) do { } while (0)
+#endif /* ERROR_REPORT */
+#endif /* _E1000_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 488abb24a54f..9e4dfc3a822e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,14 +12,11 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -28,11 +25,7 @@
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/netdevice.h>
-
+#include "e1000_osdep.h"
#include "e1000_regs.h"
#include "e1000_defines.h"
@@ -55,35 +48,45 @@ struct e1000_hw;
#define E1000_DEV_ID_82580_SGMII 0x1511
#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
-#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
-#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
-#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
-#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
#define E1000_DEV_ID_I350_COPPER 0x1521
#define E1000_DEV_ID_I350_FIBER 0x1522
#define E1000_DEV_ID_I350_SERDES 0x1523
#define E1000_DEV_ID_I350_SGMII 0x1524
+#define E1000_DEV_ID_I350_DA4 0x1546
+#define E1000_DEV_ID_I210_TOOLS_ONLY 0x1531
+#define E1000_DEV_ID_I211_TOOLS_ONLY 0x1532
#define E1000_DEV_ID_I210_COPPER 0x1533
+#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534
+#define E1000_DEV_ID_I210_COPPER_IT 0x1535
#define E1000_DEV_ID_I210_FIBER 0x1536
#define E1000_DEV_ID_I210_SERDES 0x1537
#define E1000_DEV_ID_I210_SGMII 0x1538
+#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B
+#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C
#define E1000_DEV_ID_I211_COPPER 0x1539
#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
#define E1000_DEV_ID_I354_SGMII 0x1F41
#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45
+#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
+#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
+#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
+#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
-#define E1000_REVISION_2 2
-#define E1000_REVISION_4 4
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+#define E1000_REVISION_3 3
+#define E1000_REVISION_4 4
-#define E1000_FUNC_0 0
-#define E1000_FUNC_1 1
-#define E1000_FUNC_2 2
-#define E1000_FUNC_3 3
+#define E1000_FUNC_0 0
+#define E1000_FUNC_1 1
+#define E1000_FUNC_2 2
+#define E1000_FUNC_3 3
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9
enum e1000_mac_type {
e1000_undefined = 0,
@@ -110,6 +113,7 @@ enum e1000_nvm_type {
e1000_nvm_none,
e1000_nvm_eeprom_spi,
e1000_nvm_flash_hw,
+ e1000_nvm_invm,
e1000_nvm_flash_sw
};
@@ -129,6 +133,7 @@ enum e1000_phy_type {
e1000_phy_igp_3,
e1000_phy_ife,
e1000_phy_82580,
+ e1000_phy_vf,
e1000_phy_i210,
};
@@ -183,6 +188,177 @@ enum e1000_fc_mode {
e1000_fc_default = 0xFF
};
+enum e1000_ms_type {
+ e1000_ms_hw_default = 0,
+ e1000_ms_force_master,
+ e1000_ms_force_slave,
+ e1000_ms_auto
+};
+
+enum e1000_smart_speed {
+ e1000_smart_speed_default = 0,
+ e1000_smart_speed_on,
+ e1000_smart_speed_off
+};
+
+enum e1000_serdes_link_state {
+ e1000_serdes_link_down = 0,
+ e1000_serdes_link_autoneg_progress,
+ e1000_serdes_link_autoneg_complete,
+ e1000_serdes_link_forced_up
+};
+
+#ifndef __le16
+#define __le16 u16
+#endif
+#ifndef __le32
+#define __le32 u32
+#endif
+#ifndef __le64
+#define __le64 u64
+#endif
+/* Receive Descriptor */
+struct e1000_rx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 length; /* Length of data DMAed into data buffer */
+ __le16 csum; /* Packet checksum */
+ u8 status; /* Descriptor status */
+ u8 errors; /* Descriptor Errors */
+ __le16 special;
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+ struct {
+ __le64 buffer_addr;
+ __le64 reserved;
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length;
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
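
A short sketch of how a driver would consume the writeback half of the extended Rx descriptor defined above. This is illustrative only, not from the patch; the descriptor-done status bit value is an assumption (0x01 in the e1000 definitions) since it is not defined in this hunk, and the example_ helper name is hypothetical.

/* Illustrative sketch only -- not from the patch. */
#define EXAMPLE_RXD_STAT_DD	0x01	/* assumed: descriptor-done status bit */

static int example_rx_desc_done(const union e1000_rx_desc_extended *desc,
				u16 *len)
{
	u32 staterr = le32_to_cpu(desc->wb.upper.status_error);

	if (!(staterr & EXAMPLE_RXD_STAT_DD))
		return 0;	/* hardware has not written the descriptor back */

	*len = le16_to_cpu(desc->wb.upper.length);	/* bytes DMAed */
	return 1;
}
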
+
+#define MAX_PS_BUFFERS 4
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+ struct {
+ /* one buffer for protocol header(s), three data buffers */
+ __le64 buffer_addr[MAX_PS_BUFFERS];
+ } read;
+ struct {
+ struct {
+ __le32 mrq; /* Multiple Rx Queues */
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length0; /* length of buffer 0 */
+ __le16 vlan; /* VLAN tag */
+ } middle;
+ struct {
+ __le16 header_status;
+ /* length of buffers 1-3 */
+ __le16 length[PS_PAGE_BUFFERS];
+ } upper;
+ __le64 reserved;
+ } wb; /* writeback */
+};
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 cso; /* Checksum offset */
+ u8 cmd; /* Descriptor control */
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 css; /* Checksum start */
+ __le16 special;
+ } fields;
+ } upper;
+};
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+ union {
+ __le32 ip_config;
+ struct {
+ u8 ipcss; /* IP checksum start */
+ u8 ipcso; /* IP checksum offset */
+ __le16 ipcse; /* IP checksum end */
+ } ip_fields;
+ } lower_setup;
+ union {
+ __le32 tcp_config;
+ struct {
+ u8 tucss; /* TCP checksum start */
+ u8 tucso; /* TCP checksum offset */
+ __le16 tucse; /* TCP checksum end */
+ } tcp_fields;
+ } upper_setup;
+ __le32 cmd_and_length;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 hdr_len; /* Header length */
+ __le16 mss; /* Maximum segment size */
+ } fields;
+ } tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+ __le64 buffer_addr; /* Address of the descriptor's buffer address */
+ union {
+ __le32 data;
+ struct {
+ __le16 length; /* Data buffer length */
+ u8 typ_len_ext;
+ u8 cmd;
+ } flags;
+ } lower;
+ union {
+ __le32 data;
+ struct {
+ u8 status; /* Descriptor status */
+ u8 popts; /* Packet Options */
+ __le16 special;
+ } fields;
+ } upper;
+};
+
/* Statistics counters collected by the MAC */
struct e1000_hw_stats {
u64 crcerrs;
@@ -291,7 +467,7 @@ struct e1000_host_command_header {
u8 checksum;
};
-#define E1000_HI_MAX_DATA_LENGTH 252
+#define E1000_HI_MAX_DATA_LENGTH 252
struct e1000_host_command_info {
struct e1000_host_command_header command_header;
u8 command_data[E1000_HI_MAX_DATA_LENGTH];
@@ -306,7 +482,7 @@ struct e1000_host_mng_command_header {
u16 command_length;
};
-#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
struct e1000_host_mng_command_info {
struct e1000_host_mng_command_header command_header;
u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
@@ -315,52 +491,95 @@ struct e1000_host_mng_command_info {
#include "e1000_mac.h"
#include "e1000_phy.h"
#include "e1000_nvm.h"
+#include "e1000_manage.h"
#include "e1000_mbx.h"
+/* Function pointers for the MAC. */
struct e1000_mac_operations {
+ s32 (*init_params)(struct e1000_hw *);
+ s32 (*id_led_init)(struct e1000_hw *);
+ s32 (*blink_led)(struct e1000_hw *);
+ bool (*check_mng_mode)(struct e1000_hw *);
s32 (*check_for_link)(struct e1000_hw *);
+ s32 (*cleanup_led)(struct e1000_hw *);
+ void (*clear_hw_cntrs)(struct e1000_hw *);
+ void (*clear_vfta)(struct e1000_hw *);
+ s32 (*get_bus_info)(struct e1000_hw *);
+ void (*set_lan_id)(struct e1000_hw *);
+ s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+ s32 (*led_on)(struct e1000_hw *);
+ s32 (*led_off)(struct e1000_hw *);
+ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
s32 (*reset_hw)(struct e1000_hw *);
s32 (*init_hw)(struct e1000_hw *);
- bool (*check_mng_mode)(struct e1000_hw *);
+ void (*shutdown_serdes)(struct e1000_hw *);
+ void (*power_up_serdes)(struct e1000_hw *);
+ s32 (*setup_link)(struct e1000_hw *);
s32 (*setup_physical_interface)(struct e1000_hw *);
- void (*rar_set)(struct e1000_hw *, u8 *, u32);
+ s32 (*setup_led)(struct e1000_hw *);
+ void (*write_vfta)(struct e1000_hw *, u32, u32);
+ void (*config_collision_dist)(struct e1000_hw *);
+ int (*rar_set)(struct e1000_hw *, u8*, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
- s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
- s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
- void (*release_swfw_sync)(struct e1000_hw *, u16);
-#ifdef CONFIG_IGB_HWMON
+ s32 (*validate_mdi_setting)(struct e1000_hw *);
s32 (*get_thermal_sensor_data)(struct e1000_hw *);
s32 (*init_thermal_sensor_thresh)(struct e1000_hw *);
-#endif
-
+ s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
+ void (*release_swfw_sync)(struct e1000_hw *, u16);
};
+/* When to use various PHY register access functions:
+ *
+ * Func Caller
+ * Function Does Does When to use
+ * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * X_reg L,P,A n/a for simple PHY reg accesses
+ * X_reg_locked P,A L for multiple accesses of different regs
+ * on different pages
+ * X_reg_page A L,P for multiple accesses of different regs
+ * on the same page
+ *
+ * Where X=[read|write], L=locking, P=sets page, A=register access
+ *
+ */
struct e1000_phy_operations {
+ s32 (*init_params)(struct e1000_hw *);
s32 (*acquire)(struct e1000_hw *);
s32 (*check_polarity)(struct e1000_hw *);
s32 (*check_reset_block)(struct e1000_hw *);
+ s32 (*commit)(struct e1000_hw *);
s32 (*force_speed_duplex)(struct e1000_hw *);
s32 (*get_cfg_done)(struct e1000_hw *hw);
s32 (*get_cable_length)(struct e1000_hw *);
- s32 (*get_phy_info)(struct e1000_hw *);
+ s32 (*get_info)(struct e1000_hw *);
+ s32 (*set_page)(struct e1000_hw *, u16);
s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
+ s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
+ s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *);
void (*release)(struct e1000_hw *);
s32 (*reset)(struct e1000_hw *);
s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
s32 (*write_reg)(struct e1000_hw *, u32, u16);
+ s32 (*write_reg_locked)(struct e1000_hw *, u32, u16);
+ s32 (*write_reg_page)(struct e1000_hw *, u32, u16);
+ void (*power_up)(struct e1000_hw *);
+ void (*power_down)(struct e1000_hw *);
s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
};
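
A brief sketch of the locking convention documented in the comment table above: the caller takes the PHY lock once, then uses the *_locked accessors for a burst of reads. Illustrative only, not from the patch; the example_ helper is hypothetical, and only function pointers declared in the struct above are used.

/* Illustrative sketch only -- not from the patch. */
static s32 example_burst_phy_read(struct e1000_hw *hw, u32 reg_a, u32 reg_b,
				  u16 *val_a, u16 *val_b)
{
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);	/* caller does L */
	if (ret_val)
		return ret_val;

	/* accessor does P,A; the lock is already held */
	ret_val = hw->phy.ops.read_reg_locked(hw, reg_a, val_a);
	if (!ret_val)
		ret_val = hw->phy.ops.read_reg_locked(hw, reg_b, val_b);

	hw->phy.ops.release(hw);
	return ret_val;
}
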
+/* Function pointers for the NVM. */
struct e1000_nvm_operations {
+ s32 (*init_params)(struct e1000_hw *);
s32 (*acquire)(struct e1000_hw *);
s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
void (*release)(struct e1000_hw *);
- s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+ void (*reload)(struct e1000_hw *);
s32 (*update)(struct e1000_hw *);
- s32 (*validate)(struct e1000_hw *);
s32 (*valid_led_default)(struct e1000_hw *, u16 *);
+ s32 (*validate)(struct e1000_hw *);
+ s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
};
#define E1000_MAX_SENSORS 3
@@ -376,49 +595,45 @@ struct e1000_thermal_sensor_data {
struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS];
};
-struct e1000_info {
- s32 (*get_invariants)(struct e1000_hw *);
- struct e1000_mac_operations *mac_ops;
- struct e1000_phy_operations *phy_ops;
- struct e1000_nvm_operations *nvm_ops;
-};
-
-extern const struct e1000_info e1000_82575_info;
-
struct e1000_mac_info {
struct e1000_mac_operations ops;
-
- u8 addr[6];
- u8 perm_addr[6];
+ u8 addr[ETH_ADDR_LEN];
+ u8 perm_addr[ETH_ADDR_LEN];
enum e1000_mac_type type;
+ u32 collision_delta;
u32 ledctl_default;
u32 ledctl_mode1;
u32 ledctl_mode2;
u32 mc_filter_type;
+ u32 tx_packet_delta;
u32 txcw;
+ u16 current_ifs_val;
+ u16 ifs_max_val;
+ u16 ifs_min_val;
+ u16 ifs_ratio;
+ u16 ifs_step_size;
u16 mta_reg_count;
u16 uta_reg_count;
/* Maximum size of the MTA register table in all supported adapters */
- #define MAX_MTA_REG 128
+#define MAX_MTA_REG 128
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
u8 forced_speed_duplex;
bool adaptive_ifs;
+ bool has_fwsm;
bool arc_subsystem_valid;
bool asf_firmware_present;
bool autoneg;
bool autoneg_failed;
- bool disable_hw_init_bits;
bool get_link_status;
- bool ifs_params_forced;
bool in_ifs_mode;
- bool report_tx_early;
+ enum e1000_serdes_link_state serdes_link_state;
bool serdes_has_link;
bool tx_pkt_filtering;
struct e1000_thermal_sensor_data thermal_sensor_data;
@@ -426,7 +641,6 @@ struct e1000_mac_info {
struct e1000_phy_info {
struct e1000_phy_operations ops;
-
enum e1000_phy_type type;
enum e1000_1000t_rx_status local_rx;
@@ -479,20 +693,19 @@ struct e1000_bus_info {
enum e1000_bus_speed speed;
enum e1000_bus_width width;
- u32 snoop;
-
u16 func;
u16 pci_cmd_word;
};
struct e1000_fc_info {
- u32 high_water; /* Flow control high-water mark */
- u32 low_water; /* Flow control low-water mark */
- u16 pause_time; /* Flow control pause timer */
- bool send_xon; /* Flow control send XON */
- bool strict_ieee; /* Strict IEEE mode */
- enum e1000_fc_mode current_mode; /* Type of flow control */
- enum e1000_fc_mode requested_mode;
+ u32 high_water; /* Flow control high-water mark */
+ u32 low_water; /* Flow control low-water mark */
+ u16 pause_time; /* Flow control pause timer */
+ u16 refresh_time; /* Flow control refresh timer */
+ bool send_xon; /* Flow control send XON */
+ bool strict_ieee; /* Strict IEEE mode */
+ enum e1000_fc_mode current_mode; /* FC mode in effect */
+ enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
};
struct e1000_mbx_operations {
@@ -527,7 +740,17 @@ struct e1000_dev_spec_82575 {
bool sgmii_active;
bool global_device_reset;
bool eee_disable;
+ bool module_plugged;
bool clear_semaphore_once;
+ u32 mtu;
+ struct sfp_e1000_flags eth_flags;
+ u8 media_port;
+ bool media_changed;
+};
+
+struct e1000_dev_spec_vf {
+ u32 vf_number;
+ u32 v2p_mailbox;
};
struct e1000_hw {
@@ -546,7 +769,8 @@ struct e1000_hw {
struct e1000_host_mng_dhcp_cookie mng_cookie;
union {
- struct e1000_dev_spec_82575 _82575;
+ struct e1000_dev_spec_82575 _82575;
+ struct e1000_dev_spec_vf vf;
} dev_spec;
u16 device_id;
@@ -557,11 +781,13 @@ struct e1000_hw {
u8 revision_id;
};
-extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
-#define hw_dbg(format, arg...) \
- netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
+#include "e1000_82575.h"
+#include "e1000_i210.h"
/* These functions must be implemented by drivers */
-s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
-s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
-#endif /* _E1000_HW_H_ */
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index ddb3cf51b9b9..6a3ca7f203ce 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,98 +12,29 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-******************************************************************************/
+*******************************************************************************/
-/* e1000_i210
- * e1000_i211
- */
+#include "e1000_api.h"
-#include <linux/types.h>
-#include <linux/if_ether.h>
-#include "e1000_hw.h"
-#include "e1000_i210.h"
+static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
+static void e1000_release_nvm_i210(struct e1000_hw *hw);
+static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
+static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
+static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
/**
- * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
- * @hw: pointer to the HW structure
- *
- * Acquire the HW semaphore to access the PHY or NVM
- */
-static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
-{
- u32 swsm;
- s32 timeout = hw->nvm.word_size + 1;
- s32 i = 0;
-
- /* Get the SW semaphore */
- while (i < timeout) {
- swsm = rd32(E1000_SWSM);
- if (!(swsm & E1000_SWSM_SMBI))
- break;
-
- udelay(50);
- i++;
- }
-
- if (i == timeout) {
- /* In rare circumstances, the SW semaphore may already be held
- * unintentionally. Clear the semaphore once before giving up.
- */
- if (hw->dev_spec._82575.clear_semaphore_once) {
- hw->dev_spec._82575.clear_semaphore_once = false;
- igb_put_hw_semaphore(hw);
- for (i = 0; i < timeout; i++) {
- swsm = rd32(E1000_SWSM);
- if (!(swsm & E1000_SWSM_SMBI))
- break;
-
- udelay(50);
- }
- }
-
- /* If we do not have the semaphore here, we have to give up. */
- if (i == timeout) {
- hw_dbg("Driver can't access device - SMBI bit is set.\n");
- return -E1000_ERR_NVM;
- }
- }
-
- /* Get the FW semaphore. */
- for (i = 0; i < timeout; i++) {
- swsm = rd32(E1000_SWSM);
- wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
-
- /* Semaphore acquired if bit latched */
- if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
- break;
-
- udelay(50);
- }
-
- if (i == timeout) {
- /* Release semaphores */
- igb_put_hw_semaphore(hw);
- hw_dbg("Driver can't access the NVM\n");
- return -E1000_ERR_NVM;
- }
-
- return E1000_SUCCESS;
-}
-
-/**
- * igb_acquire_nvm_i210 - Request for access to EEPROM
+ * e1000_acquire_nvm_i210 - Request for access to EEPROM
* @hw: pointer to the HW structure
*
* Acquire the necessary semaphores for exclusive access to the EEPROM.
@@ -111,32 +42,40 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
* Return successful if access grant bit set, else clear the request for
* EEPROM access and return -E1000_ERR_NVM (-1).
**/
-s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
+static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
{
- return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_acquire_nvm_i210");
+
+ ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+
+ return ret_val;
}
/**
- * igb_release_nvm_i210 - Release exclusive access to EEPROM
+ * e1000_release_nvm_i210 - Release exclusive access to EEPROM
* @hw: pointer to the HW structure
*
* Stop any current commands to the EEPROM and clear the EEPROM request bit,
* then release the semaphores acquired.
**/
-void igb_release_nvm_i210(struct e1000_hw *hw)
+static void e1000_release_nvm_i210(struct e1000_hw *hw)
{
- igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+ DEBUGFUNC("e1000_release_nvm_i210");
+
+ e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
/**
- * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
+ * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Acquire the SW/FW semaphore to access the PHY or NVM. The mask
* will also specify which port we're acquiring the lock for.
**/
-s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
u32 swmask = mask;
@@ -144,60 +83,137 @@ s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
s32 ret_val = E1000_SUCCESS;
s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+ DEBUGFUNC("e1000_acquire_swfw_sync_i210");
+
while (i < timeout) {
- if (igb_get_hw_semaphore_i210(hw)) {
+ if (e1000_get_hw_semaphore_i210(hw)) {
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
- swfw_sync = rd32(E1000_SW_FW_SYNC);
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
if (!(swfw_sync & (fwmask | swmask)))
break;
- /* Firmware currently using resource (fwmask) */
- igb_put_hw_semaphore(hw);
- mdelay(5);
+ /*
+ * Firmware currently using resource (fwmask)
+ * or other software thread using resource (swmask)
+ */
+ e1000_put_hw_semaphore_generic(hw);
+ msec_delay_irq(5);
i++;
}
if (i == timeout) {
- hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
swfw_sync |= swmask;
- wr32(E1000_SW_FW_SYNC, swfw_sync);
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
- igb_put_hw_semaphore(hw);
out:
return ret_val;
}
/**
- * igb_release_swfw_sync_i210 - Release SW/FW semaphore
+ * e1000_release_swfw_sync_i210 - Release SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Release the SW/FW semaphore used to access the PHY or NVM. The mask
* will also specify which port we're releasing the lock for.
**/
-void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igb_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
+ DEBUGFUNC("e1000_release_swfw_sync_i210");
+
+ while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
; /* Empty */
- swfw_sync = rd32(E1000_SW_FW_SYNC);
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
swfw_sync &= ~mask;
- wr32(E1000_SW_FW_SYNC, swfw_sync);
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+}
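
For illustration, the typical pairing of the two helpers above: take the EEPROM bit of the SW/FW semaphore, perform the access, then release it; the NVM ops built on top of these follow the same pattern. A sketch only, not from the patch; the example_ wrapper is hypothetical, but every call it makes appears in this file with the signature shown.

/* Illustrative sketch only -- not from the patch. */
static s32 example_locked_eeprom_read(struct e1000_hw *hw, u16 offset,
				      u16 *data)
{
	s32 ret_val;

	ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
	if (ret_val)
		return ret_val;

	/* read one word via the EERD interface while the semaphore is held */
	ret_val = e1000_read_nvm_eerd(hw, offset, 1, data);

	e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
	return ret_val;
}
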
+
+/**
+ * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ **/
+static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
+{
+ u32 swsm;
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ DEBUGFUNC("e1000_get_hw_semaphore_i210");
+
+ /* Get the SW semaphore */
+ while (i < timeout) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ i++;
+ }
+
+ if (i == timeout) {
+ /* In rare circumstances, the SW semaphore may already be held
+ * unintentionally. Clear the semaphore once before giving up.
+ */
+ if (hw->dev_spec._82575.clear_semaphore_once) {
+ hw->dev_spec._82575.clear_semaphore_once = false;
+ e1000_put_hw_semaphore_generic(hw);
+ for (i = 0; i < timeout; i++) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ if (!(swsm & E1000_SWSM_SMBI))
+ break;
+
+ usec_delay(50);
+ }
+ }
+
+ /* If we do not have the semaphore here, we have to give up. */
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+ return -E1000_ERR_NVM;
+ }
+ }
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ usec_delay(50);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ e1000_put_hw_semaphore_generic(hw);
+ DEBUGOUT("Driver can't access the NVM\n");
+ return -E1000_ERR_NVM;
+ }
- igb_put_hw_semaphore(hw);
+ return E1000_SUCCESS;
}
/**
- * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
+ * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the Shadow Ram to read
* @words: number of words to read
@@ -206,21 +222,22 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
* Reads a 16 bit word from the Shadow Ram using the EERD register.
* Uses necessary synchronization semaphores.
**/
-s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
s32 status = E1000_SUCCESS;
u16 i, count;
+ DEBUGFUNC("e1000_read_nvm_srrd_i210");
+
/* We cannot hold synchronization semaphores for too long,
* because of forceful takeover procedure. However it is more efficient
- * to read in bursts than synchronizing access for each word.
- */
+ * to read in bursts than synchronizing access for each word. */
for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
E1000_EERD_EEWR_MAX_COUNT : (words - i);
if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
- status = igb_read_nvm_eerd(hw, offset, count,
+ status = e1000_read_nvm_eerd(hw, offset, count,
data + i);
hw->nvm.ops.release(hw);
} else {
@@ -235,7 +252,52 @@ s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
}
/**
- * igb_write_nvm_srwr - Write to Shadow Ram using EEWR
+ * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow RAM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow RAM
+ *
+ * Writes data to Shadow RAM at offset using EEWR register.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * data will not be committed to FLASH and also Shadow RAM will most likely
+ * contain an invalid checksum.
+ *
+ * If error code is returned, data and Shadow RAM may be inconsistent - buffer
+ * partially written.
+ **/
+s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u16 i, count;
+
+ DEBUGFUNC("e1000_write_nvm_srwr_i210");
+
+ /* We cannot hold synchronization semaphores for too long,
+ * because of forceful takeover procedure. However it is more efficient
+ * to write in bursts than synchronizing access for each word. */
+ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+ E1000_EERD_EEWR_MAX_COUNT : (words - i);
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ status = e1000_write_nvm_srwr(hw, offset, count,
+ data + i);
+ hw->nvm.ops.release(hw);
+ } else {
+ status = E1000_ERR_SWFW_SYNC;
+ }
+
+ if (status != E1000_SUCCESS)
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
* @hw: pointer to the HW structure
* @offset: offset within the Shadow Ram to be written to
* @words: number of words to write
@@ -243,10 +305,10 @@ s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
*
* Writes data to Shadow Ram at offset using EEWR register.
*
- * If igb_update_nvm_checksum is not called after this function , the
+ * If e1000_update_nvm_checksum is not called after this function, the
* Shadow Ram will most likely contain an invalid checksum.
**/
-static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
+static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
@@ -254,12 +316,15 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
u32 attempts = 100000;
s32 ret_val = E1000_SUCCESS;
- /* A check for invalid values: offset too large, too many words,
+ DEBUGFUNC("e1000_write_nvm_srwr");
+
+ /*
+ * A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
- hw_dbg("nvm parameter(s) out of bounds\n");
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
ret_val = -E1000_ERR_NVM;
goto out;
}
@@ -269,19 +334,19 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
(data[i] << E1000_NVM_RW_REG_DATA) |
E1000_NVM_RW_REG_START;
- wr32(E1000_SRWR, eewr);
+ E1000_WRITE_REG(hw, E1000_SRWR, eewr);
for (k = 0; k < attempts; k++) {
if (E1000_NVM_RW_REG_DONE &
- rd32(E1000_SRWR)) {
+ E1000_READ_REG(hw, E1000_SRWR)) {
ret_val = E1000_SUCCESS;
break;
}
- udelay(5);
- }
+ usec_delay(5);
+ }
if (ret_val != E1000_SUCCESS) {
- hw_dbg("Shadow RAM write EEWR timed out\n");
+ DEBUGOUT("Shadow RAM write EEWR timed out\n");
break;
}
}
@@ -290,106 +355,109 @@ out:
return ret_val;
}
-/**
- * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
+/** e1000_read_invm_word_i210 - Reads OTP
* @hw: pointer to the HW structure
- * @offset: offset within the Shadow RAM to be written to
- * @words: number of words to write
- * @data: 16 bit word(s) to be written to the Shadow RAM
- *
- * Writes data to Shadow RAM at offset using EEWR register.
- *
- * If e1000_update_nvm_checksum is not called after this function , the
- * data will not be committed to FLASH and also Shadow RAM will most likely
- * contain an invalid checksum.
+ * @address: the word address (aka eeprom offset) to read
+ * @data: pointer to the data read
*
- * If error code is returned, data and Shadow RAM may be inconsistent - buffer
- * partially written.
+ * Reads 16-bit words from the OTP. Return error when the word is not
+ * stored in OTP.
**/
-s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
+static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
- s32 status = E1000_SUCCESS;
- u16 i, count;
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u32 invm_dword;
+ u16 i;
+ u8 record_type, word_address;
- /* We cannot hold synchronization semaphores for too long,
- * because of forceful takeover procedure. However it is more efficient
- * to write in bursts than synchronizing access for each word.
- */
- for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
- count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
- E1000_EERD_EEWR_MAX_COUNT : (words - i);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
- status = igb_write_nvm_srwr(hw, offset, count,
- data + i);
- hw->nvm.ops.release(hw);
- } else {
- status = E1000_ERR_SWFW_SYNC;
- }
+ DEBUGFUNC("e1000_read_invm_word_i210");
- if (status != E1000_SUCCESS)
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
+ /* Get record type */
+ record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
+ if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
break;
+ if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
+ i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
+ i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
+ word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
+ if (word_address == address) {
+ *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
+ DEBUGOUT2("Read INVM Word 0x%02x = %x",
+ address, *data);
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
}
-
+ if (status != E1000_SUCCESS)
+ DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
return status;
}
-/**
- * igb_read_nvm_i211 - Read NVM wrapper function for I211
+/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211
* @hw: pointer to the HW structure
- * @words: number of words to read
+ * @address: the word address (aka eeprom offset) to read
* @data: pointer to the data read
*
* Wrapper function to return data formerly found in the NVM.
**/
-s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
+static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
+ u16 E1000_UNUSEDARG words, u16 *data)
{
s32 ret_val = E1000_SUCCESS;
+ DEBUGFUNC("e1000_read_invm_i210");
+
/* Only the MAC addr is required to be present in the iNVM */
switch (offset) {
case NVM_MAC_ADDR:
- ret_val = igb_read_invm_i211(hw, offset, &data[0]);
- ret_val |= igb_read_invm_i211(hw, offset+1, &data[1]);
- ret_val |= igb_read_invm_i211(hw, offset+2, &data[2]);
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
+ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1,
+ &data[1]);
+ ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2,
+ &data[2]);
if (ret_val != E1000_SUCCESS)
- hw_dbg("MAC Addr not found in iNVM\n");
+ DEBUGOUT("MAC Addr not found in iNVM\n");
break;
case NVM_INIT_CTRL_2:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_INIT_CTRL_2_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_INIT_CTRL_4:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_INIT_CTRL_4_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_LED_1_CFG:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_LED_1_CFG_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_LED_0_2_CFG:
- igb_read_invm_i211(hw, offset, data);
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_LED_0_2_CFG_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_ID_LED_SETTINGS:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = ID_LED_RESERVED_FFFF;
ret_val = E1000_SUCCESS;
}
+ break;
case NVM_SUB_DEV_ID:
*data = hw->subsystem_device_id;
break;
@@ -403,7 +471,7 @@ s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
*data = hw->vendor_id;
break;
default:
- hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
+ DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
*data = NVM_RESERVED_WORD;
break;
}
@@ -411,56 +479,15 @@ s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
}
/**
- * igb_read_invm_i211 - Reads OTP
- * @hw: pointer to the HW structure
- * @address: the word address (aka eeprom offset) to read
- * @data: pointer to the data read
- *
- * Reads 16-bit words from the OTP. Return error when the word is not
- * stored in OTP.
- **/
-s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
-{
- s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
- u32 invm_dword;
- u16 i;
- u8 record_type, word_address;
-
- for (i = 0; i < E1000_INVM_SIZE; i++) {
- invm_dword = rd32(E1000_INVM_DATA_REG(i));
- /* Get record type */
- record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
- if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
- break;
- if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
- i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
- if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
- i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
- if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
- word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
- if (word_address == (u8)address) {
- *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
- hw_dbg("Read INVM Word 0x%02x = %x",
- address, *data);
- status = E1000_SUCCESS;
- break;
- }
- }
- }
- if (status != E1000_SUCCESS)
- hw_dbg("Requested word 0x%02x not found in OTP\n", address);
- return status;
-}
-
-/**
- * igb_read_invm_version - Reads iNVM version and image type
+ * e1000_read_invm_version - Reads iNVM version and image type
* @hw: pointer to the HW structure
* @invm_ver: version structure for the version read
*
* Reads iNVM version and image type.
**/
-s32 igb_read_invm_version(struct e1000_hw *hw,
- struct e1000_fw_version *invm_ver) {
+s32 e1000_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver)
+{
u32 *record = NULL;
u32 *next_record = NULL;
u32 i = 0;
@@ -471,9 +498,11 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
u16 version = 0;
+ DEBUGFUNC("e1000_read_invm_version");
+
/* Read iNVM memory */
for (i = 0; i < E1000_INVM_SIZE; i++) {
- invm_dword = rd32(E1000_INVM_DATA_REG(i));
+ invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
buffer[i] = invm_dword;
}
@@ -495,7 +524,8 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
status = E1000_SUCCESS;
break;
}
- /* Check if we have odd version location
+ /*
+ * Check if we have odd version location
* used and it is the last one used
*/
else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
@@ -506,7 +536,8 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
status = E1000_SUCCESS;
break;
}
- /* Check if we have even version location
+ /*
+ * Check if we have even version location
* used and it is the last one used
*/
else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
@@ -547,27 +578,30 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
}
/**
- * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
+ * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
**/
-s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
+s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
s32 status = E1000_SUCCESS;
s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
+ DEBUGFUNC("e1000_validate_nvm_checksum_i210");
+
if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
- /* Replace the read function with semaphore grabbing with
+ /*
+ * Replace the read function with semaphore grabbing with
* the one that skips this for a while.
* We have semaphore taken already here.
*/
read_op_ptr = hw->nvm.ops.read;
- hw->nvm.ops.read = igb_read_nvm_eerd;
+ hw->nvm.ops.read = e1000_read_nvm_eerd;
- status = igb_validate_nvm_checksum(hw);
+ status = e1000_validate_nvm_checksum_generic(hw);
/* Revert original read operation. */
hw->nvm.ops.read = read_op_ptr;
@@ -580,130 +614,208 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
return status;
}
+
/**
- * igb_update_nvm_checksum_i210 - Update EEPROM checksum
+ * e1000_update_nvm_checksum_i210 - Update EEPROM checksum
* @hw: pointer to the HW structure
*
* Updates the EEPROM checksum by reading/adding each word of the EEPROM
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM. Next commit EEPROM data onto the Flash.
**/
-s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
+s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val;
u16 checksum = 0;
u16 i, nvm_data;
- /* Read the first word from the EEPROM. If this times out or fails, do
+ DEBUGFUNC("e1000_update_nvm_checksum_i210");
+
+ /*
+ * Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
* EEPROM read fails
*/
- ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
+ ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
if (ret_val != E1000_SUCCESS) {
- hw_dbg("EEPROM read failed\n");
+ DEBUGOUT("EEPROM read failed\n");
goto out;
}
if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
- /* Do not use hw->nvm.ops.write, hw->nvm.ops.read
+ /*
+ * Do not use hw->nvm.ops.write, hw->nvm.ops.read
* because we do not want to take the synchronization
* semaphores twice here.
*/
for (i = 0; i < NVM_CHECKSUM_REG; i++) {
- ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
+ ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
if (ret_val) {
hw->nvm.ops.release(hw);
- hw_dbg("NVM Read Error while updating checksum.\n");
+ DEBUGOUT("NVM Read Error while updating checksum.\n");
goto out;
}
checksum += nvm_data;
}
checksum = (u16) NVM_SUM - checksum;
- ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
+ ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
&checksum);
if (ret_val != E1000_SUCCESS) {
hw->nvm.ops.release(hw);
- hw_dbg("NVM Write Error while updating checksum.\n");
+ DEBUGOUT("NVM Write Error while updating checksum.\n");
goto out;
}
hw->nvm.ops.release(hw);
- ret_val = igb_update_flash_i210(hw);
+ ret_val = e1000_update_flash_i210(hw);
} else {
- ret_val = -E1000_ERR_SWFW_SYNC;
+ ret_val = E1000_ERR_SWFW_SYNC;
}
out:
return ret_val;
}
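
To make the arithmetic above explicit: the checksum word is chosen so that the 16-bit sum of all words up to and including NVM_CHECKSUM_REG equals NVM_SUM (0xBABA). Below is a sketch of the same computation over a local buffer, reusing the driver's u16 and NVM_SUM definitions; it is illustrative only, not from the patch, and the example_ helper is hypothetical.

/* Illustrative sketch only -- not from the patch. */
static u16 example_nvm_checksum(const u16 *words, u16 nwords)
{
	u16 sum = 0;
	u16 i;

	for (i = 0; i < nwords; i++)	/* the words preceding the checksum */
		sum += words[i];

	return (u16)(NVM_SUM - sum);	/* value written to NVM_CHECKSUM_REG */
}
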
/**
- * igb_pool_flash_update_done_i210 - Pool FLUDONE status.
+ * e1000_get_flash_presence_i210 - Check if flash device is detected.
* @hw: pointer to the HW structure
*
**/
-static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
+bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
{
- s32 ret_val = -E1000_ERR_NVM;
- u32 i, reg;
+ u32 eec = 0;
+ bool ret_val = false;
- for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
- reg = rd32(E1000_EECD);
- if (reg & E1000_EECD_FLUDONE_I210) {
- ret_val = E1000_SUCCESS;
- break;
- }
- udelay(5);
- }
+ DEBUGFUNC("e1000_get_flash_presence_i210");
+
+ eec = E1000_READ_REG(hw, E1000_EECD);
+
+ if (eec & E1000_EECD_FLASH_DETECTED_I210)
+ ret_val = true;
return ret_val;
}
/**
- * igb_update_flash_i210 - Commit EEPROM to the flash
+ * e1000_update_flash_i210 - Commit EEPROM to the flash
* @hw: pointer to the HW structure
*
**/
-s32 igb_update_flash_i210(struct e1000_hw *hw)
+s32 e1000_update_flash_i210(struct e1000_hw *hw)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val;
u32 flup;
- ret_val = igb_pool_flash_update_done_i210(hw);
+ DEBUGFUNC("e1000_update_flash_i210");
+
+ ret_val = e1000_pool_flash_update_done_i210(hw);
if (ret_val == -E1000_ERR_NVM) {
- hw_dbg("Flash update time out\n");
+ DEBUGOUT("Flash update time out\n");
goto out;
}
- flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
- wr32(E1000_EECD, flup);
+ flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
+ E1000_WRITE_REG(hw, E1000_EECD, flup);
- ret_val = igb_pool_flash_update_done_i210(hw);
+ ret_val = e1000_pool_flash_update_done_i210(hw);
if (ret_val == E1000_SUCCESS)
- hw_dbg("Flash update complete\n");
+ DEBUGOUT("Flash update complete\n");
else
- hw_dbg("Flash update time out\n");
+ DEBUGOUT("Flash update time out\n");
out:
return ret_val;
}
/**
- * igb_valid_led_default_i210 - Verify a valid default LED config
+ * e1000_pool_flash_update_done_i210 - Pool FLUDONE status.
+ * @hw: pointer to the HW structure
+ *
+ **/
+s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_NVM;
+ u32 i, reg;
+
+ DEBUGFUNC("e1000_pool_flash_update_done_i210");
+
+ for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
+ reg = E1000_READ_REG(hw, E1000_EECD);
+ if (reg & E1000_EECD_FLUDONE_I210) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(5);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the i210/i211 NVM parameters and function pointers.
+ **/
+static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ struct e1000_nvm_info *nvm = &hw->nvm;
+
+ DEBUGFUNC("e1000_init_nvm_params_i210");
+
+ ret_val = e1000_init_nvm_params_82575(hw);
+ nvm->ops.acquire = e1000_acquire_nvm_i210;
+ nvm->ops.release = e1000_release_nvm_i210;
+ nvm->ops.valid_led_default = e1000_valid_led_default_i210;
+ if (e1000_get_flash_presence_i210(hw)) {
+ hw->nvm.type = e1000_nvm_flash_hw;
+ nvm->ops.read = e1000_read_nvm_srrd_i210;
+ nvm->ops.write = e1000_write_nvm_srwr_i210;
+ nvm->ops.validate = e1000_validate_nvm_checksum_i210;
+ nvm->ops.update = e1000_update_nvm_checksum_i210;
+ } else {
+ hw->nvm.type = e1000_nvm_invm;
+ nvm->ops.read = e1000_read_invm_i210;
+ nvm->ops.write = e1000_null_write_nvm;
+ nvm->ops.validate = e1000_null_ops_generic;
+ nvm->ops.update = e1000_null_ops_generic;
+ }
+ return ret_val;
+}
+
+/**
+ * e1000_init_function_pointers_i210 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_i210(struct e1000_hw *hw)
+{
+ e1000_init_function_pointers_82575(hw);
+ hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
+
+ return;
+}
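
A short usage sketch of the hook installed above: install the i210 function pointers, then run the NVM init_params hook, which picks flash-backed or iNVM-backed ops. Illustrative only, not from the patch; the call order and the example_ wrapper are assumptions for the sake of the example.

/* Illustrative sketch only -- not from the patch. */
static s32 example_setup_i210_nvm(struct e1000_hw *hw)
{
	e1000_init_function_pointers_i210(hw);	/* installs nvm.ops.init_params */

	return hw->nvm.ops.init_params(hw);	/* selects flash vs. iNVM ops */
}
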
+
+/**
+ * e1000_valid_led_default_i210 - Verify a valid default LED config
* @hw: pointer to the HW structure
* @data: pointer to the NVM (EEPROM)
*
* Read the EEPROM for the current default LED configuration. If the
* LED configuration is not valid, set to a valid LED configuration.
**/
-s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
+static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
s32 ret_val;
+ DEBUGFUNC("e1000_valid_led_default_i210");
+
ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
if (ret_val) {
- hw_dbg("NVM Read Error\n");
+ DEBUGOUT("NVM Read Error\n");
goto out;
}
@@ -723,17 +835,19 @@ out:
}
/**
- * __igb_access_xmdio_reg - Read/write XMDIO register
+ * __e1000_access_xmdio_reg - Read/write XMDIO register
* @hw: pointer to the HW structure
* @address: XMDIO address to program
* @dev_addr: device address to program
* @data: pointer to value to read/write from/to the XMDIO address
* @read: boolean flag to indicate read or write
**/
-static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
- u8 dev_addr, u16 *data, bool read)
+static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address,
+ u8 dev_addr, u16 *data, bool read)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val;
+
+ DEBUGFUNC("__e1000_access_xmdio_reg");
ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
if (ret_val)
@@ -764,25 +878,146 @@ static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
}
/**
- * igb_read_xmdio_reg - Read XMDIO register
+ * e1000_read_xmdio_reg - Read XMDIO register
* @hw: pointer to the HW structure
* @addr: XMDIO address to program
* @dev_addr: device address to program
* @data: value to be read from the EMI address
**/
-s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
+s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
- return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
+ DEBUGFUNC("e1000_read_xmdio_reg");
+
+ return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true);
}
/**
- * igb_write_xmdio_reg - Write XMDIO register
+ * e1000_write_xmdio_reg - Write XMDIO register
* @hw: pointer to the HW structure
* @addr: XMDIO address to program
* @dev_addr: device address to program
* @data: value to be written to the XMDIO address
**/
-s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
+s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
- return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
+	DEBUGFUNC("e1000_write_xmdio_reg");
+
+ return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false);
+}
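
The XMDIO helpers above follow the conventional Clause-45-over-Clause-22 indirect MMD access sequence. The compilable sketch below illustrates that sequence with stubbed register accessors; the register numbers, the MMDAC_FUNC_DATA bit, and the stub names are assumptions for illustration, not the driver's actual definitions (only the first write is visible in the hunk above).

/* Illustrative sketch of indirect MMD (XMDIO) access, in the spirit of
 * __e1000_access_xmdio_reg(). phy_write()/phy_read() stand in for
 * hw->phy.ops.write_reg/read_reg; register values are assumed.
 */
#include <stdbool.h>
#include <stdint.h>

#define REG_MMDAC	13		/* MMD access control (assumed) */
#define REG_MMDAAD	14		/* MMD access address/data (assumed) */
#define MMDAC_FUNC_DATA	0x4000		/* "data, no post increment" (assumed) */

static int phy_write(uint16_t reg, uint16_t val) { (void)reg; (void)val; return 0; }
static int phy_read(uint16_t reg, uint16_t *val) { (void)reg; *val = 0; return 0; }

int access_xmdio_reg(uint16_t address, uint8_t dev_addr, uint16_t *data, bool read)
{
	int ret;

	/* 1. Select the MMD device, "address" function. */
	ret = phy_write(REG_MMDAC, dev_addr);
	if (ret)
		return ret;
	/* 2. Latch the register address inside that MMD. */
	ret = phy_write(REG_MMDAAD, address);
	if (ret)
		return ret;
	/* 3. Switch to the "data" function for the same MMD. */
	ret = phy_write(REG_MMDAC, MMDAC_FUNC_DATA | dev_addr);
	if (ret)
		return ret;
	/* 4. Read or write the data register. */
	return read ? phy_read(REG_MMDAAD, data) : phy_write(REG_MMDAAD, *data);
}
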
+
+/**
+ * e1000_pll_workaround_i210
+ * @hw: pointer to the HW structure
+ *
+ * Works around an erratum in the PLL circuit where it occasionally
+ * provides the wrong clock frequency after power up.
+ **/
+static s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
+ u16 nvm_word, phy_word, pci_word, tmp_nvm;
+ int i;
+
+ /* Get and set needed register values */
+ wuc = E1000_READ_REG(hw, E1000_WUC);
+ mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
+ reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
+ E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val);
+
+ /* Get data from NVM, or set default */
+ ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
+ &nvm_word);
+ if (ret_val != E1000_SUCCESS)
+ nvm_word = E1000_INVM_DEFAULT_AL;
+ tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
+ for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
+ /* check current state directly from internal PHY */
+ e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
+ E1000_PHY_PLL_FREQ_REG), &phy_word);
+ if ((phy_word & E1000_PHY_PLL_UNCONF)
+ != E1000_PHY_PLL_UNCONF) {
+ ret_val = E1000_SUCCESS;
+ break;
+ } else {
+ ret_val = -E1000_ERR_PHY;
+ }
+ /* directly reset the internal PHY */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
+ reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
+ E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);
+
+ e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ pci_word |= E1000_PCI_PMCSR_D3;
+ e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ msec_delay(1);
+ pci_word &= ~E1000_PCI_PMCSR_D3;
+ e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+ reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
+ E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val);
+
+ /* restore WUC register */
+ E1000_WRITE_REG(hw, E1000_WUC, wuc);
+ }
+ /* restore MDICNFG setting */
+ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
+ return ret_val;
+}
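
The workaround above reduces to a bounded retry: check whether the PHY still reports an unconfigured PLL and, if so, reset the PHY, bounce the device through D3 with a patched autoload word, and try again, up to E1000_MAX_PLL_TRIES. A stripped-down sketch of only that control flow, with the hardware pokes replaced by stubs (everything here is illustrative):

/* Control-flow sketch of e1000_pll_workaround_i210(); hardware accesses
 * are replaced by stubs, only the retry structure is shown.
 */
#include <stdio.h>

#define MAX_PLL_TRIES	5
#define PLL_UNCONF	0xFF	/* mirrors E1000_PHY_PLL_UNCONF */

static unsigned read_pll_freq_word(int attempt)
{
	/* Stub: pretend the PLL comes up correctly on the third try. */
	return attempt < 2 ? PLL_UNCONF : 0x12;
}

static void reset_phy_and_cycle_d3(void)
{
	/* Stub for the PHY reset + D3 power cycle done between attempts. */
}

static int pll_workaround(void)
{
	int i, ret = -1;

	for (i = 0; i < MAX_PLL_TRIES; i++) {
		if ((read_pll_freq_word(i) & PLL_UNCONF) != PLL_UNCONF) {
			ret = 0;		/* PLL configured, done */
			break;
		}
		reset_phy_and_cycle_d3();	/* retry after a reset */
	}
	return ret;
}

int main(void)
{
	printf("pll_workaround() = %d\n", pll_workaround());
	return 0;
}
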
+
+/**
+ * e1000_get_cfg_done_i210 - Read config done bit
+ * @hw: pointer to the HW structure
+ *
+ * Read the management control register for the config done bit for
+ * completion status. NOTE: silicon which is EEPROM-less will fail trying
+ * to read the config done bit, so the error is only logged and
+ * E1000_SUCCESS is returned. If we returned an error instead, EEPROM-less
+ * silicon would not be able to be reset or change link.
+ **/
+static s32 e1000_get_cfg_done_i210(struct e1000_hw *hw)
+{
+ s32 timeout = PHY_CFG_TIMEOUT;
+ u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+ DEBUGFUNC("e1000_get_cfg_done_i210");
+
+ while (timeout) {
+ if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask)
+ break;
+ msec_delay(1);
+ timeout--;
+ }
+ if (!timeout)
+ DEBUGOUT("MNG configuration cycle has not completed.\n");
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_hw_i210 - Init hw for I210/I211
+ * @hw: pointer to the HW structure
+ *
+ * Called to initialize hw for i210 hw family.
+ **/
+s32 e1000_init_hw_i210(struct e1000_hw *hw)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_init_hw_i210");
+ if ((hw->mac.type >= e1000_i210) &&
+ !(e1000_get_flash_presence_i210(hw))) {
+ ret_val = e1000_pll_workaround_i210(hw);
+ if (ret_val != E1000_SUCCESS)
+ return ret_val;
+ }
+ hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210;
+ ret_val = e1000_init_hw_82575(hw);
+ return ret_val;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index bfc08e05c907..a14e897d26a0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,14 +12,11 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -28,27 +25,23 @@
#ifndef _E1000_I210_H_
#define _E1000_I210_H_
-extern s32 igb_update_flash_i210(struct e1000_hw *hw);
-extern s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
+bool e1000_get_flash_presence_i210(struct e1000_hw *hw);
+s32 e1000_update_flash_i210(struct e1000_hw *hw);
+s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw);
+s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw);
+s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
-extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
+s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
-extern s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data);
-extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
-extern void igb_release_nvm_i210(struct e1000_hw *hw);
-extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
-extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
-extern s32 igb_read_invm_version(struct e1000_hw *hw,
- struct e1000_fw_version *invm_ver);
-extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
- u16 *data);
-extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
- u16 data);
+s32 e1000_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver);
+s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
+ u16 *data);
+s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
+ u16 data);
+s32 e1000_init_hw_i210(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
@@ -71,27 +64,38 @@ enum E1000_INVM_STRUCTURE_TYPE {
#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
-#define E1000_INVM_ULT_BYTES_SIZE 8
-#define E1000_INVM_RECORD_SIZE_IN_BYTES 4
-#define E1000_INVM_VER_FIELD_ONE 0x1FF8
-#define E1000_INVM_VER_FIELD_TWO 0x7FE000
-#define E1000_INVM_IMGTYPE_FIELD 0x1F800000
+#define E1000_INVM_ULT_BYTES_SIZE 8
+#define E1000_INVM_RECORD_SIZE_IN_BYTES 4
+#define E1000_INVM_VER_FIELD_ONE 0x1FF8
+#define E1000_INVM_VER_FIELD_TWO 0x7FE000
+#define E1000_INVM_IMGTYPE_FIELD 0x1F800000
-#define E1000_INVM_MAJOR_MASK 0x3F0
-#define E1000_INVM_MINOR_MASK 0xF
-#define E1000_INVM_MAJOR_SHIFT 4
+#define E1000_INVM_MAJOR_MASK 0x3F0
+#define E1000_INVM_MINOR_MASK 0xF
+#define E1000_INVM_MAJOR_SHIFT 4
#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
- (ID_LED_OFF1_OFF2 << 4) | \
- (ID_LED_DEF1_DEF2))
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_OFF2))
#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \
(ID_LED_DEF1_DEF2 << 4) | \
- (ID_LED_DEF1_DEF2))
+ (ID_LED_OFF1_ON2))
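
The ID_LED_DEFAULT_* values above pack one 4-bit mode selector per LED into a 16-bit word (bits 3:0, 7:4 and 11:8). The snippet below only demonstrates the unpacking arithmetic; the example word is arbitrary and does not use the real ID_LED_* encodings.

/* Unpack a nibble-per-LED default word such as ID_LED_DEFAULT_I210.
 * The example value is arbitrary; only the field layout matters here.
 */
#include <stdio.h>

int main(void)
{
	unsigned int led_default = (0xA << 8) | (0xB << 4) | 0xC; /* example only */
	int led;

	for (led = 0; led < 3; led++)
		printf("LED%d mode selector = 0x%x\n",
		       led, (led_default >> (4 * led)) & 0xF);
	return 0;
}
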
-/* NVM offset defaults for i211 device */
+/* NVM offset defaults for I211 devices */
#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243
#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1
#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
+/* PLL Defines */
+#define E1000_PCI_PMCSR 0x44
+#define E1000_PCI_PMCSR_D3 0x03
+#define E1000_MAX_PLL_TRIES 5
+#define E1000_PHY_PLL_UNCONF 0xFF
+#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000
+#define E1000_PHY_PLL_FREQ_REG 0x000E
+#define E1000_INVM_DEFAULT_AL 0x202F
+#define E1000_INVM_AUTOLOAD 0x0A
+#define E1000_INVM_PLL_WO_VAL 0x0010
+
#endif
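
The PLL defines above are combined in e1000_pll_workaround_i210() as (E1000_INVM_AUTOLOAD << 4) | (word << 16), where word is either the autoload word read from iNVM or E1000_INVM_DEFAULT_AL, optionally OR'd with E1000_INVM_PLL_WO_VAL. A tiny host-side check of that arithmetic, using only the constants shown in this hunk:

/* Reproduce the EEARBC_I210 value composed in e1000_pll_workaround_i210(). */
#include <stdio.h>

#define E1000_INVM_DEFAULT_AL	0x202F
#define E1000_INVM_AUTOLOAD	0x0A
#define E1000_INVM_PLL_WO_VAL	0x0010

int main(void)
{
	unsigned int nvm_word = E1000_INVM_DEFAULT_AL;
	unsigned int tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	unsigned int with_wo = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
	unsigned int without = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);

	printf("EEARBC with workaround bit:    0x%08x\n", with_wo);  /* 0x203f00a0 */
	printf("EEARBC without workaround bit: 0x%08x\n", without);  /* 0x202f00a0 */
	return 0;
}
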
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 2559d70a2321..f848b995c932 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,61 +12,168 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
-#include <linux/if_ether.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
+#include "e1000_api.h"
+
+static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
+static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+static void e1000_config_collision_dist_generic(struct e1000_hw *hw);
+static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+
+/**
+ * e1000_init_mac_ops_generic - Initialize MAC function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the function pointers to no-op functions
+ **/
+void e1000_init_mac_ops_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ DEBUGFUNC("e1000_init_mac_ops_generic");
+
+ /* General Setup */
+ mac->ops.init_params = e1000_null_ops_generic;
+ mac->ops.init_hw = e1000_null_ops_generic;
+ mac->ops.reset_hw = e1000_null_ops_generic;
+ mac->ops.setup_physical_interface = e1000_null_ops_generic;
+ mac->ops.get_bus_info = e1000_null_ops_generic;
+ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie;
+ mac->ops.read_mac_addr = e1000_read_mac_addr_generic;
+ mac->ops.config_collision_dist = e1000_config_collision_dist_generic;
+ mac->ops.clear_hw_cntrs = e1000_null_mac_generic;
+ /* LED */
+ mac->ops.cleanup_led = e1000_null_ops_generic;
+ mac->ops.setup_led = e1000_null_ops_generic;
+ mac->ops.blink_led = e1000_null_ops_generic;
+ mac->ops.led_on = e1000_null_ops_generic;
+ mac->ops.led_off = e1000_null_ops_generic;
+ /* LINK */
+ mac->ops.setup_link = e1000_null_ops_generic;
+ mac->ops.get_link_up_info = e1000_null_link_info;
+ mac->ops.check_for_link = e1000_null_ops_generic;
+ /* Management */
+ mac->ops.check_mng_mode = e1000_null_mng_mode;
+ /* VLAN, MC, etc. */
+ mac->ops.update_mc_addr_list = e1000_null_update_mc;
+ mac->ops.clear_vfta = e1000_null_mac_generic;
+ mac->ops.write_vfta = e1000_null_write_vfta;
+ mac->ops.rar_set = e1000_rar_set_generic;
+ mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
+}
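
The block above fills every callback with a safe no-op before any MAC-specific init runs, so a family that never overrides a hook still gets a valid function pointer instead of NULL. A generic illustration of that pattern (all names here are invented for the example):

/* "Fill the ops table with no-ops first" pattern, as used by
 * e1000_init_mac_ops_generic(). Names are illustrative only.
 */
#include <stdio.h>

struct mac_ops {
	int (*init_hw)(void);
	int (*check_for_link)(void);
};

static int null_op(void) { return 0; }			/* harmless default */
static int real_check_for_link(void) { printf("checking link\n"); return 0; }

int main(void)
{
	struct mac_ops ops = { .init_hw = null_op, .check_for_link = null_op };

	/* A specific MAC family overrides only what it implements. */
	ops.check_for_link = real_check_for_link;

	ops.init_hw();		/* safe even though never overridden */
	ops.check_for_link();
	return 0;
}
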
+
+/**
+ * e1000_null_ops_generic - No-op function, returns 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_ops_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_ops_generic");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_mac_generic - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_mac_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_mac_generic");
+ return;
+}
+
+/**
+ * e1000_null_link_info - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_link_info(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG *s, u16 E1000_UNUSEDARG *d)
+{
+ DEBUGFUNC("e1000_null_link_info");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_mng_mode - No-op function, return false
+ * @hw: pointer to the HW structure
+ **/
+bool e1000_null_mng_mode(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_mng_mode");
+ return false;
+}
-#include "e1000_mac.h"
+/**
+ * e1000_null_update_mc - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_update_mc(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
+{
+ DEBUGFUNC("e1000_null_update_mc");
+ return;
+}
-#include "igb.h"
+/**
+ * e1000_null_write_vfta - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG a, u32 E1000_UNUSEDARG b)
+{
+ DEBUGFUNC("e1000_null_write_vfta");
+ return;
+}
-static s32 igb_set_default_fc(struct e1000_hw *hw);
-static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
+/**
+ * e1000_null_rar_set - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+int e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a)
+{
+ DEBUGFUNC("e1000_null_rar_set");
+ return E1000_SUCCESS;
+}
/**
- * igb_get_bus_info_pcie - Get PCIe bus information
+ * e1000_get_bus_info_pcie_generic - Get PCIe bus information
* @hw: pointer to the HW structure
*
* Determines and stores the system bus information for a particular
* network interface. The following bus information is determined and stored:
* bus speed, bus width, type (PCIe), and PCIe function.
**/
-s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
+s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
{
+ struct e1000_mac_info *mac = &hw->mac;
struct e1000_bus_info *bus = &hw->bus;
s32 ret_val;
- u32 reg;
u16 pcie_link_status;
+ DEBUGFUNC("e1000_get_bus_info_pcie_generic");
+
bus->type = e1000_bus_type_pci_express;
- ret_val = igb_read_pcie_cap_reg(hw,
- PCI_EXP_LNKSTA,
- &pcie_link_status);
+ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_LINK_STATUS,
+ &pcie_link_status);
if (ret_val) {
bus->width = e1000_bus_width_unknown;
bus->speed = e1000_bus_speed_unknown;
} else {
- switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
- case PCI_EXP_LNKSTA_CLS_2_5GB:
+ switch (pcie_link_status & PCIE_LINK_SPEED_MASK) {
+ case PCIE_LINK_SPEED_2500:
bus->speed = e1000_bus_speed_2500;
break;
- case PCI_EXP_LNKSTA_CLS_5_0GB:
+ case PCIE_LINK_SPEED_5000:
bus->speed = e1000_bus_speed_5000;
break;
default:
@@ -75,75 +182,68 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
}
bus->width = (enum e1000_bus_width)((pcie_link_status &
- PCI_EXP_LNKSTA_NLW) >>
- PCI_EXP_LNKSTA_NLW_SHIFT);
+ PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT);
}
- reg = rd32(E1000_STATUS);
- bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+ mac->ops.set_lan_id(hw);
- return 0;
+ return E1000_SUCCESS;
}
/**
- * igb_clear_vfta - Clear VLAN filter table
+ * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ *
* @hw: pointer to the HW structure
*
- * Clears the register array which contains the VLAN filter table by
- * setting all the values to 0.
+ * Determines the LAN function id by reading memory-mapped registers
+ * and swaps the port value if requested.
**/
-void igb_clear_vfta(struct e1000_hw *hw)
+static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
{
- u32 offset;
+ struct e1000_bus_info *bus = &hw->bus;
+ u32 reg;
- for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
- array_wr32(E1000_VFTA, offset, 0);
- wrfl();
- }
+ /* The status register reports the correct function number
+ * for the device regardless of function swap state.
+ */
+ reg = E1000_READ_REG(hw, E1000_STATUS);
+ bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
}
/**
- * igb_write_vfta - Write value to VLAN filter table
+ * e1000_set_lan_id_single_port - Set LAN id for a single port device
* @hw: pointer to the HW structure
- * @offset: register offset in VLAN filter table
- * @value: register value written to VLAN filter table
*
- * Writes value at the given offset in the register array which stores
- * the VLAN filter table.
+ * Sets the LAN function id to zero for a single port device.
**/
-static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+void e1000_set_lan_id_single_port(struct e1000_hw *hw)
{
- array_wr32(E1000_VFTA, offset, value);
- wrfl();
-}
+ struct e1000_bus_info *bus = &hw->bus;
-/* Due to a hw errata, if the host tries to configure the VFTA register
- * while performing queries from the BMC or DMA, then the VFTA in some
- * cases won't be written.
- */
+ bus->func = 0;
+}
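
Both set_lan_id variants come down to masking and shifting a function-number field out of the STATUS register (or hard-wiring it to zero). The sketch below shows only that bit manipulation; the mask and shift values are placeholders, not guaranteed to match the real E1000_STATUS_FUNC_MASK/SHIFT.

/* Extract a function-number field from a status word, in the style of
 * e1000_set_lan_id_multi_port_pcie(). Mask/shift values are placeholders.
 */
#include <stdio.h>

#define FUNC_MASK	0x0000000Cu	/* assumed 2-bit field at bits 3:2 */
#define FUNC_SHIFT	2

int main(void)
{
	unsigned int status = 0x00000008;	/* example register value */
	unsigned int func = (status & FUNC_MASK) >> FUNC_SHIFT;

	printf("LAN function id = %u\n", func);	/* prints 2 */
	return 0;
}
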
/**
- * igb_clear_vfta_i350 - Clear VLAN filter table
+ * e1000_clear_vfta_generic - Clear VLAN filter table
* @hw: pointer to the HW structure
*
* Clears the register array which contains the VLAN filter table by
* setting all the values to 0.
**/
-void igb_clear_vfta_i350(struct e1000_hw *hw)
+void e1000_clear_vfta_generic(struct e1000_hw *hw)
{
u32 offset;
- int i;
- for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
- for (i = 0; i < 10; i++)
- array_wr32(E1000_VFTA, offset, 0);
+ DEBUGFUNC("e1000_clear_vfta_generic");
- wrfl();
+ for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+ E1000_WRITE_FLUSH(hw);
}
}
/**
- * igb_write_vfta_i350 - Write value to VLAN filter table
+ * e1000_write_vfta_generic - Write value to VLAN filter table
* @hw: pointer to the HW structure
* @offset: register offset in VLAN filter table
* @value: register value written to VLAN filter table
@@ -151,113 +251,83 @@ void igb_clear_vfta_i350(struct e1000_hw *hw)
* Writes value at the given offset in the register array which stores
* the VLAN filter table.
**/
-static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
{
- int i;
+ DEBUGFUNC("e1000_write_vfta_generic");
- for (i = 0; i < 10; i++)
- array_wr32(E1000_VFTA, offset, value);
-
- wrfl();
+ E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+ E1000_WRITE_FLUSH(hw);
}
/**
- * igb_init_rx_addrs - Initialize receive address's
+ * e1000_init_rx_addrs_generic - Initialize receive addresses
* @hw: pointer to the HW structure
* @rar_count: receive address registers
*
- * Setups the receive address registers by setting the base receive address
+ * Sets up the receive address registers by setting the base receive address
* register to the devices MAC address and clearing all the other receive
* address registers to 0.
**/
-void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
{
u32 i;
- u8 mac_addr[ETH_ALEN] = {0};
+ u8 mac_addr[ETH_ADDR_LEN] = {0};
+
+ DEBUGFUNC("e1000_init_rx_addrs_generic");
/* Setup the receive address */
- hw_dbg("Programming MAC Address into RAR[0]\n");
+ DEBUGOUT("Programming MAC Address into RAR[0]\n");
hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
/* Zero out the other (rar_entry_count - 1) receive addresses */
- hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
+ DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
for (i = 1; i < rar_count; i++)
hw->mac.ops.rar_set(hw, mac_addr, i);
}
/**
- * igb_vfta_set - enable or disable vlan in VLAN filter table
- * @hw: pointer to the HW structure
- * @vid: VLAN id to add or remove
- * @add: if true add filter, if false remove
- *
- * Sets or clears a bit in the VLAN filter table array based on VLAN id
- * and if we are adding or removing the filter
- **/
-s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
-{
- u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
- u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
- u32 vfta;
- struct igb_adapter *adapter = hw->back;
- s32 ret_val = 0;
-
- vfta = adapter->shadow_vfta[index];
-
- /* bit was set/cleared before we started */
- if ((!!(vfta & mask)) == add) {
- ret_val = -E1000_ERR_CONFIG;
- } else {
- if (add)
- vfta |= mask;
- else
- vfta &= ~mask;
- }
- if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
- igb_write_vfta_i350(hw, index, vfta);
- else
- igb_write_vfta(hw, index, vfta);
- adapter->shadow_vfta[index] = vfta;
-
- return ret_val;
-}
-
-/**
- * igb_check_alt_mac_addr - Check for alternate MAC addr
+ * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
* @hw: pointer to the HW structure
*
* Checks the nvm for an alternate MAC address. An alternate MAC address
* can be setup by pre-boot software and must be treated like a permanent
- * address and must override the actual permanent MAC address. If an
- * alternate MAC address is found it is saved in the hw struct and
- * programmed into RAR0 and the function returns success, otherwise the
- * function returns an error.
+ * address and must override the actual permanent MAC address. If an
+ * alternate MAC address is found it is programmed into RAR0, replacing
+ * the permanent address that was installed into RAR0 by the Si on reset.
+ * This function will return SUCCESS unless it encounters an error while
+ * reading the EEPROM.
**/
-s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
{
u32 i;
- s32 ret_val = 0;
+ s32 ret_val;
u16 offset, nvm_alt_mac_addr_offset, nvm_data;
- u8 alt_mac_addr[ETH_ALEN];
+ u8 alt_mac_addr[ETH_ADDR_LEN];
+
+ DEBUGFUNC("e1000_check_alt_mac_addr_generic");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data);
+ if (ret_val)
+ return ret_val;
/* Alternate MAC address is handled by the option ROM for 82580
* and newer. SW support not required.
*/
if (hw->mac.type >= e1000_82580)
- goto out;
+ return E1000_SUCCESS;
ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
- &nvm_alt_mac_addr_offset);
+ &nvm_alt_mac_addr_offset);
if (ret_val) {
- hw_dbg("NVM Read Error\n");
- goto out;
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
}
if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
(nvm_alt_mac_addr_offset == 0x0000))
/* There is no Alternate MAC Address */
- goto out;
+ return E1000_SUCCESS;
if (hw->bus.func == E1000_FUNC_1)
nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
@@ -266,12 +336,12 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
if (hw->bus.func == E1000_FUNC_3)
nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
- for (i = 0; i < ETH_ALEN; i += 2) {
+ for (i = 0; i < ETH_ADDR_LEN; i += 2) {
offset = nvm_alt_mac_addr_offset + (i >> 1);
ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
if (ret_val) {
- hw_dbg("NVM Read Error\n");
- goto out;
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
}
alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
@@ -279,9 +349,9 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
}
/* if multicast bit is set, the alternate address will not be used */
- if (is_multicast_ether_addr(alt_mac_addr)) {
- hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
- goto out;
+ if (alt_mac_addr[0] & 0x01) {
+ DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n");
+ return E1000_SUCCESS;
}
/* We have a valid alternate MAC address, and we want to treat it the
@@ -290,12 +360,11 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
*/
hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_rar_set - Set receive address register
+ * e1000_rar_set_generic - Set receive address register
* @hw: pointer to the HW structure
* @addr: pointer to the receive address
* @index: receive address array register
@@ -303,16 +372,17 @@ out:
* Sets the receive address array register at index to the address passed
* in by addr.
**/
-void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
+ DEBUGFUNC("e1000_rar_set_generic");
+
/* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
*/
- rar_low = ((u32) addr[0] |
- ((u32) addr[1] << 8) |
- ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
@@ -324,60 +394,29 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
* a single burst write, which will malfunction on some parts.
* The flushes avoid this.
*/
- wr32(E1000_RAL(index), rar_low);
- wrfl();
- wr32(E1000_RAH(index), rar_high);
- wrfl();
-}
+ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
+ E1000_WRITE_FLUSH(hw);
-/**
- * igb_mta_set - Set multicast filter table address
- * @hw: pointer to the HW structure
- * @hash_value: determines the MTA register and bit to set
- *
- * The multicast table address is a register array of 32-bit registers.
- * The hash_value is used to determine what register the bit is in, the
- * current value is read, the new bit is OR'd in and the new value is
- * written back into the register.
- **/
-void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
-{
- u32 hash_bit, hash_reg, mta;
-
- /* The MTA is a register array of 32-bit registers. It is
- * treated like an array of (32*mta_reg_count) bits. We want to
- * set bit BitArray[hash_value]. So we figure out what register
- * the bit is in, read it, OR in the new bit, then write
- * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
- * mask to bits 31:5 of the hash value which gives us the
- * register we're modifying. The hash bit within that register
- * is determined by the lower 5 bits of the hash value.
- */
- hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
- hash_bit = hash_value & 0x1F;
-
- mta = array_rd32(E1000_MTA, hash_reg);
-
- mta |= (1 << hash_bit);
-
- array_wr32(E1000_MTA, hash_reg, mta);
- wrfl();
+ return E1000_SUCCESS;
}
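
e1000_rar_set_generic() splits a 6-byte MAC address into the little-endian RAL/RAH pair exactly as shown above. The standalone snippet below reproduces that packing so the byte order is easy to verify:

/* Pack a MAC address into RAL/RAH the way e1000_rar_set_generic() does
 * (low 4 bytes into rar_low, remaining 2 into rar_high).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t addr[6] = { 0x00, 0x1B, 0x21, 0xAB, 0xCD, 0xEF };
	uint32_t rar_low, rar_high;

	rar_low  = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
		   ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
	rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

	printf("RAL = 0x%08x, RAH = 0x%08x\n", rar_low, rar_high);
	/* prints RAL = 0xab211b00, RAH = 0x0000efcd */
	return 0;
}
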
/**
- * igb_hash_mc_addr - Generate a multicast hash value
+ * e1000_hash_mc_addr_generic - Generate a multicast hash value
* @hw: pointer to the HW structure
* @mc_addr: pointer to a multicast address
*
* Generates a multicast address hash value which is used to determine
- * the multicast filter table array address and new table value. See
- * igb_mta_set()
+ * the multicast filter table array address and new table value.
**/
-static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
{
u32 hash_value, hash_mask;
u8 bit_shift = 0;
+ DEBUGFUNC("e1000_hash_mc_addr_generic");
+
/* Register count multiplied by bits per register */
hash_mask = (hw->mac.mta_reg_count * 32) - 1;
@@ -405,7 +444,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
* values resulting from each mc_filter_type...
* [0] [1] [2] [3] [4] [5]
* 01 AA 00 12 34 56
- * LSB MSB
+ * LSB MSB
*
* case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
* case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
@@ -434,7 +473,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
}
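
The worked case-0 value in the comment above (0x563) is easy to reproduce: with a 128-register MTA the hash mask is 0xFFF, bit_shift works out to 4, and the hash is built from the last two address bytes. A self-contained version of that calculation, following the logic described in the comment (the surrounding driver code is not reproduced here):

/* Reproduce the "case 0" multicast hash example from the comment above:
 * an address ending in ...34 56 with a 128-register MTA hashes to 0x563.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t hash_mc_addr(const uint8_t *mc_addr, unsigned mta_reg_count,
			     unsigned mc_filter_type)
{
	uint32_t hash_mask = (mta_reg_count * 32) - 1;
	unsigned bit_shift = 0;

	/* Number of left-shifts where 0xFF still falls inside the mask. */
	while ((hash_mask >> bit_shift) != 0xFF)
		bit_shift++;

	switch (mc_filter_type) {	/* same adjustments as the driver comment */
	default:
	case 0: break;
	case 1: bit_shift += 1; break;
	case 2: bit_shift += 2; break;
	case 3: bit_shift += 4; break;
	}

	return hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
			    ((uint32_t)mc_addr[5] << bit_shift));
}

int main(void)
{
	uint8_t mc_addr[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };

	printf("hash = 0x%03x\n", hash_mc_addr(mc_addr, 128, 0)); /* 0x563 */
	return 0;
}
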
/**
- * igb_update_mc_addr_list - Update Multicast addresses
+ * e1000_update_mc_addr_list_generic - Update Multicast addresses
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
@@ -442,150 +481,412 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
* Updates entire Multicast Table Array.
* The caller must have a packed mc_addr_list of multicast addresses.
**/
-void igb_update_mc_addr_list(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count)
+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count)
{
u32 hash_value, hash_bit, hash_reg;
int i;
+ DEBUGFUNC("e1000_update_mc_addr_list_generic");
+
/* clear mta_shadow */
memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
/* update mta_shadow from mc_addr_list */
for (i = 0; (u32) i < mc_addr_count; i++) {
- hash_value = igb_hash_mc_addr(hw, mc_addr_list);
+ hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list);
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
hash_bit = hash_value & 0x1F;
hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
- mc_addr_list += (ETH_ALEN);
+ mc_addr_list += (ETH_ADDR_LEN);
}
/* replace the entire MTA table */
for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
- array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
- wrfl();
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+ E1000_WRITE_FLUSH(hw);
}
/**
- * igb_clear_hw_cntrs_base - Clear base hardware counters
+ * e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value
+ * @hw: pointer to the HW structure
+ *
+ * In certain situations, a system BIOS may report that the PCIx maximum
+ * memory read byte count (MMRBC) value is higher than the actual
+ * value. We check the PCIx command register with the current PCIx status
+ * register.
+ **/
+void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw)
+{
+ u16 cmd_mmrbc;
+ u16 pcix_cmd;
+ u16 pcix_stat_hi_word;
+ u16 stat_mmrbc;
+
+ DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic");
+
+ /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */
+ if (hw->bus.type != e1000_bus_type_pcix)
+ return;
+
+ e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
+ e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word);
+ cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >>
+ PCIX_COMMAND_MMRBC_SHIFT;
+ stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >>
+ PCIX_STATUS_HI_MMRBC_SHIFT;
+ if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
+ stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K;
+ if (cmd_mmrbc > stat_mmrbc) {
+ pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK;
+ pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT;
+ e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
+ }
+}
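
The workaround above compares the MMRBC field the BIOS programmed into the PCI-X command register with what the status register says the device supports, and clamps the command value down if it is too large. The field positions below are placeholders, but the clamp logic mirrors the function:

/* Sketch of the MMRBC clamp in e1000_pcix_mmrbc_workaround_generic().
 * Mask/shift values are placeholders; only the comparison matters.
 */
#include <stdio.h>

#define CMD_MMRBC_MASK	 0x000C
#define CMD_MMRBC_SHIFT	 2
#define STAT_MMRBC_MASK	 0x0060
#define STAT_MMRBC_SHIFT 5
#define STAT_MMRBC_4K	 3
#define STAT_MMRBC_2K	 2

int main(void)
{
	unsigned pcix_cmd = 0x000C;	/* BIOS programmed the maximum */
	unsigned pcix_stat_hi = 0x0060;	/* device reports 4K capability */

	unsigned cmd_mmrbc  = (pcix_cmd & CMD_MMRBC_MASK) >> CMD_MMRBC_SHIFT;
	unsigned stat_mmrbc = (pcix_stat_hi & STAT_MMRBC_MASK) >> STAT_MMRBC_SHIFT;

	if (stat_mmrbc == STAT_MMRBC_4K)	/* cap 4K down to 2K */
		stat_mmrbc = STAT_MMRBC_2K;
	if (cmd_mmrbc > stat_mmrbc) {
		pcix_cmd &= ~CMD_MMRBC_MASK;
		pcix_cmd |= stat_mmrbc << CMD_MMRBC_SHIFT;
	}
	printf("clamped PCI-X command = 0x%04x\n", pcix_cmd);	/* 0x0008 */
	return 0;
}
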
+
+/**
+ * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters
* @hw: pointer to the HW structure
*
* Clears the base hardware counters by reading the counter registers.
**/
-void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw)
{
- rd32(E1000_CRCERRS);
- rd32(E1000_SYMERRS);
- rd32(E1000_MPC);
- rd32(E1000_SCC);
- rd32(E1000_ECOL);
- rd32(E1000_MCC);
- rd32(E1000_LATECOL);
- rd32(E1000_COLC);
- rd32(E1000_DC);
- rd32(E1000_SEC);
- rd32(E1000_RLEC);
- rd32(E1000_XONRXC);
- rd32(E1000_XONTXC);
- rd32(E1000_XOFFRXC);
- rd32(E1000_XOFFTXC);
- rd32(E1000_FCRUC);
- rd32(E1000_GPRC);
- rd32(E1000_BPRC);
- rd32(E1000_MPRC);
- rd32(E1000_GPTC);
- rd32(E1000_GORCL);
- rd32(E1000_GORCH);
- rd32(E1000_GOTCL);
- rd32(E1000_GOTCH);
- rd32(E1000_RNBC);
- rd32(E1000_RUC);
- rd32(E1000_RFC);
- rd32(E1000_ROC);
- rd32(E1000_RJC);
- rd32(E1000_TORL);
- rd32(E1000_TORH);
- rd32(E1000_TOTL);
- rd32(E1000_TOTH);
- rd32(E1000_TPR);
- rd32(E1000_TPT);
- rd32(E1000_MPTC);
- rd32(E1000_BPTC);
+ DEBUGFUNC("e1000_clear_hw_cntrs_base_generic");
+
+ E1000_READ_REG(hw, E1000_CRCERRS);
+ E1000_READ_REG(hw, E1000_SYMERRS);
+ E1000_READ_REG(hw, E1000_MPC);
+ E1000_READ_REG(hw, E1000_SCC);
+ E1000_READ_REG(hw, E1000_ECOL);
+ E1000_READ_REG(hw, E1000_MCC);
+ E1000_READ_REG(hw, E1000_LATECOL);
+ E1000_READ_REG(hw, E1000_COLC);
+ E1000_READ_REG(hw, E1000_DC);
+ E1000_READ_REG(hw, E1000_SEC);
+ E1000_READ_REG(hw, E1000_RLEC);
+ E1000_READ_REG(hw, E1000_XONRXC);
+ E1000_READ_REG(hw, E1000_XONTXC);
+ E1000_READ_REG(hw, E1000_XOFFRXC);
+ E1000_READ_REG(hw, E1000_XOFFTXC);
+ E1000_READ_REG(hw, E1000_FCRUC);
+ E1000_READ_REG(hw, E1000_GPRC);
+ E1000_READ_REG(hw, E1000_BPRC);
+ E1000_READ_REG(hw, E1000_MPRC);
+ E1000_READ_REG(hw, E1000_GPTC);
+ E1000_READ_REG(hw, E1000_GORCL);
+ E1000_READ_REG(hw, E1000_GORCH);
+ E1000_READ_REG(hw, E1000_GOTCL);
+ E1000_READ_REG(hw, E1000_GOTCH);
+ E1000_READ_REG(hw, E1000_RNBC);
+ E1000_READ_REG(hw, E1000_RUC);
+ E1000_READ_REG(hw, E1000_RFC);
+ E1000_READ_REG(hw, E1000_ROC);
+ E1000_READ_REG(hw, E1000_RJC);
+ E1000_READ_REG(hw, E1000_TORL);
+ E1000_READ_REG(hw, E1000_TORH);
+ E1000_READ_REG(hw, E1000_TOTL);
+ E1000_READ_REG(hw, E1000_TOTH);
+ E1000_READ_REG(hw, E1000_TPR);
+ E1000_READ_REG(hw, E1000_TPT);
+ E1000_READ_REG(hw, E1000_MPTC);
+ E1000_READ_REG(hw, E1000_BPTC);
}
/**
- * igb_check_for_copper_link - Check for link (Copper)
+ * e1000_check_for_copper_link_generic - Check for link (Copper)
* @hw: pointer to the HW structure
*
 * Checks to see if the link status of the hardware has changed. If a
* change in link status has been detected, then we read the PHY registers
* to get the current speed/duplex if link exists.
**/
-s32 igb_check_for_copper_link(struct e1000_hw *hw)
+s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val;
bool link;
+	DEBUGFUNC("e1000_check_for_copper_link_generic");
+
/* We only want to go out to the PHY registers to see if Auto-Neg
* has completed and/or if our link status has changed. The
* get_link_status flag is set upon receiving a Link Status
* Change or Rx Sequence Error interrupt.
*/
- if (!mac->get_link_status) {
- ret_val = 0;
- goto out;
- }
+ if (!mac->get_link_status)
+ return E1000_SUCCESS;
/* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
* of the PHY.
*/
- ret_val = igb_phy_has_link(hw, 1, 0, &link);
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
if (ret_val)
- goto out;
+ return ret_val;
if (!link)
- goto out; /* No link detected */
+ return E1000_SUCCESS; /* No link detected */
mac->get_link_status = false;
/* Check if there was DownShift, must be checked
* immediately after link-up
*/
- igb_check_downshift(hw);
+ e1000_check_downshift_generic(hw);
/* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
*/
- if (!mac->autoneg) {
- ret_val = -E1000_ERR_CONFIG;
- goto out;
- }
+ if (!mac->autoneg)
+ return -E1000_ERR_CONFIG;
/* Auto-Neg is enabled. Auto Speed Detection takes care
* of MAC speed/duplex configuration. So we only need to
* configure Collision Distance in the MAC.
*/
- igb_config_collision_dist(hw);
+ mac->ops.config_collision_dist(hw);
/* Configure Flow Control now that Auto-Neg has completed.
* First, we need to restore the desired flow control
* settings because we may have had to re-autoneg with a
* different link partner.
*/
- ret_val = igb_config_fc_after_link_up(hw);
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
if (ret_val)
- hw_dbg("Error configuring flow control\n");
+ DEBUGOUT("Error configuring flow control\n");
-out:
return ret_val;
}
/**
- * igb_setup_link - Setup flow control and link settings
+ * e1000_check_for_fiber_link_generic - Check for link (Fiber)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ **/
+s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_check_for_fiber_link_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+ /* If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), the cable is plugged in (we have signal),
+ * and our link partner is not trying to auto-negotiate with us (we
+ * are receiving idles or data), we need to force link up. We also
+ * need to give auto-negotiation time to complete, in case the cable
+ * was just plugged in. The autoneg_failed flag does this.
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+ if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) &&
+ !(rxcw & E1000_RXCW_C)) {
+ if (!mac->autoneg_failed) {
+ mac->autoneg_failed = true;
+ return E1000_SUCCESS;
+ }
+ DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ return ret_val;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /* If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ mac->serdes_has_link = true;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_for_serdes_link_generic - Check for link (Serdes)
+ * @hw: pointer to the HW structure
+ *
+ * Checks for link up on the hardware. If link is not up and we have
+ * a signal, then we need to force link up.
+ **/
+s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 rxcw;
+ u32 ctrl;
+ u32 status;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_check_for_serdes_link_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+ /* If we don't have link (auto-negotiation failed or link partner
+ * cannot auto-negotiate), and our link partner is not trying to
+ * auto-negotiate with us (we are receiving idles or data),
+ * we need to force link up. We also need to give auto-negotiation
+ * time to complete.
+ */
+ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+ if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) {
+ if (!mac->autoneg_failed) {
+ mac->autoneg_failed = true;
+ return E1000_SUCCESS;
+ }
+ DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+ /* Disable auto-negotiation in the TXCW register */
+ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+ /* Force link-up and also force full-duplex. */
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Configure Flow Control after forcing link up. */
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
+ if (ret_val) {
+ DEBUGOUT("Error configuring flow control\n");
+ return ret_val;
+ }
+ } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+ /* If we are forcing link and we are receiving /C/ ordered
+ * sets, re-enable auto-negotiation in the TXCW register
+ * and disable forced link in the Device Control register
+ * in an attempt to auto-negotiate with our link partner.
+ */
+ DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+ E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+ E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+ mac->serdes_has_link = true;
+ } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) {
+ /* If we force link for non-auto-negotiation switch, check
+ * link status based on MAC synchronization for internal
+ * serdes media type.
+ */
+ /* SYNCH bit and IV bit are sticky. */
+ usec_delay(10);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ mac->serdes_has_link = true;
+ DEBUGOUT("SERDES: Link up - forced.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - force failed.\n");
+ }
+ }
+
+ if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_LU) {
+ /* SYNCH bit and IV bit are sticky, so reread rxcw. */
+ usec_delay(10);
+ rxcw = E1000_READ_REG(hw, E1000_RXCW);
+ if (rxcw & E1000_RXCW_SYNCH) {
+ if (!(rxcw & E1000_RXCW_IV)) {
+ mac->serdes_has_link = true;
+ DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n");
+ } else {
+ mac->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - no sync.\n");
+ }
+ } else {
+ mac->serdes_has_link = false;
+ DEBUGOUT("SERDES: Link down - autoneg failed\n");
+ }
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_default_fc_generic - Set flow control default values
+ * @hw: pointer to the HW structure
+ *
+ * Read the EEPROM for the default values for flow control and store the
+ * values.
+ **/
+static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 nvm_offset = 0;
+
+ DEBUGFUNC("e1000_set_default_fc_generic");
+
+ /* Read and store word 0x0F of the EEPROM. This word contains bits
+ * that determine the hardware's default PAUSE (flow control) mode,
+ * a bit that determines whether the HW defaults to enabling or
+ * disabling auto-negotiation, and the direction of the
+ * SW defined pins. If there is no SW over-ride of the flow
+ * control setting, then the variable hw->fc will
+ * be initialized based on a value in the EEPROM.
+ */
+ if (hw->mac.type == e1000_i350) {
+ nvm_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
+ ret_val = hw->nvm.ops.read(hw,
+ NVM_INIT_CONTROL2_REG +
+ nvm_offset,
+ 1, &nvm_data);
+ } else {
+ ret_val = hw->nvm.ops.read(hw,
+ NVM_INIT_CONTROL2_REG,
+ 1, &nvm_data);
+ }
+
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
+ hw->fc.requested_mode = e1000_fc_none;
+ else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+ NVM_WORD0F_ASM_DIR)
+ hw->fc.requested_mode = e1000_fc_tx_pause;
+ else
+ hw->fc.requested_mode = e1000_fc_full;
+
+ return E1000_SUCCESS;
+}
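
The decode above maps two EEPROM bits to a requested flow-control mode: both clear means none, ASM_DIR alone means Tx-only pause, anything else means full. The small check below mirrors that decision tree; the mask values are placeholders for the real NVM_WORD0F_* definitions in the shared defines header:

/* Decode of NVM word 0x0F pause bits, mirroring e1000_set_default_fc_generic().
 * Mask values are placeholders for the real NVM_WORD0F_* defines.
 */
#include <stdio.h>

#define PAUSE_MASK	0x3000	/* assumed: two pause-capability bits */
#define ASM_DIR		0x2000	/* assumed: asymmetric-direction bit */

static const char *fc_mode(unsigned nvm_data)
{
	if (!(nvm_data & PAUSE_MASK))
		return "none";
	if ((nvm_data & PAUSE_MASK) == ASM_DIR)
		return "tx_pause";
	return "full";
}

int main(void)
{
	unsigned words[] = { 0x0000, 0x2000, 0x3000, 0x1000 };
	unsigned i;

	for (i = 0; i < sizeof(words) / sizeof(words[0]); i++)
		printf("word 0x%04x -> fc %s\n", words[i], fc_mode(words[i]));
	return 0;
}
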
+
+/**
+ * e1000_setup_link_generic - Setup flow control and link settings
* @hw: pointer to the HW structure
*
* Determines which flow control settings to use, then configures flow
@@ -594,91 +895,260 @@ out:
* should be established. Assumes the hardware has previously been reset
* and the transmitter and receiver are not enabled.
**/
-s32 igb_setup_link(struct e1000_hw *hw)
+s32 e1000_setup_link_generic(struct e1000_hw *hw)
{
- s32 ret_val = 0;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_link_generic");
/* In the case of the phy reset being blocked, we already have a link.
* We do not need to set it up again.
*/
- if (igb_check_reset_block(hw))
- goto out;
+ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
+ return E1000_SUCCESS;
/* If requested flow control is set to default, set flow control
* based on the EEPROM flow control settings.
*/
if (hw->fc.requested_mode == e1000_fc_default) {
- ret_val = igb_set_default_fc(hw);
+ ret_val = e1000_set_default_fc_generic(hw);
if (ret_val)
- goto out;
+ return ret_val;
}
- /* We want to save off the original Flow Control configuration just
- * in case we get disconnected and then reconnected into a different
- * hub or switch with different Flow Control capabilities.
+ /* Save off the requested flow control mode for use later. Depending
+ * on the link partner's capabilities, we may or may not use this mode.
*/
hw->fc.current_mode = hw->fc.requested_mode;
- hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
+ DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
+ hw->fc.current_mode);
/* Call the necessary media_type subroutine to configure the link. */
ret_val = hw->mac.ops.setup_physical_interface(hw);
if (ret_val)
- goto out;
+ return ret_val;
/* Initialize the flow control address, type, and PAUSE timer
* registers to their default values. This is done even if flow
* control is disabled, because it does not hurt anything to
* initialize these registers.
*/
- hw_dbg("Initializing the Flow Control address, type and timer regs\n");
- wr32(E1000_FCT, FLOW_CONTROL_TYPE);
- wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
- wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+ DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
+ E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
+ E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+ E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
- wr32(E1000_FCTTV, hw->fc.pause_time);
+ E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
- ret_val = igb_set_fc_watermarks(hw);
+ return e1000_set_fc_watermarks_generic(hw);
+}
-out:
+/**
+ * e1000_commit_fc_settings_generic - Configure flow control
+ * @hw: pointer to the HW structure
+ *
+ * Write the flow control settings to the Transmit Config Word Register (TXCW)
+ * based on the flow control settings in e1000_mac_info.
+ **/
+static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 txcw;
+
+ DEBUGFUNC("e1000_commit_fc_settings_generic");
+
+ /* Check for a software override of the flow control settings, and
+ * setup the device accordingly. If auto-negotiation is enabled, then
+ * software will have to set the "PAUSE" bits to the correct value in
+ * the Transmit Config Word Register (TXCW) and re-start auto-
+ * negotiation. However, if auto-negotiation is disabled, then
+ * software will have to manually configure the two flow control enable
+ * bits in the CTRL register.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we
+ * do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ */
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ /* Flow control completely disabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+ break;
+ case e1000_fc_rx_pause:
+ /* Rx Flow control is enabled and Tx Flow control is disabled
+ * by a software over-ride. Since there really isn't a way to
+ * advertise that we are capable of Rx Pause ONLY, we will
+ * advertise that we support both symmetric and asymmetric Rx
+ * PAUSE. Later, we will disable the adapter's ability to send
+ * PAUSE frames.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ case e1000_fc_tx_pause:
+ /* Tx Flow control is enabled, and Rx Flow control is disabled,
+ * by a software over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+ break;
+ case e1000_fc_full:
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
+ break;
+ }
+
+ E1000_WRITE_REG(hw, E1000_TXCW, txcw);
+ mac->txcw = txcw;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_poll_fiber_serdes_link_generic - Poll for link up
+ * @hw: pointer to the HW structure
+ *
+ * Polls for link up by reading the status register, if link fails to come
+ * up with auto-negotiation, then the link is forced if a signal is detected.
+ **/
+static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 i, status;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
+
+ /* If we have a signal (the cable is plugged in, or assumed true for
+ * serdes media) then poll for a "Link-Up" indication in the Device
+ * Status Register. Time-out if a link isn't seen in 500 milliseconds
+ * (auto-negotiation should complete in less than 500 milliseconds even
+ * if the other end is doing it in SW).
+ */
+ for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
+ msec_delay(10);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_LU)
+ break;
+ }
+ if (i == FIBER_LINK_UP_LIMIT) {
+ DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+ mac->autoneg_failed = true;
+ /* AutoNeg failed to achieve a link, so we'll call
+ * mac->check_for_link. This routine will force the
+ * link up if we detect a signal. This will allow us to
+ * communicate with non-autonegotiating link partners.
+ */
+ ret_val = mac->ops.check_for_link(hw);
+ if (ret_val) {
+ DEBUGOUT("Error while checking for link\n");
+ return ret_val;
+ }
+ mac->autoneg_failed = false;
+ } else {
+ mac->autoneg_failed = false;
+ DEBUGOUT("Valid Link Found\n");
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configures collision distance and flow control for fiber and serdes
+ * links. Upon successful setup, poll for link.
+ **/
+s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_setup_fiber_serdes_link_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /* Take the link out of reset */
+ ctrl &= ~E1000_CTRL_LRST;
+
+ hw->mac.ops.config_collision_dist(hw);
+
+ ret_val = e1000_commit_fc_settings_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Since auto-negotiation is enabled, take the link out of reset (the
+ * link will be in reset, because we previously reset the chip). This
+ * will restart auto-negotiation. If auto-negotiation is successful
+ * then the link-up status bit will be set and the flow control enable
+ * bits (RFCE and TFCE) will be set according to their negotiated value.
+ */
+ DEBUGOUT("Auto-negotiation enabled\n");
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+
+ /* For these adapters, the SW definable pin 1 is set when the optics
+ * detect a signal. If we have a signal, then poll for a "Link-Up"
+ * indication.
+ */
+ if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+ (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
+ ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+ } else {
+ DEBUGOUT("No signal detected\n");
+ }
return ret_val;
}
/**
- * igb_config_collision_dist - Configure collision distance
+ * e1000_config_collision_dist_generic - Configure collision distance
* @hw: pointer to the HW structure
*
* Configures the collision distance to the default value and is used
- * during link setup. Currently no func pointer exists and all
- * implementations are handled in the generic version of this function.
+ * during link setup.
**/
-void igb_config_collision_dist(struct e1000_hw *hw)
+static void e1000_config_collision_dist_generic(struct e1000_hw *hw)
{
u32 tctl;
- tctl = rd32(E1000_TCTL);
+ DEBUGFUNC("e1000_config_collision_dist_generic");
+
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
tctl &= ~E1000_TCTL_COLD;
tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
- wr32(E1000_TCTL, tctl);
- wrfl();
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ E1000_WRITE_FLUSH(hw);
}
/**
- * igb_set_fc_watermarks - Set flow control high/low watermarks
+ * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
* @hw: pointer to the HW structure
*
* Sets the flow control high/low threshold (watermark) registers. If
* flow control XON frame transmission is enabled, then set XON frame
- * tansmission as well.
+ * transmission as well.
**/
-static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
+s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
{
- s32 ret_val = 0;
u32 fcrtl = 0, fcrth = 0;
+ DEBUGFUNC("e1000_set_fc_watermarks_generic");
+
/* Set the flow control receive threshold registers. Normally,
* these registers will be set to a default threshold that may be
* adjusted later by the driver's runtime code. However, if the
@@ -696,53 +1166,14 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
fcrth = hw->fc.high_water;
}
- wr32(E1000_FCRTL, fcrtl);
- wr32(E1000_FCRTH, fcrth);
-
- return ret_val;
-}
-
-/**
- * igb_set_default_fc - Set flow control default values
- * @hw: pointer to the HW structure
- *
- * Read the EEPROM for the default values for flow control and store the
- * values.
- **/
-static s32 igb_set_default_fc(struct e1000_hw *hw)
-{
- s32 ret_val = 0;
- u16 nvm_data;
-
- /* Read and store word 0x0F of the EEPROM. This word contains bits
- * that determine the hardware's default PAUSE (flow control) mode,
- * a bit that determines whether the HW defaults to enabling or
- * disabling auto-negotiation, and the direction of the
- * SW defined pins. If there is no SW over-ride of the flow
- * control setting, then the variable hw->fc will
- * be initialized based on a value in the EEPROM.
- */
- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
-
- if (ret_val) {
- hw_dbg("NVM Read Error\n");
- goto out;
- }
+ E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
+ E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
- if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
- hw->fc.requested_mode = e1000_fc_none;
- else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
- NVM_WORD0F_ASM_DIR)
- hw->fc.requested_mode = e1000_fc_tx_pause;
- else
- hw->fc.requested_mode = e1000_fc_full;
-
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_force_mac_fc - Force the MAC's flow control settings
+ * e1000_force_mac_fc_generic - Force the MAC's flow control settings
* @hw: pointer to the HW structure
*
* Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
@@ -751,12 +1182,13 @@ out:
* autonegotiation is managed by the PHY rather than the MAC. Software must
* also configure these bits when link is forced on a fiber connection.
**/
-s32 igb_force_mac_fc(struct e1000_hw *hw)
+s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
{
u32 ctrl;
- s32 ret_val = 0;
- ctrl = rd32(E1000_CTRL);
+ DEBUGFUNC("e1000_force_mac_fc_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
/* Because we didn't get link via the internal auto-negotiation
* mechanism (we either forced link or we got link via PHY
@@ -772,10 +1204,10 @@ s32 igb_force_mac_fc(struct e1000_hw *hw)
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
* frames but we do not receive pause frames).
- * 3: Both Rx and TX flow control (symmetric) is enabled.
+ * 3: Both Rx and Tx flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
- hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+ DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode);
switch (hw->fc.current_mode) {
case e1000_fc_none:
@@ -793,19 +1225,17 @@ s32 igb_force_mac_fc(struct e1000_hw *hw)
ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
break;
default:
- hw_dbg("Flow control param set incorrectly\n");
- ret_val = -E1000_ERR_CONFIG;
- goto out;
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
}
- wr32(E1000_CTRL, ctrl);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_config_fc_after_link_up - Configures flow control after link
+ * e1000_config_fc_after_link_up_generic - Configures flow control after link
* @hw: pointer to the HW structure
*
* Checks the status of auto-negotiation after link up to ensure that the
@@ -814,29 +1244,32 @@ out:
* and did not fail, then we configure flow control based on our link
* partner.
**/
-s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
+s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
- s32 ret_val = 0;
+ s32 ret_val = E1000_SUCCESS;
u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
u16 speed, duplex;
+ DEBUGFUNC("e1000_config_fc_after_link_up_generic");
+
/* Check for the case where we have fiber media and auto-neg failed
* so we had to force link. In this case, we need to force the
* configuration of the MAC to match the "fc" parameter.
*/
if (mac->autoneg_failed) {
- if (hw->phy.media_type == e1000_media_type_internal_serdes)
- ret_val = igb_force_mac_fc(hw);
+ if (hw->phy.media_type == e1000_media_type_fiber ||
+ hw->phy.media_type == e1000_media_type_internal_serdes)
+ ret_val = e1000_force_mac_fc_generic(hw);
} else {
if (hw->phy.media_type == e1000_media_type_copper)
- ret_val = igb_force_mac_fc(hw);
+ ret_val = e1000_force_mac_fc_generic(hw);
}
if (ret_val) {
- hw_dbg("Error forcing flow control settings\n");
- goto out;
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
}
/* Check for the case where we have copper media and auto-neg is
@@ -849,19 +1282,16 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
* has completed. We read this twice because this reg has
* some "sticky" (latched) bits.
*/
- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
- &mii_status_reg);
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
if (ret_val)
- goto out;
- ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
- &mii_status_reg);
+ return ret_val;
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
if (ret_val)
- goto out;
+ return ret_val;
if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
- hw_dbg("Copper PHY and Auto Neg "
- "has not completed.\n");
- goto out;
+ DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
+ return ret_val;
}
/* The AutoNeg process has completed, so we now need to
@@ -871,13 +1301,13 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
* flow control was negotiated.
*/
ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
- &mii_nway_adv_reg);
+ &mii_nway_adv_reg);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
- &mii_nway_lp_ability_reg);
+ &mii_nway_lp_ability_reg);
if (ret_val)
- goto out;
+ return ret_val;
/* Two bits in the Auto Negotiation Advertisement Register
* (Address 4) and two bits in the Auto Negotiation Base
@@ -914,19 +1344,18 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
*/
if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
- /* Now we need to check if the user selected RX ONLY
+ /* Now we need to check if the user selected Rx ONLY
* of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
+ * FULL flow control because we could not advertise Rx
* ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
+ * turn OFF the TRANSMISSION of PAUSE frames.
*/
if (hw->fc.requested_mode == e1000_fc_full) {
hw->fc.current_mode = e1000_fc_full;
- hw_dbg("Flow Control = FULL.\r\n");
+ DEBUGOUT("Flow Control = FULL.\n");
} else {
hw->fc.current_mode = e1000_fc_rx_pause;
- hw_dbg("Flow Control = "
- "RX PAUSE frames only.\r\n");
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
}
}
/* For receiving PAUSE frames ONLY.
@@ -941,7 +1370,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc.current_mode = e1000_fc_tx_pause;
- hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
+ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
}
/* For transmitting PAUSE frames ONLY.
*
@@ -955,46 +1384,23 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
!(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc.current_mode = e1000_fc_rx_pause;
- hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
- }
- /* Per the IEEE spec, at this point flow control should be
- * disabled. However, we want to consider that we could
- * be connected to a legacy switch that doesn't advertise
- * desired flow control, but can be forced on the link
- * partner. So if we advertised no flow control, that is
- * what we will resolve to. If we advertised some kind of
- * receive capability (Rx Pause Only or Full Flow Control)
- * and the link partner advertised none, we will configure
- * ourselves to enable Rx Flow Control only. We can do
- * this safely for two reasons: If the link partner really
- * didn't want flow control enabled, and we enable Rx, no
- * harm done since we won't be receiving any PAUSE frames
- * anyway. If the intent on the link partner was to have
- * flow control enabled, then by us enabling RX only, we
- * can at least receive pause frames and process them.
- * This is a good idea because in most cases, since we are
- * predominantly a server NIC, more times than not we will
- * be asked to delay transmission of packets than asking
- * our link partner to pause transmission of frames.
- */
- else if ((hw->fc.requested_mode == e1000_fc_none) ||
- (hw->fc.requested_mode == e1000_fc_tx_pause) ||
- (hw->fc.strict_ieee)) {
- hw->fc.current_mode = e1000_fc_none;
- hw_dbg("Flow Control = NONE.\r\n");
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
} else {
- hw->fc.current_mode = e1000_fc_rx_pause;
- hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ DEBUGOUT("Flow Control = NONE.\n");
}
/* Now we need to do one last check... If we auto-
* negotiated to HALF DUPLEX, flow control should not be
* enabled per IEEE 802.3 spec.
*/
- ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
+ ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
if (ret_val) {
- hw_dbg("Error getting link speed and duplex\n");
- goto out;
+ DEBUGOUT("Error getting link speed and duplex\n");
+ return ret_val;
}
if (duplex == HALF_DUPLEX)
@@ -1003,26 +1409,27 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
/* Now we call a subroutine to actually force the MAC
* controller to use the correct flow control settings.
*/
- ret_val = igb_force_mac_fc(hw);
+ ret_val = e1000_force_mac_fc_generic(hw);
if (ret_val) {
- hw_dbg("Error forcing flow control settings\n");
- goto out;
+ DEBUGOUT("Error forcing flow control settings\n");
+ return ret_val;
}
}
+
/* Check for the case where we have SerDes media and auto-neg is
* enabled. In this case, we need to check and see if Auto-Neg
* has completed, and if so, how the PHY and link partner has
* flow control configured.
*/
- if ((hw->phy.media_type == e1000_media_type_internal_serdes)
- && mac->autoneg) {
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
+ mac->autoneg) {
/* Read the PCS_LSTS and check to see if AutoNeg
* has completed.
*/
- pcs_status_reg = rd32(E1000_PCS_LSTAT);
+ pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT);
if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
- hw_dbg("PCS Auto Neg has not completed.\n");
+ DEBUGOUT("PCS Auto Neg has not completed.\n");
return ret_val;
}
@@ -1032,8 +1439,8 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
* Page Ability Register (PCS_LPAB) to determine how
* flow control was negotiated.
*/
- pcs_adv_reg = rd32(E1000_PCS_ANADV);
- pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);
+ pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV);
+ pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB);
/* Two bits in the Auto Negotiation Advertisement Register
* (PCS_ANADV) and two bits in the Auto Negotiation Base
@@ -1078,10 +1485,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
*/
if (hw->fc.requested_mode == e1000_fc_full) {
hw->fc.current_mode = e1000_fc_full;
- hw_dbg("Flow Control = FULL.\n");
+ DEBUGOUT("Flow Control = FULL.\n");
} else {
hw->fc.current_mode = e1000_fc_rx_pause;
- hw_dbg("Flow Control = Rx PAUSE frames only.\n");
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
}
}
/* For receiving PAUSE frames ONLY.
@@ -1096,7 +1503,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
(pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
hw->fc.current_mode = e1000_fc_tx_pause;
- hw_dbg("Flow Control = Tx PAUSE frames only.\n");
+ DEBUGOUT("Flow Control = Tx PAUSE frames only.\n");
}
/* For transmitting PAUSE frames ONLY.
*
@@ -1110,35 +1517,34 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
!(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
(pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
hw->fc.current_mode = e1000_fc_rx_pause;
- hw_dbg("Flow Control = Rx PAUSE frames only.\n");
+ DEBUGOUT("Flow Control = Rx PAUSE frames only.\n");
} else {
/* Per the IEEE spec, at this point flow control
* should be disabled.
*/
hw->fc.current_mode = e1000_fc_none;
- hw_dbg("Flow Control = NONE.\n");
+ DEBUGOUT("Flow Control = NONE.\n");
}
/* Now we call a subroutine to actually force the MAC
* controller to use the correct flow control settings.
*/
- pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
+ pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
- wr32(E1000_PCS_LCTL, pcs_ctrl_reg);
+ E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg);
- ret_val = igb_force_mac_fc(hw);
+ ret_val = e1000_force_mac_fc_generic(hw);
if (ret_val) {
- hw_dbg("Error forcing flow control settings\n");
+ DEBUGOUT("Error forcing flow control settings\n");
return ret_val;
}
}
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
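For reference, the PAUSE/ASM_DIR matching that the comments above walk through resolves to four outcomes. Below is a minimal standalone sketch of those rules; the names are illustrative only and are not driver code (the driver keeps this logic inline, as shown above).

#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Resolve flow control from the local advertisement and the link partner's
 * ability bits, mirroring the PAUSE/ASM_DIR rules documented in
 * e1000_config_fc_after_link_up_generic().
 */
static enum fc_mode resolve_fc(bool loc_pause, bool loc_asm_dir,
                               bool lp_pause, bool lp_asm_dir,
                               enum fc_mode requested)
{
        if (loc_pause && lp_pause)
                /* Both sides advertise symmetric PAUSE: honor an Rx-only
                 * request, otherwise run full flow control. */
                return (requested == FC_FULL) ? FC_FULL : FC_RX_PAUSE;
        if (!loc_pause && loc_asm_dir && lp_pause && lp_asm_dir)
                return FC_TX_PAUSE;     /* we send PAUSE; partner honors it */
        if (loc_pause && loc_asm_dir && !lp_pause && lp_asm_dir)
                return FC_RX_PAUSE;     /* partner sends PAUSE; we honor it */
        return FC_NONE;                 /* per the spec, otherwise disabled */
}

int main(void)
{
        /* Local advertises PAUSE+ASM_DIR, partner ASM_DIR only -> Rx pause. */
        printf("%d\n", resolve_fc(true, true, false, true, FC_FULL));
        return 0;
}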
/**
- * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
+ * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
* @hw: pointer to the HW structure
* @speed: stores the current speed
* @duplex: stores the current duplex
@@ -1146,183 +1552,185 @@ out:
* Read the status register for the current speed/duplex and store the current
* speed and duplex for copper connections.
**/
-s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
- u16 *duplex)
+s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
{
u32 status;
- status = rd32(E1000_STATUS);
+ DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");
+
+ status = E1000_READ_REG(hw, E1000_STATUS);
if (status & E1000_STATUS_SPEED_1000) {
*speed = SPEED_1000;
- hw_dbg("1000 Mbs, ");
+ DEBUGOUT("1000 Mbs, ");
} else if (status & E1000_STATUS_SPEED_100) {
*speed = SPEED_100;
- hw_dbg("100 Mbs, ");
+ DEBUGOUT("100 Mbs, ");
} else {
*speed = SPEED_10;
- hw_dbg("10 Mbs, ");
+ DEBUGOUT("10 Mbs, ");
}
if (status & E1000_STATUS_FD) {
*duplex = FULL_DUPLEX;
- hw_dbg("Full Duplex\n");
+ DEBUGOUT("Full Duplex\n");
} else {
*duplex = HALF_DUPLEX;
- hw_dbg("Half Duplex\n");
+ DEBUGOUT("Half Duplex\n");
}
- /* Check if it is an I354 2.5Gb backplane connection. */
- if (hw->mac.type == e1000_i354) {
- if ((status & E1000_STATUS_2P5_SKU) &&
- !(status & E1000_STATUS_2P5_SKU_OVER)) {
- *speed = SPEED_2500;
- *duplex = FULL_DUPLEX;
- hw_dbg("2500 Mbs, ");
- hw_dbg("Full Duplex\n");
- }
- }
+ return E1000_SUCCESS;
+}
- return 0;
+/**
+ * e1000_get_speed_and_duplex_fiber_serdes_generic - Retrieve current speed/duplex
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * Sets the speed and duplex to gigabit full duplex (the only possible option)
+ * for fiber/serdes links.
+ **/
+s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 *speed, u16 *duplex)
+{
+ DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");
+
+ *speed = SPEED_1000;
+ *duplex = FULL_DUPLEX;
+
+ return E1000_SUCCESS;
}
/**
- * igb_get_hw_semaphore - Acquire hardware semaphore
+ * e1000_get_hw_semaphore_generic - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM
**/
-s32 igb_get_hw_semaphore(struct e1000_hw *hw)
+s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
{
u32 swsm;
- s32 ret_val = 0;
s32 timeout = hw->nvm.word_size + 1;
s32 i = 0;
+ DEBUGFUNC("e1000_get_hw_semaphore_generic");
+
/* Get the SW semaphore */
while (i < timeout) {
- swsm = rd32(E1000_SWSM);
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
if (!(swsm & E1000_SWSM_SMBI))
break;
- udelay(50);
+ usec_delay(50);
i++;
}
if (i == timeout) {
- hw_dbg("Driver can't access device - SMBI bit is set.\n");
- ret_val = -E1000_ERR_NVM;
- goto out;
+ DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+ return -E1000_ERR_NVM;
}
/* Get the FW semaphore. */
for (i = 0; i < timeout; i++) {
- swsm = rd32(E1000_SWSM);
- wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
/* Semaphore acquired if bit latched */
- if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
+ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
break;
- udelay(50);
+ usec_delay(50);
}
if (i == timeout) {
/* Release semaphores */
- igb_put_hw_semaphore(hw);
- hw_dbg("Driver can't access the NVM\n");
- ret_val = -E1000_ERR_NVM;
- goto out;
+ e1000_put_hw_semaphore_generic(hw);
+ DEBUGOUT("Driver can't access the NVM\n");
+ return -E1000_ERR_NVM;
}
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_put_hw_semaphore - Release hardware semaphore
+ * e1000_put_hw_semaphore_generic - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
**/
-void igb_put_hw_semaphore(struct e1000_hw *hw)
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
{
u32 swsm;
- swsm = rd32(E1000_SWSM);
+ DEBUGFUNC("e1000_put_hw_semaphore_generic");
+
+ swsm = E1000_READ_REG(hw, E1000_SWSM);
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
- wr32(E1000_SWSM, swsm);
+ E1000_WRITE_REG(hw, E1000_SWSM, swsm);
}
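The acquire/release pair above is a two-stage handshake: wait for SMBI to clear, then set SWESMBI and confirm it latched. A minimal standalone sketch of that pattern follows, with the SWSM register simulated in memory and the bit values assumed for illustration (in the driver each poll is also separated by a short delay).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SWSM_SMBI    0x01       /* SW semaphore bit (assumed value) */
#define SWSM_SWESMBI 0x02       /* SW/FW semaphore bit (assumed value) */

static uint32_t swsm;           /* stands in for the E1000_SWSM register */

static uint32_t reg_read(void)        { return swsm; }
static void     reg_write(uint32_t v) { swsm = v; }

/* Two-stage acquire as in e1000_get_hw_semaphore_generic():
 * 1) wait for SMBI to be clear (software semaphore),
 * 2) set SWESMBI and read it back; only if it latches do we own it.
 */
static bool acquire_hw_semaphore(int timeout)
{
        int i;

        for (i = 0; i < timeout; i++)
                if (!(reg_read() & SWSM_SMBI))
                        break;
        if (i == timeout)
                return false;

        for (i = 0; i < timeout; i++) {
                reg_write(reg_read() | SWSM_SWESMBI);
                if (reg_read() & SWSM_SWESMBI)  /* latched -> acquired */
                        return true;
        }
        return false;
}

static void release_hw_semaphore(void)
{
        reg_write(reg_read() & ~(SWSM_SMBI | SWSM_SWESMBI));
}

int main(void)
{
        if (acquire_hw_semaphore(16)) {
                /* ... access PHY/NVM here ... */
                release_hw_semaphore();
                puts("semaphore acquired and released");
        }
        return 0;
}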
/**
- * igb_get_auto_rd_done - Check for auto read completion
+ * e1000_get_auto_rd_done_generic - Check for auto read completion
* @hw: pointer to the HW structure
*
* Check EEPROM for Auto Read done bit.
**/
-s32 igb_get_auto_rd_done(struct e1000_hw *hw)
+s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
{
s32 i = 0;
- s32 ret_val = 0;
+ DEBUGFUNC("e1000_get_auto_rd_done_generic");
while (i < AUTO_READ_DONE_TIMEOUT) {
- if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
+ if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
break;
- msleep(1);
+ msec_delay(1);
i++;
}
if (i == AUTO_READ_DONE_TIMEOUT) {
- hw_dbg("Auto read by HW from NVM has not completed.\n");
- ret_val = -E1000_ERR_RESET;
- goto out;
+ DEBUGOUT("Auto read by HW from NVM has not completed.\n");
+ return -E1000_ERR_RESET;
}
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_valid_led_default - Verify a valid default LED config
+ * e1000_valid_led_default_generic - Verify a valid default LED config
* @hw: pointer to the HW structure
* @data: pointer to the NVM (EEPROM)
*
* Read the EEPROM for the current default LED configuration. If the
* LED configuration is not valid, set to a valid LED configuration.
**/
-static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
+s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
{
s32 ret_val;
+ DEBUGFUNC("e1000_valid_led_default_generic");
+
ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
if (ret_val) {
- hw_dbg("NVM Read Error\n");
- goto out;
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
}
- if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
- switch(hw->phy.media_type) {
- case e1000_media_type_internal_serdes:
- *data = ID_LED_DEFAULT_82575_SERDES;
- break;
- case e1000_media_type_copper:
- default:
- *data = ID_LED_DEFAULT;
- break;
- }
- }
-out:
- return ret_val;
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
+ *data = ID_LED_DEFAULT;
+
+ return E1000_SUCCESS;
}
/**
- * igb_id_led_init -
+ * e1000_id_led_init_generic - Initialize ID LED configuration
* @hw: pointer to the HW structure
*
**/
-s32 igb_id_led_init(struct e1000_hw *hw)
+s32 e1000_id_led_init_generic(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val;
@@ -1332,11 +1740,13 @@ s32 igb_id_led_init(struct e1000_hw *hw)
u16 data, i, temp;
const u16 led_mask = 0x0F;
- ret_val = igb_valid_led_default(hw, &data);
+ DEBUGFUNC("e1000_id_led_init_generic");
+
+ ret_val = hw->nvm.ops.valid_led_default(hw, &data);
if (ret_val)
- goto out;
+ return ret_val;
- mac->ledctl_default = rd32(E1000_LEDCTL);
+ mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
mac->ledctl_mode1 = mac->ledctl_default;
mac->ledctl_mode2 = mac->ledctl_default;
@@ -1378,138 +1788,331 @@ s32 igb_id_led_init(struct e1000_hw *hw)
}
}
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_cleanup_led - Set LED config to default operation
+ * e1000_setup_led_generic - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use and saves the current state
+ * of the LED so it can be later restored.
+ **/
+s32 e1000_setup_led_generic(struct e1000_hw *hw)
+{
+ u32 ledctl;
+
+ DEBUGFUNC("e1000_setup_led_generic");
+
+ if (hw->mac.ops.setup_led != e1000_setup_led_generic)
+ return -E1000_ERR_CONFIG;
+
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+ hw->mac.ledctl_default = ledctl;
+ /* Turn off LED0 */
+ ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_LED0_MODE_MASK);
+ ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+ E1000_LEDCTL_LED0_MODE_SHIFT);
+ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+ } else if (hw->phy.media_type == e1000_media_type_copper) {
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_cleanup_led_generic - Set LED config to default operation
* @hw: pointer to the HW structure
*
* Remove the current LED configuration and set the LED configuration
* to the default value, saved from the EEPROM.
**/
-s32 igb_cleanup_led(struct e1000_hw *hw)
+s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
{
- wr32(E1000_LEDCTL, hw->mac.ledctl_default);
- return 0;
+ DEBUGFUNC("e1000_cleanup_led_generic");
+
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+ return E1000_SUCCESS;
}
/**
- * igb_blink_led - Blink LED
+ * e1000_blink_led_generic - Blink LED
* @hw: pointer to the HW structure
*
- * Blink the led's which are set to be on.
+ * Blink the LEDs which are set to be on.
**/
-s32 igb_blink_led(struct e1000_hw *hw)
+s32 e1000_blink_led_generic(struct e1000_hw *hw)
{
u32 ledctl_blink = 0;
u32 i;
- /* set the blink bit for each LED that's "on" (0x0E)
- * in ledctl_mode2
- */
- ledctl_blink = hw->mac.ledctl_mode2;
- for (i = 0; i < 4; i++)
- if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
- E1000_LEDCTL_MODE_LED_ON)
- ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
- (i * 8));
+ DEBUGFUNC("e1000_blink_led_generic");
- wr32(E1000_LEDCTL, ledctl_blink);
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ /* always blink LED0 for PCI-E fiber */
+ ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+ (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+ } else {
+ /* Set the blink bit for each LED that's "on" (0x0E)
+ * (or "off" if inverted) in ledctl_mode2. The blink
+ * logic in hardware only works when mode is set to "on"
+ * so it must be changed accordingly when the mode is
+ * "off" and inverted.
+ */
+ ledctl_blink = hw->mac.ledctl_mode2;
+ for (i = 0; i < 32; i += 8) {
+ u32 mode = (hw->mac.ledctl_mode2 >> i) &
+ E1000_LEDCTL_LED0_MODE_MASK;
+ u32 led_default = hw->mac.ledctl_default >> i;
+
+ if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
+ (mode == E1000_LEDCTL_MODE_LED_ON)) ||
+ ((led_default & E1000_LEDCTL_LED0_IVRT) &&
+ (mode == E1000_LEDCTL_MODE_LED_OFF))) {
+ ledctl_blink &=
+ ~(E1000_LEDCTL_LED0_MODE_MASK << i);
+ ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_MODE_LED_ON) << i;
+ }
+ }
+ }
- return 0;
+ E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
+
+ return E1000_SUCCESS;
}
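The blink loop above relies on LEDCTL packing one 8-bit field per LED (four LEDs, one byte each), which is why it steps through the register in increments of 8. The following standalone sketch decodes that layout; the field masks are assumed to mirror the E1000_LEDCTL_LED0_* definitions and are illustrative here.

#include <stdint.h>
#include <stdio.h>

/* Assumed per-LED field layout within LEDCTL (treat values as illustrative). */
#define LED_MODE_MASK 0x0F      /* bits 3:0 of each byte: LED mode        */
#define LED_IVRT      0x40      /* bit 6: invert the LED output           */
#define LED_BLINK     0x80      /* bit 7: blink enable                    */
#define MODE_LED_ON   0x0E      /* "on" mode, as noted in the comment above */

int main(void)
{
        uint32_t ledctl = 0x0E0F8E0E;   /* arbitrary example value */
        int i;

        for (i = 0; i < 32; i += 8) {
                uint8_t field = (ledctl >> i) & 0xFF;

                printf("LED%d: mode=0x%X invert=%d blink=%d\n", i / 8,
                       (unsigned int)(field & LED_MODE_MASK),
                       !!(field & LED_IVRT),
                       !!(field & LED_BLINK));
        }
        return 0;
}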
/**
- * igb_led_off - Turn LED off
+ * e1000_led_on_generic - Turn LED on
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED on.
+ **/
+s32 e1000_led_on_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_led_on_generic");
+
+ switch (hw->phy.media_type) {
+ case e1000_media_type_fiber:
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl &= ~E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ break;
+ case e1000_media_type_copper:
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
+ break;
+ default:
+ break;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_led_off_generic - Turn LED off
* @hw: pointer to the HW structure
*
* Turn LED off.
**/
-s32 igb_led_off(struct e1000_hw *hw)
+s32 e1000_led_off_generic(struct e1000_hw *hw)
{
+ u32 ctrl;
+
+ DEBUGFUNC("e1000_led_off_generic");
+
switch (hw->phy.media_type) {
+ case e1000_media_type_fiber:
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SWDPIN0;
+ ctrl |= E1000_CTRL_SWDPIO0;
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ break;
case e1000_media_type_copper:
- wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
+ E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
break;
default:
break;
}
- return 0;
+ return E1000_SUCCESS;
}
/**
- * igb_disable_pcie_master - Disables PCI-express master access
+ * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities
* @hw: pointer to the HW structure
+ * @no_snoop: bitmap of snoop events
*
- * Returns 0 (0) if successful, else returns -10
+ * Set the PCI-express register to snoop for events enabled in 'no_snoop'.
+ **/
+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop)
+{
+ u32 gcr;
+
+ DEBUGFUNC("e1000_set_pcie_no_snoop_generic");
+
+ if (hw->bus.type != e1000_bus_type_pci_express)
+ return;
+
+ if (no_snoop) {
+ gcr = E1000_READ_REG(hw, E1000_GCR);
+ gcr &= ~(PCIE_NO_SNOOP_ALL);
+ gcr |= no_snoop;
+ E1000_WRITE_REG(hw, E1000_GCR, gcr);
+ }
+}
+
+/**
+ * e1000_disable_pcie_master_generic - Disables PCI-express master access
+ * @hw: pointer to the HW structure
+ *
+ * Returns E1000_SUCCESS if successful, else returns -10
* (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
* the master requests to be disabled.
*
* Disables PCI-Express master access and verifies there are no pending
* requests.
**/
-s32 igb_disable_pcie_master(struct e1000_hw *hw)
+s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
{
u32 ctrl;
s32 timeout = MASTER_DISABLE_TIMEOUT;
- s32 ret_val = 0;
+
+ DEBUGFUNC("e1000_disable_pcie_master_generic");
if (hw->bus.type != e1000_bus_type_pci_express)
- goto out;
+ return E1000_SUCCESS;
- ctrl = rd32(E1000_CTRL);
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
- wr32(E1000_CTRL, ctrl);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
while (timeout) {
- if (!(rd32(E1000_STATUS) &
- E1000_STATUS_GIO_MASTER_ENABLE))
+ if (!(E1000_READ_REG(hw, E1000_STATUS) &
+ E1000_STATUS_GIO_MASTER_ENABLE) ||
+ E1000_REMOVED(hw->hw_addr))
break;
- udelay(100);
+ usec_delay(100);
timeout--;
}
if (!timeout) {
- hw_dbg("Master requests are pending.\n");
- ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
- goto out;
+ DEBUGOUT("Master requests are pending.\n");
+ return -E1000_ERR_MASTER_REQUESTS_PENDING;
}
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_validate_mdi_setting - Verify MDI/MDIx settings
+ * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
* @hw: pointer to the HW structure
*
- * Verify that when not using auto-negotitation that MDI/MDIx is correctly
- * set, which is forced to MDI mode only.
+ * Reset the Adaptive Interframe Spacing throttle to default values.
**/
-s32 igb_validate_mdi_setting(struct e1000_hw *hw)
+void e1000_reset_adaptive_generic(struct e1000_hw *hw)
{
- s32 ret_val = 0;
+ struct e1000_mac_info *mac = &hw->mac;
- /* All MDI settings are supported on 82580 and newer. */
- if (hw->mac.type >= e1000_82580)
- goto out;
+ DEBUGFUNC("e1000_reset_adaptive_generic");
+
+ if (!mac->adaptive_ifs) {
+ DEBUGOUT("Not in Adaptive IFS mode!\n");
+ return;
+ }
+
+ mac->current_ifs_val = 0;
+ mac->ifs_min_val = IFS_MIN;
+ mac->ifs_max_val = IFS_MAX;
+ mac->ifs_step_size = IFS_STEP;
+ mac->ifs_ratio = IFS_RATIO;
+
+ mac->in_ifs_mode = false;
+ E1000_WRITE_REG(hw, E1000_AIT, 0);
+}
+
+/**
+ * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
+ * @hw: pointer to the HW structure
+ *
+ * Update the Adaptive Interframe Spacing Throttle value based on the
+ * time between transmitted packets and time between collisions.
+ **/
+void e1000_update_adaptive_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+
+ DEBUGFUNC("e1000_update_adaptive_generic");
+
+ if (!mac->adaptive_ifs) {
+ DEBUGOUT("Not in Adaptive IFS mode!\n");
+ return;
+ }
+
+ if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
+ if (mac->tx_packet_delta > MIN_NUM_XMITS) {
+ mac->in_ifs_mode = true;
+ if (mac->current_ifs_val < mac->ifs_max_val) {
+ if (!mac->current_ifs_val)
+ mac->current_ifs_val = mac->ifs_min_val;
+ else
+ mac->current_ifs_val +=
+ mac->ifs_step_size;
+ E1000_WRITE_REG(hw, E1000_AIT,
+ mac->current_ifs_val);
+ }
+ }
+ } else {
+ if (mac->in_ifs_mode &&
+ (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
+ mac->current_ifs_val = 0;
+ mac->in_ifs_mode = false;
+ E1000_WRITE_REG(hw, E1000_AIT, 0);
+ }
+ }
+}
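The update rule above raises the interframe spacing while collisions dominate and drops it back to zero once they subside. A standalone sketch of the same rule follows; the IFS_* and MIN_NUM_XMITS values are stand-ins for the driver's header constants, and the AIT register write is replaced by a struct field.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's IFS_MIN/IFS_MAX/IFS_STEP/IFS_RATIO
 * and MIN_NUM_XMITS constants. */
#define IFS_MIN        40
#define IFS_MAX        80
#define IFS_STEP       10
#define IFS_RATIO      4
#define MIN_NUM_XMITS  1000

struct adaptive_ifs {
        unsigned int current;   /* value that would be written to AIT */
        bool in_ifs_mode;
};

static void update_adaptive(struct adaptive_ifs *a,
                            unsigned int collision_delta,
                            unsigned int tx_packet_delta)
{
        if (collision_delta * IFS_RATIO > tx_packet_delta) {
                if (tx_packet_delta > MIN_NUM_XMITS) {
                        a->in_ifs_mode = true;
                        if (a->current < IFS_MAX)
                                a->current = a->current ?
                                             a->current + IFS_STEP : IFS_MIN;
                }
        } else if (a->in_ifs_mode && tx_packet_delta <= MIN_NUM_XMITS) {
                a->current = 0;
                a->in_ifs_mode = false;
        }
}

int main(void)
{
        struct adaptive_ifs a = { 0, false };

        update_adaptive(&a, 600, 2000); /* heavy collisions: start throttling */
        update_adaptive(&a, 600, 2000); /* step the spacing up again          */
        printf("AIT = %u\n", a.current);        /* prints "AIT = 50" */
        return 0;
}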
+
+/**
+ * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
+ * @hw: pointer to the HW structure
+ *
+ * Verify that, when auto-negotiation is not used, MDI/MDIx is correctly
+ * set, which is forced to MDI mode only.
+ **/
+static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_validate_mdi_setting_generic");
if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
- hw_dbg("Invalid MDI setting detected\n");
+ DEBUGOUT("Invalid MDI setting detected\n");
hw->phy.mdix = 1;
- ret_val = -E1000_ERR_CONFIG;
- goto out;
+ return -E1000_ERR_CONFIG;
}
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_write_8bit_ctrl_reg - Write a 8bit CTRL register
+ * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings
+ * @hw: pointer to the HW structure
+ *
+ * Validate the MDI/MDIx setting, allowing for auto-crossover during forced
+ * operation.
+ **/
+s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic");
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_8bit_ctrl_reg_generic - Write an 8-bit CTRL register
* @hw: pointer to the HW structure
* @reg: 32bit register offset such as E1000_SCTL
* @offset: register offset to write to
@@ -1519,72 +2122,28 @@ out:
* and they all have the format address << 8 | data and bit 31 is polled for
* completion.
**/
-s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
- u32 offset, u8 data)
+s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+ u32 offset, u8 data)
{
u32 i, regvalue = 0;
- s32 ret_val = 0;
+
+ DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic");
/* Set up the address and data */
regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
- wr32(reg, regvalue);
+ E1000_WRITE_REG(hw, reg, regvalue);
/* Poll the ready bit to see if the MDI read completed */
for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
- udelay(5);
- regvalue = rd32(reg);
+ usec_delay(5);
+ regvalue = E1000_READ_REG(hw, reg);
if (regvalue & E1000_GEN_CTL_READY)
break;
}
if (!(regvalue & E1000_GEN_CTL_READY)) {
- hw_dbg("Reg %08x did not indicate ready\n", reg);
- ret_val = -E1000_ERR_PHY;
- goto out;
- }
-
-out:
- return ret_val;
-}
-
-/**
- * igb_enable_mng_pass_thru - Enable processing of ARP's
- * @hw: pointer to the HW structure
- *
- * Verifies the hardware needs to leave interface enabled so that frames can
- * be directed to and from the management interface.
- **/
-bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
-{
- u32 manc;
- u32 fwsm, factps;
- bool ret_val = false;
-
- if (!hw->mac.asf_firmware_present)
- goto out;
-
- manc = rd32(E1000_MANC);
-
- if (!(manc & E1000_MANC_RCV_TCO_EN))
- goto out;
-
- if (hw->mac.arc_subsystem_valid) {
- fwsm = rd32(E1000_FWSM);
- factps = rd32(E1000_FACTPS);
-
- if (!(factps & E1000_FACTPS_MNGCG) &&
- ((fwsm & E1000_FWSM_MODE_MASK) ==
- (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
- ret_val = true;
- goto out;
- }
- } else {
- if ((manc & E1000_MANC_SMBUS_EN) &&
- !(manc & E1000_MANC_ASF_EN)) {
- ret_val = true;
- goto out;
- }
+ DEBUGOUT1("Reg %08x did not indicate ready\n", reg);
+ return -E1000_ERR_PHY;
}
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 5e13e83cc608..a3878361095e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,14 +12,11 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -28,64 +25,57 @@
#ifndef _E1000_MAC_H_
#define _E1000_MAC_H_
-#include "e1000_hw.h"
-
-#include "e1000_phy.h"
-#include "e1000_nvm.h"
-#include "e1000_defines.h"
-#include "e1000_i210.h"
-
-/* Functions that should not be called directly from drivers but can be used
- * by other files in this 'shared code'
- */
-s32 igb_blink_led(struct e1000_hw *hw);
-s32 igb_check_for_copper_link(struct e1000_hw *hw);
-s32 igb_cleanup_led(struct e1000_hw *hw);
-s32 igb_config_fc_after_link_up(struct e1000_hw *hw);
-s32 igb_disable_pcie_master(struct e1000_hw *hw);
-s32 igb_force_mac_fc(struct e1000_hw *hw);
-s32 igb_get_auto_rd_done(struct e1000_hw *hw);
-s32 igb_get_bus_info_pcie(struct e1000_hw *hw);
-s32 igb_get_hw_semaphore(struct e1000_hw *hw);
-s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
- u16 *duplex);
-s32 igb_id_led_init(struct e1000_hw *hw);
-s32 igb_led_off(struct e1000_hw *hw);
-void igb_update_mc_addr_list(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count);
-s32 igb_setup_link(struct e1000_hw *hw);
-s32 igb_validate_mdi_setting(struct e1000_hw *hw);
-s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
- u32 offset, u8 data);
-
-void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
-void igb_clear_vfta(struct e1000_hw *hw);
-void igb_clear_vfta_i350(struct e1000_hw *hw);
-s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add);
-void igb_config_collision_dist(struct e1000_hw *hw);
-void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
-void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
-void igb_put_hw_semaphore(struct e1000_hw *hw);
-void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
-s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
-
-bool igb_enable_mng_pass_thru(struct e1000_hw *hw);
-
-enum e1000_mng_mode {
- e1000_mng_mode_none = 0,
- e1000_mng_mode_asf,
- e1000_mng_mode_pt,
- e1000_mng_mode_ipmi,
- e1000_mng_mode_host_if_only
-};
-
-#define E1000_FACTPS_MNGCG 0x20000000
-
-#define E1000_FWSM_MODE_MASK 0xE
-#define E1000_FWSM_MODE_SHIFT 1
-
-#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
-
-extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+void e1000_init_mac_ops_generic(struct e1000_hw *hw);
+#ifndef E1000_REMOVED
+#define E1000_REMOVED(a) (0)
+#endif /* E1000_REMOVED */
+void e1000_null_mac_generic(struct e1000_hw *hw);
+s32 e1000_null_ops_generic(struct e1000_hw *hw);
+s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
+bool e1000_null_mng_mode(struct e1000_hw *hw);
+void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
+void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
+int e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
+s32 e1000_blink_led_generic(struct e1000_hw *hw);
+s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw);
+s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
+s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw);
+s32 e1000_cleanup_led_generic(struct e1000_hw *hw);
+s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw);
+s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw);
+s32 e1000_force_mac_fc_generic(struct e1000_hw *hw);
+s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw);
+s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
+void e1000_set_lan_id_single_port(struct e1000_hw *hw);
+s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
+s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex);
+s32 e1000_id_led_init_generic(struct e1000_hw *hw);
+s32 e1000_led_on_generic(struct e1000_hw *hw);
+s32 e1000_led_off_generic(struct e1000_hw *hw);
+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count);
+s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
+s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
+s32 e1000_setup_led_generic(struct e1000_hw *hw);
+s32 e1000_setup_link_generic(struct e1000_hw *hw);
+s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
+s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+ u32 offset, u8 data);
+
+u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr);
+
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
+void e1000_clear_vfta_generic(struct e1000_hw *hw);
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
+void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+void e1000_reset_adaptive_generic(struct e1000_hw *hw);
+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
+void e1000_update_adaptive_generic(struct e1000_hw *hw);
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_manage.c b/drivers/net/ethernet/intel/igb/e1000_manage.c
new file mode 100644
index 000000000000..40c92e2e1502
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_manage.c
@@ -0,0 +1,551 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2015 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+/**
+ * e1000_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to EEPROM
+ * @length: size of EEPROM to calculate a checksum for
+ *
+ * Calculates the checksum for some buffer on a specified length. The
+ * checksum calculated is returned.
+ **/
+u8 e1000_calculate_checksum(u8 *buffer, u32 length)
+{
+ u32 i;
+ u8 sum = 0;
+
+ DEBUGFUNC("e1000_calculate_checksum");
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return (u8) (0 - sum);
+}
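The checksum is the two's complement of the 8-bit byte sum, so summing the data plus the checksum yields zero modulo 256. A small standalone check of that property (helper and data are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as e1000_calculate_checksum(): 0 minus the 8-bit sum. */
static uint8_t calc_checksum(const uint8_t *buf, uint32_t len)
{
        uint8_t sum = 0;
        uint32_t i;

        for (i = 0; i < len; i++)
                sum += buf[i];
        return (uint8_t)(0 - sum);
}

int main(void)
{
        uint8_t cookie[8] = { 0x49, 0x41, 0x4D, 0x54, 0x10, 0x00, 0x00, 0x00 };
        uint8_t csum = calc_checksum(cookie, sizeof(cookie));
        uint8_t verify = 0;
        size_t i;

        for (i = 0; i < sizeof(cookie); i++)
                verify += cookie[i];
        verify += csum;         /* adding the checksum cancels the sum */

        printf("checksum=0x%02X verify=0x%02X\n",
               (unsigned int)csum, (unsigned int)verify); /* verify == 0x00 */
        return 0;
}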
+
+/**
+ * e1000_mng_enable_host_if_generic - Checks host interface is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Returns E1000_SUCCESS upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ * This function checks whether the HOST IF is enabled for command operation
+ * and also checks whether the previous command is completed. It busy waits
+ * if the previous command has not yet completed.
+ **/
+s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
+{
+ u32 hicr;
+ u8 i;
+
+ DEBUGFUNC("e1000_mng_enable_host_if_generic");
+
+ if (!hw->mac.arc_subsystem_valid) {
+ DEBUGOUT("ARC subsystem not valid.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_EN)) {
+ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+ /* check the previous command is completed */
+ for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ msec_delay_irq(1);
+ }
+
+ if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+		DEBUGOUT("Previous command timeout failed.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_mng_mode_generic - Generic check management mode
+ * @hw: pointer to the HW structure
+ *
+ * Reads the firmware semaphore register and returns true (>0) if
+ * manageability is enabled, else false (0).
+ **/
+bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
+{
+ u32 fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+ DEBUGFUNC("e1000_check_mng_mode_generic");
+
+ return (fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
+}
+
+/**
+ * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx
+ * @hw: pointer to the HW structure
+ *
+ * Enables packet filtering on transmit packets if manageability is enabled
+ * and host interface is enabled.
+ **/
+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
+{
+ struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
+ u32 *buffer = (u32 *)&hw->mng_cookie;
+ u32 offset;
+ s32 ret_val, hdr_csum, csum;
+ u8 i, len;
+
+ DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
+
+ hw->mac.tx_pkt_filtering = true;
+
+ /* No manageability, no filtering */
+ if (!hw->mac.ops.check_mng_mode(hw)) {
+ hw->mac.tx_pkt_filtering = false;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /* If we can't read from the host interface for whatever
+ * reason, disable filtering.
+ */
+ ret_val = e1000_mng_enable_host_if_generic(hw);
+ if (ret_val != E1000_SUCCESS) {
+ hw->mac.tx_pkt_filtering = false;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /* Read in the header. Length and offset are in dwords. */
+ len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
+ offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
+ for (i = 0; i < len; i++)
+ *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
+ offset + i);
+ hdr_csum = hdr->checksum;
+ hdr->checksum = 0;
+ csum = e1000_calculate_checksum((u8 *)hdr,
+ E1000_MNG_DHCP_COOKIE_LENGTH);
+ /* If either the checksums or signature don't match, then
+ * the cookie area isn't considered valid, in which case we
+ * take the safe route of assuming Tx filtering is enabled.
+ */
+ if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
+ hw->mac.tx_pkt_filtering = true;
+ return hw->mac.tx_pkt_filtering;
+ }
+
+ /* Cookie area is valid, make the final check for filtering. */
+ if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
+ hw->mac.tx_pkt_filtering = false;
+
+ return hw->mac.tx_pkt_filtering;
+}
+
+/**
+ * e1000_mng_write_cmd_header_generic - Writes manageability command header
+ * @hw: pointer to the HW structure
+ * @hdr: pointer to the host interface command header
+ *
+ * Writes the command header after computing its checksum.
+ **/
+s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr)
+{
+ u16 i, length = sizeof(struct e1000_host_mng_command_header);
+
+ DEBUGFUNC("e1000_mng_write_cmd_header_generic");
+
+ /* Write the whole command header structure with new checksum. */
+
+ hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
+
+ length >>= 2;
+ /* Write the relevant command block into the ram area. */
+ for (i = 0; i < length; i++) {
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
+ *((u32 *) hdr + i));
+ E1000_WRITE_FLUSH(hw);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_mng_host_if_write_generic - Write to the manageability host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface buffer
+ * @length: size of the buffer
+ * @offset: location in the buffer to write to
+ * @sum: sum of the data (not checksum)
+ *
+ * This function writes the buffer content at the given offset on the host
+ * interface. It handles alignment so the writes are done as efficiently as
+ * possible, and it accumulates the sum of the written data in the *sum
+ * parameter.
+ **/
+s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
+ u16 length, u16 offset, u8 *sum)
+{
+ u8 *tmp;
+ u8 *bufptr = buffer;
+ u32 data = 0;
+ u16 remaining, i, j, prev_bytes;
+
+ DEBUGFUNC("e1000_mng_host_if_write_generic");
+
+	/* sum is only the sum of the data; it is not a checksum */
+
+ if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
+ return -E1000_ERR_PARAM;
+
+ tmp = (u8 *)&data;
+ prev_bytes = offset & 0x3;
+ offset >>= 2;
+
+ if (prev_bytes) {
+ data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset);
+ for (j = prev_bytes; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data);
+ length -= j - prev_bytes;
+ offset++;
+ }
+
+ remaining = length & 0x3;
+ length -= remaining;
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /* The device driver writes the relevant command block into the
+ * ram area.
+ */
+ for (i = 0; i < length; i++) {
+ for (j = 0; j < sizeof(u32); j++) {
+ *(tmp + j) = *bufptr++;
+ *sum += *(tmp + j);
+ }
+
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
+ data);
+ }
+ if (remaining) {
+ for (j = 0; j < sizeof(u32); j++) {
+ if (j < remaining)
+ *(tmp + j) = *bufptr++;
+ else
+ *(tmp + j) = 0;
+
+ *sum += *(tmp + j);
+ }
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
+ data);
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface
+ * @hw: pointer to the HW structure
+ * @buffer: pointer to the host interface
+ * @length: size of the buffer
+ *
+ * Writes the DHCP information to the host interface.
+ **/
+s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer,
+ u16 length)
+{
+ struct e1000_host_mng_command_header hdr;
+ s32 ret_val;
+ u32 hicr;
+
+ DEBUGFUNC("e1000_mng_write_dhcp_info_generic");
+
+ hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+ hdr.command_length = length;
+ hdr.reserved1 = 0;
+ hdr.reserved2 = 0;
+ hdr.checksum = 0;
+
+ /* Enable the host interface */
+ ret_val = e1000_mng_enable_host_if_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Populate the host interface with the contents of "buffer". */
+ ret_val = e1000_mng_host_if_write_generic(hw, buffer, length,
+ sizeof(hdr), &(hdr.checksum));
+ if (ret_val)
+ return ret_val;
+
+ /* Write the manageability command header */
+ ret_val = e1000_mng_write_cmd_header_generic(hw, &hdr);
+ if (ret_val)
+ return ret_val;
+
+ /* Tell the ARC a new command is pending. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_enable_mng_pass_thru - Check if management passthrough is needed
+ * @hw: pointer to the HW structure
+ *
+ * Verifies the hardware needs to leave interface enabled so that frames can
+ * be directed to and from the management interface.
+ **/
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+ u32 manc;
+ u32 fwsm, factps;
+
+ DEBUGFUNC("e1000_enable_mng_pass_thru");
+
+ if (!hw->mac.asf_firmware_present)
+ return false;
+
+ manc = E1000_READ_REG(hw, E1000_MANC);
+
+ if (!(manc & E1000_MANC_RCV_TCO_EN))
+ return false;
+
+ if (hw->mac.has_fwsm) {
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+ factps = E1000_READ_REG(hw, E1000_FACTPS);
+
+ if (!(factps & E1000_FACTPS_MNGCG) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)))
+ return true;
+ } else if ((manc & E1000_MANC_SMBUS_EN) &&
+ !(manc & E1000_MANC_ASF_EN)) {
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * e1000_host_interface_command - Writes buffer to host interface
+ * @hw: pointer to the HW structure
+ * @buffer: contains a command to write
+ * @length: the byte length of the buffer, must be multiple of 4 bytes
+ *
+ * Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS
+ * else returns E1000_ERR_HOST_INTERFACE_COMMAND.
+ **/
+s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length)
+{
+ u32 hicr, i;
+
+ DEBUGFUNC("e1000_host_interface_command");
+
+ if (!(hw->mac.arc_subsystem_valid)) {
+ DEBUGOUT("Hardware doesn't support host interface command.\n");
+ return E1000_SUCCESS;
+ }
+
+ if (!hw->mac.asf_firmware_present) {
+ DEBUGOUT("Firmware is not present.\n");
+ return E1000_SUCCESS;
+ }
+
+ if (length == 0 || length & 0x3 ||
+ length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) {
+ DEBUGOUT("Buffer length failure.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_EN)) {
+ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /* The device driver writes the relevant command block
+ * into the ram area.
+ */
+ for (i = 0; i < length; i++)
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
+ *((u32 *)buffer + i));
+
+ /* Setting this bit tells the ARC that a new command is pending. */
+ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ msec_delay(1);
+ }
+
+ /* Check command successful completion. */
+ if (i == E1000_HI_COMMAND_TIMEOUT ||
+ (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) {
+ DEBUGOUT("Command has failed with no status valid.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ for (i = 0; i < length; i++)
+ *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
+ E1000_HOST_IF,
+ i);
+
+ return E1000_SUCCESS;
+}
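The command path above only accepts a non-zero buffer whose length is a multiple of 4 bytes and at most E1000_HI_MAX_BLOCK_BYTE_LENGTH, then moves it a DWORD at a time. Below is a standalone sketch of that validation and packing, with the E1000_HOST_IF register-array write replaced by a local buffer; the helper name is made up for the sketch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HI_MAX_BLOCK_BYTE_LENGTH 1792   /* matches E1000_HI_MAX_BLOCK_BYTE_LENGTH */

/* Validate the buffer the way the driver does, then pack it into DWORDs
 * (the driver writes each DWORD to the E1000_HOST_IF register array). */
static int host_if_stage(const uint8_t *buffer, uint32_t length,
                         uint32_t *dwords, uint32_t max_dwords)
{
        uint32_t i, ndwords;

        if (length == 0 || (length & 0x3) ||
            length > HI_MAX_BLOCK_BYTE_LENGTH)
                return -1;      /* "Buffer length failure" in the driver */

        ndwords = length >> 2;
        if (ndwords > max_dwords)
                return -1;

        for (i = 0; i < ndwords; i++)
                memcpy(&dwords[i], buffer + (i * 4), 4);

        return (int)ndwords;
}

int main(void)
{
        uint8_t cmd[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        uint32_t staged[2];
        int n = host_if_stage(cmd, sizeof(cmd), staged, 2);

        printf("%d dwords staged, first = 0x%08X\n", n,
               (unsigned int)staged[0]);
        return 0;
}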
+/**
+ * e1000_load_firmware - Writes proxy FW code buffer to host interface
+ * and executes it.
+ * @hw: pointer to the HW structure
+ * @buffer: contains a firmware to write
+ * @length: the byte length of the buffer, must be multiple of 4 bytes
+ *
+ * Upon success returns E1000_SUCCESS; returns -E1000_ERR_CONFIG if FW loading
+ * is not enabled in HW, else -E1000_ERR_HOST_INTERFACE_COMMAND.
+ **/
+s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length)
+{
+ u32 hicr, hibba, fwsm, icr, i;
+
+ DEBUGFUNC("e1000_load_firmware");
+
+ if (hw->mac.type < e1000_i210) {
+ DEBUGOUT("Hardware doesn't support loading FW by the driver\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_EN)) {
+ DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+ return -E1000_ERR_CONFIG;
+ }
+ if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) {
+ DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) {
+ DEBUGOUT("Buffer length failure.\n");
+ return -E1000_ERR_INVALID_ARGUMENT;
+ }
+
+ /* Clear notification from ROM-FW by reading ICR register */
+ icr = E1000_READ_REG(hw, E1000_ICR_V2);
+
+ /* Reset ROM-FW */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ hicr |= E1000_HICR_FW_RESET_ENABLE;
+ E1000_WRITE_REG(hw, E1000_HICR, hicr);
+ hicr |= E1000_HICR_FW_RESET;
+ E1000_WRITE_REG(hw, E1000_HICR, hicr);
+ E1000_WRITE_FLUSH(hw);
+
+ /* Wait till MAC notifies about its readiness after ROM-FW reset */
+ for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) {
+ icr = E1000_READ_REG(hw, E1000_ICR_V2);
+ if (icr & E1000_ICR_MNG)
+ break;
+ msec_delay(1);
+ }
+
+ /* Check for timeout */
+ if (i == E1000_HI_COMMAND_TIMEOUT) {
+ DEBUGOUT("FW reset failed.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Wait till MAC is ready to accept new FW code */
+ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
+ fwsm = E1000_READ_REG(hw, E1000_FWSM);
+ if ((fwsm & E1000_FWSM_FW_VALID) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT ==
+ E1000_FWSM_HI_EN_ONLY_MODE))
+ break;
+ msec_delay(1);
+ }
+
+ /* Check for timeout */
+ if (i == E1000_HI_COMMAND_TIMEOUT) {
+ DEBUGOUT("FW reset failed.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ /* Calculate length in DWORDs */
+ length >>= 2;
+
+ /* The device driver writes the relevant FW code block
+ * into the ram area in DWORDs via 1kB ram addressing window.
+ */
+ for (i = 0; i < length; i++) {
+ if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) {
+ /* Point to correct 1kB ram window */
+ hibba = E1000_HI_FW_BASE_ADDRESS +
+ ((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) *
+ (i / E1000_HI_FW_BLOCK_DWORD_LENGTH));
+
+ E1000_WRITE_REG(hw, E1000_HIBBA, hibba);
+ }
+
+ E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
+ i % E1000_HI_FW_BLOCK_DWORD_LENGTH,
+ *((u32 *)buffer + i));
+ }
+
+ /* Setting this bit tells the ARC that a new FW is ready to execute. */
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+ for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
+ hicr = E1000_READ_REG(hw, E1000_HICR);
+ if (!(hicr & E1000_HICR_C))
+ break;
+ msec_delay(1);
+ }
+
+ /* Check for successful FW start. */
+ if (i == E1000_HI_COMMAND_TIMEOUT) {
+ DEBUGOUT("New FW did not start within timeout period.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
+ return E1000_SUCCESS;
+}
+
diff --git a/drivers/net/ethernet/intel/igb/e1000_manage.h b/drivers/net/ethernet/intel/igb/e1000_manage.h
new file mode 100644
index 000000000000..09afc1aed497
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_manage.h
@@ -0,0 +1,86 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2015 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_MANAGE_H_
+#define _E1000_MANAGE_H_
+
+bool e1000_check_mng_mode_generic(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw);
+s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw);
+s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
+ u16 length, u16 offset, u8 *sum);
+s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
+ struct e1000_host_mng_command_header *hdr);
+s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw,
+ u8 *buffer, u16 length);
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+u8 e1000_calculate_checksum(u8 *buffer, u32 length);
+s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length);
+s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length);
+
+enum e1000_mng_mode {
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG 0x20000000
+
+#define E1000_FWSM_MODE_MASK 0xE
+#define E1000_FWSM_MODE_SHIFT 1
+#define E1000_FWSM_FW_VALID 0x00008000
+#define E1000_FWSM_HI_EN_ONLY_MODE 0x4
+
+#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
+
+#define E1000_VFTA_ENTRY_SHIFT 5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */
+#define E1000_HI_FW_BASE_ADDRESS 0x10000
+#define E1000_HI_FW_MAX_LENGTH (64 * 1024) /* Num of bytes */
+#define E1000_HI_FW_BLOCK_DWORD_LENGTH 256 /* Num of DWORDs per page */
+#define E1000_HICR_MEMORY_BASE_EN 0x200 /* MB Enable bit - RO */
+#define E1000_HICR_EN 0x01 /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C 0x02
+#define E1000_HICR_SV 0x04 /* Status Validity */
+#define E1000_HICR_FW_RESET_ENABLE 0x40
+#define E1000_HICR_FW_RESET 0x80
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE 0x544D4149
+
+#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index dac1447fabf7..6490b33d7768 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,14 +12,11 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -28,19 +25,47 @@
#include "e1000_mbx.h"
/**
- * igb_read_mbx - Reads a message from the mailbox
+ * e1000_null_mbx_check_for_flag - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ DEBUGFUNC("e1000_null_mbx_check_flag");
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_mbx_transact - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG *msg,
+ u16 E1000_UNUSEDARG size,
+ u16 E1000_UNUSEDARG mbx_id)
+{
+ DEBUGFUNC("e1000_null_mbx_rw_msg");
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_mbx - Reads a message from the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to read
*
- * returns SUCCESS if it successfully read message from buffer
+ * returns SUCCESS if it successfully read message from buffer
**/
-s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
s32 ret_val = -E1000_ERR_MBX;
+ DEBUGFUNC("e1000_read_mbx");
+
/* limit read to size of mailbox */
if (size > mbx->size)
size = mbx->size;
@@ -52,7 +77,7 @@ s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
}
/**
- * igb_write_mbx - Write a message to the mailbox
+ * e1000_write_mbx - Write a message to the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
@@ -60,10 +85,12 @@ s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
*
* returns SUCCESS if it successfully copied message into the buffer
**/
-s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
- s32 ret_val = 0;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_write_mbx");
if (size > mbx->size)
ret_val = -E1000_ERR_MBX;
@@ -75,17 +102,19 @@ s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
}
/**
- * igb_check_for_msg - checks to see if someone sent us mail
+ * e1000_check_for_msg - checks to see if someone sent us mail
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
+s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
s32 ret_val = -E1000_ERR_MBX;
+ DEBUGFUNC("e1000_check_for_msg");
+
if (mbx->ops.check_for_msg)
ret_val = mbx->ops.check_for_msg(hw, mbx_id);
@@ -93,17 +122,19 @@ s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
}
/**
- * igb_check_for_ack - checks to see if someone sent us ACK
+ * e1000_check_for_ack - checks to see if someone sent us ACK
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
+s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
s32 ret_val = -E1000_ERR_MBX;
+ DEBUGFUNC("e1000_check_for_ack");
+
if (mbx->ops.check_for_ack)
ret_val = mbx->ops.check_for_ack(hw, mbx_id);
@@ -111,17 +142,19 @@ s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
}
/**
- * igb_check_for_rst - checks to see if other side has reset
+ * e1000_check_for_rst - checks to see if other side has reset
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
+s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
s32 ret_val = -E1000_ERR_MBX;
+ DEBUGFUNC("e1000_check_for_rst");
+
if (mbx->ops.check_for_rst)
ret_val = mbx->ops.check_for_rst(hw, mbx_id);
@@ -129,17 +162,19 @@ s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
}
/**
- * igb_poll_for_msg - Wait for message notification
+ * e1000_poll_for_msg - Wait for message notification
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully received a message notification
**/
-static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
+static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
+ DEBUGFUNC("e1000_poll_for_msg");
+
if (!countdown || !mbx->ops.check_for_msg)
goto out;
@@ -147,28 +182,30 @@ static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
countdown--;
if (!countdown)
break;
- udelay(mbx->usec_delay);
+ usec_delay(mbx->usec_delay);
}
/* if we failed, all future posted messages fail until reset */
if (!countdown)
mbx->timeout = 0;
out:
- return countdown ? 0 : -E1000_ERR_MBX;
+ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
}
/**
- * igb_poll_for_ack - Wait for message acknowledgement
+ * e1000_poll_for_ack - Wait for message acknowledgement
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully received a message acknowledgement
**/
-static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
+static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
+ DEBUGFUNC("e1000_poll_for_ack");
+
if (!countdown || !mbx->ops.check_for_ack)
goto out;
@@ -176,18 +213,18 @@ static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
countdown--;
if (!countdown)
break;
- udelay(mbx->usec_delay);
+ usec_delay(mbx->usec_delay);
}
/* if we failed, all future posted messages fail until reset */
if (!countdown)
mbx->timeout = 0;
out:
- return countdown ? 0 : -E1000_ERR_MBX;
+ return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
}
/**
- * igb_read_posted_mbx - Wait for message notification and receive message
+ * e1000_read_posted_mbx - Wait for message notification and receive message
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
@@ -196,17 +233,19 @@ out:
* returns SUCCESS if it successfully received a message notification and
* copied it into the receive buffer.
**/
-static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
s32 ret_val = -E1000_ERR_MBX;
+ DEBUGFUNC("e1000_read_posted_mbx");
+
if (!mbx->ops.read)
goto out;
- ret_val = igb_poll_for_msg(hw, mbx_id);
+ ret_val = e1000_poll_for_msg(hw, mbx_id);
+ /* if ack received read message, otherwise we timed out */
if (!ret_val)
ret_val = mbx->ops.read(hw, msg, size, mbx_id);
out:
@@ -214,7 +253,7 @@ out:
}
/**
- * igb_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
@@ -223,12 +262,13 @@ out:
* returns SUCCESS if it successfully copied message into the buffer and
* received an ack to that message within delay * timeout period
**/
-static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct e1000_mbx_info *mbx = &hw->mbx;
s32 ret_val = -E1000_ERR_MBX;
+ DEBUGFUNC("e1000_write_posted_mbx");
+
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops.write || !mbx->timeout)
goto out;
@@ -238,37 +278,58 @@ static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
/* if msg sent wait until we receive an ack */
if (!ret_val)
- ret_val = igb_poll_for_ack(hw, mbx_id);
+ ret_val = e1000_poll_for_ack(hw, mbx_id);
out:
return ret_val;
}
-static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
+/**
+ * e1000_init_mbx_ops_generic - Initialize mbx function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets the function pointers to no-op functions
+ **/
+void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ mbx->ops.init_params = e1000_null_ops_generic;
+ mbx->ops.read = e1000_null_mbx_transact;
+ mbx->ops.write = e1000_null_mbx_transact;
+ mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag;
+ mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag;
+ mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag;
+ mbx->ops.read_posted = e1000_read_posted_mbx;
+ mbx->ops.write_posted = e1000_write_posted_mbx;
+}
+
+static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
{
- u32 mbvficr = rd32(E1000_MBVFICR);
+ u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR);
s32 ret_val = -E1000_ERR_MBX;
if (mbvficr & mask) {
- ret_val = 0;
- wr32(E1000_MBVFICR, mask);
+ ret_val = E1000_SUCCESS;
+ E1000_WRITE_REG(hw, E1000_MBVFICR, mask);
}
return ret_val;
}
/**
- * igb_check_for_msg_pf - checks to see if the VF has sent mail
+ * e1000_check_for_msg_pf - checks to see if the VF has sent mail
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
+static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
{
s32 ret_val = -E1000_ERR_MBX;
- if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
- ret_val = 0;
+ DEBUGFUNC("e1000_check_for_msg_pf");
+
+ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
+ ret_val = E1000_SUCCESS;
hw->mbx.stats.reqs++;
}
@@ -276,18 +337,20 @@ static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
}
/**
- * igb_check_for_ack_pf - checks to see if the VF has ACKed
+ * e1000_check_for_ack_pf - checks to see if the VF has ACKed
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
+static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
{
s32 ret_val = -E1000_ERR_MBX;
- if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
- ret_val = 0;
+ DEBUGFUNC("e1000_check_for_ack_pf");
+
+ if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
+ ret_val = E1000_SUCCESS;
hw->mbx.stats.acks++;
}
@@ -295,20 +358,22 @@ static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
}
/**
- * igb_check_for_rst_pf - checks to see if the VF has reset
+ * e1000_check_for_rst_pf - checks to see if the VF has reset
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
+static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
{
- u32 vflre = rd32(E1000_VFLRE);
+ u32 vflre = E1000_READ_REG(hw, E1000_VFLRE);
s32 ret_val = -E1000_ERR_MBX;
+ DEBUGFUNC("e1000_check_for_rst_pf");
+
if (vflre & (1 << vf_number)) {
- ret_val = 0;
- wr32(E1000_VFLRE, (1 << vf_number));
+ ret_val = E1000_SUCCESS;
+ E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number));
hw->mbx.stats.rsts++;
}
@@ -316,30 +381,40 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
}
/**
- * igb_obtain_mbx_lock_pf - obtain mailbox lock
+ * e1000_obtain_mbx_lock_pf - obtain mailbox lock
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* return SUCCESS if we obtained the mailbox lock
**/
-static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
+static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
{
s32 ret_val = -E1000_ERR_MBX;
u32 p2v_mailbox;
+ int count = 10;
- /* Take ownership of the buffer */
- wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
+ DEBUGFUNC("e1000_obtain_mbx_lock_pf");
- /* reserve mailbox for vf use */
- p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
- if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
- ret_val = 0;
+ do {
+ /* Take ownership of the buffer */
+ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number),
+ E1000_P2VMAILBOX_PFU);
+
+ /* reserve mailbox for pf use */
+ p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
+ if (p2v_mailbox & E1000_P2VMAILBOX_PFU) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(1000);
+ } while (count-- > 0);
return ret_val;
+
}
/**
- * igb_write_mbx_pf - Places a message in the mailbox
+ * e1000_write_mbx_pf - Places a message in the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
@@ -347,27 +422,29 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
*
* returns SUCCESS if it successfully copied message into the buffer
**/
-static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
{
s32 ret_val;
u16 i;
+ DEBUGFUNC("e1000_write_mbx_pf");
+
/* lock the mailbox to prevent pf/vf race condition */
- ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
+ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
if (ret_val)
goto out_no_write;
/* flush msg and acks as we are overwriting the message buffer */
- igb_check_for_msg_pf(hw, vf_number);
- igb_check_for_ack_pf(hw, vf_number);
+ e1000_check_for_msg_pf(hw, vf_number);
+ e1000_check_for_ack_pf(hw, vf_number);
/* copy the caller specified message to the mailbox memory buffer */
for (i = 0; i < size; i++)
- array_wr32(E1000_VMBMEM(vf_number), i, msg[i]);
+ E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]);
/* Interrupt VF to tell it a message has been sent and release buffer*/
- wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
+ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
/* update stats */
hw->mbx.stats.msgs_tx++;
@@ -378,7 +455,7 @@ out_no_write:
}
/**
- * igb_read_mbx_pf - Read a message from the mailbox
+ * e1000_read_mbx_pf - Read a message from the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
@@ -388,23 +465,25 @@ out_no_write:
* memory buffer. The presumption is that the caller knows that there was
* a message due to a VF request so no polling for message is needed.
**/
-static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
{
s32 ret_val;
u16 i;
+ DEBUGFUNC("e1000_read_mbx_pf");
+
/* lock the mailbox to prevent pf/vf race condition */
- ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
+ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
if (ret_val)
goto out_no_read;
/* copy the message to the mailbox memory buffer */
for (i = 0; i < size; i++)
- msg[i] = array_rd32(E1000_VMBMEM(vf_number), i);
+ msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i);
/* Acknowledge the message and release buffer */
- wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
+ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
/* update stats */
hw->mbx.stats.msgs_rx++;
@@ -419,29 +498,34 @@ out_no_read:
*
* Initializes the hw->mbx struct to correct values for pf mailbox
*/
-s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
+s32 e1000_init_mbx_params_pf(struct e1000_hw *hw)
{
struct e1000_mbx_info *mbx = &hw->mbx;
- mbx->timeout = 0;
- mbx->usec_delay = 0;
-
- mbx->size = E1000_VFMAILBOX_SIZE;
-
- mbx->ops.read = igb_read_mbx_pf;
- mbx->ops.write = igb_write_mbx_pf;
- mbx->ops.read_posted = igb_read_posted_mbx;
- mbx->ops.write_posted = igb_write_posted_mbx;
- mbx->ops.check_for_msg = igb_check_for_msg_pf;
- mbx->ops.check_for_ack = igb_check_for_ack_pf;
- mbx->ops.check_for_rst = igb_check_for_rst_pf;
-
- mbx->stats.msgs_tx = 0;
- mbx->stats.msgs_rx = 0;
- mbx->stats.reqs = 0;
- mbx->stats.acks = 0;
- mbx->stats.rsts = 0;
-
- return 0;
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ case e1000_i354:
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = E1000_VFMAILBOX_SIZE;
+
+ mbx->ops.read = e1000_read_mbx_pf;
+ mbx->ops.write = e1000_write_mbx_pf;
+ mbx->ops.read_posted = e1000_read_posted_mbx;
+ mbx->ops.write_posted = e1000_write_posted_mbx;
+ mbx->ops.check_for_msg = e1000_check_for_msg_pf;
+ mbx->ops.check_for_ack = e1000_check_for_ack_pf;
+ mbx->ops.check_for_rst = e1000_check_for_rst_pf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+ default:
+ return E1000_SUCCESS;
+ }
}
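The posted read/write helpers above wrap the raw mailbox ops with the poll_for_msg/poll_for_ack loops. A minimal usage sketch from the PF side (illustrative only: it assumes e1000_init_mbx_params_pf() has run and that the caller has given mbx->timeout and mbx->usec_delay non-zero values, which this patch leaves at 0 for the PF):

/* Hedged sketch: PF sends a control word to VF 0 and waits for the reply.
 * E1000_PF_CONTROL_MSG and E1000_VFMAILBOX_SIZE come from e1000_mbx.h; the
 * non-zero timeout/usec_delay are an assumption of this example.
 */
static s32 example_pf_ping_vf0(struct e1000_hw *hw)
{
        u32 msg[E1000_VFMAILBOX_SIZE] = { E1000_PF_CONTROL_MSG };
        s32 ret_val;

        ret_val = e1000_write_posted_mbx(hw, msg, 1, 0 /* vf_number */);
        if (ret_val)
                return ret_val;         /* write failed or no ack in time */

        /* collect whatever the VF posted back */
        return e1000_read_posted_mbx(hw, msg, E1000_VFMAILBOX_SIZE, 0);
}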
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index de9bba41acf3..28900216ac25 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,14 +12,11 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -28,7 +25,7 @@
#ifndef _E1000_MBX_H_
#define _E1000_MBX_H_
-#include "e1000_hw.h"
+#include "e1000_api.h"
#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
@@ -36,10 +33,10 @@
#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
-#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
-#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
-#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
-#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
+#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
+#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
@@ -47,31 +44,41 @@
* PF. The reverse is true if it is E1000_PF_*.
* Message ACK's are the value or'd with 0xF0000000
*/
-/* Messages below or'd with this are the ACK */
+/* Msgs below or'd with this are the ACK */
#define E1000_VT_MSGTYPE_ACK 0x80000000
-/* Messages below or'd with this are the NACK */
+/* Msgs below or'd with this are the NACK */
#define E1000_VT_MSGTYPE_NACK 0x40000000
/* Indicates that VF is still clear to send requests */
#define E1000_VT_MSGTYPE_CTS 0x20000000
#define E1000_VT_MSGINFO_SHIFT 16
-/* bits 23:16 are used for exra info for certain messages */
+/* bits 23:16 are used for extra info for certain messages */
#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
-#define E1000_VF_RESET 0x01 /* VF requests reset */
-#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
-#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
-#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
-#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */
-#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
+#define E1000_VF_RESET 0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
+#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
+#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
+#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_LPE 0x05 /* reqs to set VMOLR.LPE */
+#define E1000_VF_SET_PROMISC 0x06 /* reqs to clear VMOLR.ROPE/MPME*/
+#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT)
#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
-#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
+#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
-s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16);
-s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16);
-s32 igb_check_for_msg(struct e1000_hw *, u16);
-s32 igb_check_for_ack(struct e1000_hw *, u16);
-s32 igb_check_for_rst(struct e1000_hw *, u16);
-s32 igb_init_mbx_params_pf(struct e1000_hw *);
+s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_check_for_msg(struct e1000_hw *, u16);
+s32 e1000_check_for_ack(struct e1000_hw *, u16);
+s32 e1000_check_for_rst(struct e1000_hw *, u16);
+void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
+s32 e1000_init_mbx_params_pf(struct e1000_hw *);
#endif /* _E1000_MBX_H_ */
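The message-type and message-info masks above define how replies are encoded: a reply is the original command or'd with ACK or NACK, and bits 23:16 may carry command-specific extra data. A minimal decoding sketch (illustrative, not part of the driver sources):

/* Hedged sketch: check whether the other side ACKed a given command,
 * ignoring the command-specific extra info in bits 23:16.
 */
static s32 example_check_reply(u32 reply, u32 cmd)
{
        u32 base = reply & ~E1000_VT_MSGINFO_MASK;

        if (base == (cmd | E1000_VT_MSGTYPE_ACK))
                return E1000_SUCCESS;   /* request granted */
        if (base == (cmd | E1000_VT_MSGTYPE_NACK))
                return -E1000_ERR_MBX;  /* request refused */
        return -E1000_ERR_MBX;          /* malformed or unrelated reply */
}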
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 7f9cd7cbd353..2e19bc6960d6 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,57 +12,120 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
-#include <linux/if_ether.h>
-#include <linux/delay.h>
+#include "e1000_api.h"
+
+static void e1000_reload_nvm_generic(struct e1000_hw *hw);
+
+/**
+ * e1000_init_nvm_ops_generic - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the function pointers to no-op functions
+ **/
+void e1000_init_nvm_ops_generic(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ DEBUGFUNC("e1000_init_nvm_ops_generic");
+
+ /* Initialize function pointers */
+ nvm->ops.init_params = e1000_null_ops_generic;
+ nvm->ops.acquire = e1000_null_ops_generic;
+ nvm->ops.read = e1000_null_read_nvm;
+ nvm->ops.release = e1000_null_nvm_generic;
+ nvm->ops.reload = e1000_reload_nvm_generic;
+ nvm->ops.update = e1000_null_ops_generic;
+ nvm->ops.valid_led_default = e1000_null_led_default;
+ nvm->ops.validate = e1000_null_ops_generic;
+ nvm->ops.write = e1000_null_write_nvm;
+}
+
+/**
+ * e1000_null_read_nvm - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_read_nvm(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b,
+ u16 E1000_UNUSEDARG *c)
+{
+ DEBUGFUNC("e1000_null_read_nvm");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_nvm_generic - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_nvm_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_nvm_generic");
+ return;
+}
-#include "e1000_mac.h"
-#include "e1000_nvm.h"
+/**
+ * e1000_null_led_default - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_led_default(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG *data)
+{
+ DEBUGFUNC("e1000_null_led_default");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_write_nvm - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_write_nvm(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b,
+ u16 E1000_UNUSEDARG *c)
+{
+ DEBUGFUNC("e1000_null_write_nvm");
+ return E1000_SUCCESS;
+}
/**
- * igb_raise_eec_clk - Raise EEPROM clock
+ * e1000_raise_eec_clk - Raise EEPROM clock
* @hw: pointer to the HW structure
* @eecd: pointer to the EEPROM
*
* Enable/Raise the EEPROM clock bit.
**/
-static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
*eecd = *eecd | E1000_EECD_SK;
- wr32(E1000_EECD, *eecd);
- wrfl();
- udelay(hw->nvm.delay_usec);
+ E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(hw->nvm.delay_usec);
}
/**
- * igb_lower_eec_clk - Lower EEPROM clock
+ * e1000_lower_eec_clk - Lower EEPROM clock
* @hw: pointer to the HW structure
* @eecd: pointer to the EEPROM
*
* Clear/Lower the EEPROM clock bit.
**/
-static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
*eecd = *eecd & ~E1000_EECD_SK;
- wr32(E1000_EECD, *eecd);
- wrfl();
- udelay(hw->nvm.delay_usec);
+ E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(hw->nvm.delay_usec);
}
/**
- * igb_shift_out_eec_bits - Shift data bits our to the EEPROM
+ * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
* @hw: pointer to the HW structure
* @data: data to send to the EEPROM
* @count: number of bits to shift out
@@ -71,12 +134,14 @@ static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
* "data" parameter will be shifted out to the EEPROM one bit at a time.
* In order to do this, "data" must be broken down into bits.
**/
-static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
{
struct e1000_nvm_info *nvm = &hw->nvm;
- u32 eecd = rd32(E1000_EECD);
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
u32 mask;
+ DEBUGFUNC("e1000_shift_out_eec_bits");
+
mask = 0x01 << (count - 1);
if (nvm->type == e1000_nvm_eeprom_spi)
eecd |= E1000_EECD_DO;
@@ -87,23 +152,23 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
if (data & mask)
eecd |= E1000_EECD_DI;
- wr32(E1000_EECD, eecd);
- wrfl();
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
- udelay(nvm->delay_usec);
+ usec_delay(nvm->delay_usec);
- igb_raise_eec_clk(hw, &eecd);
- igb_lower_eec_clk(hw, &eecd);
+ e1000_raise_eec_clk(hw, &eecd);
+ e1000_lower_eec_clk(hw, &eecd);
mask >>= 1;
} while (mask);
eecd &= ~E1000_EECD_DI;
- wr32(E1000_EECD, eecd);
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
}
/**
- * igb_shift_in_eec_bits - Shift data bits in from the EEPROM
+ * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
* @hw: pointer to the HW structure
* @count: number of bits to shift in
*
@@ -113,121 +178,124 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
* "DO" bit. During this "shifting in" process the data in "DI" bit should
* always be clear.
**/
-static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
{
u32 eecd;
u32 i;
u16 data;
- eecd = rd32(E1000_EECD);
+ DEBUGFUNC("e1000_shift_in_eec_bits");
+
+ eecd = E1000_READ_REG(hw, E1000_EECD);
eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
data = 0;
for (i = 0; i < count; i++) {
data <<= 1;
- igb_raise_eec_clk(hw, &eecd);
+ e1000_raise_eec_clk(hw, &eecd);
- eecd = rd32(E1000_EECD);
+ eecd = E1000_READ_REG(hw, E1000_EECD);
eecd &= ~E1000_EECD_DI;
if (eecd & E1000_EECD_DO)
data |= 1;
- igb_lower_eec_clk(hw, &eecd);
+ e1000_lower_eec_clk(hw, &eecd);
}
return data;
}
/**
- * igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion
* @hw: pointer to the HW structure
* @ee_reg: EEPROM flag for polling
*
* Polls the EEPROM status bit for either read or write completion based
* upon the value of 'ee_reg'.
**/
-static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
{
u32 attempts = 100000;
u32 i, reg = 0;
- s32 ret_val = -E1000_ERR_NVM;
+
+ DEBUGFUNC("e1000_poll_eerd_eewr_done");
for (i = 0; i < attempts; i++) {
if (ee_reg == E1000_NVM_POLL_READ)
- reg = rd32(E1000_EERD);
+ reg = E1000_READ_REG(hw, E1000_EERD);
else
- reg = rd32(E1000_EEWR);
+ reg = E1000_READ_REG(hw, E1000_EEWR);
- if (reg & E1000_NVM_RW_REG_DONE) {
- ret_val = 0;
- break;
- }
+ if (reg & E1000_NVM_RW_REG_DONE)
+ return E1000_SUCCESS;
- udelay(5);
+ usec_delay(5);
}
- return ret_val;
+ return -E1000_ERR_NVM;
}
/**
- * igb_acquire_nvm - Generic request for access to EEPROM
+ * e1000_acquire_nvm_generic - Generic request for access to EEPROM
* @hw: pointer to the HW structure
*
* Set the EEPROM access request bit and wait for EEPROM access grant bit.
* Return successful if access grant bit set, else clear the request for
* EEPROM access and return -E1000_ERR_NVM (-1).
**/
-s32 igb_acquire_nvm(struct e1000_hw *hw)
+s32 e1000_acquire_nvm_generic(struct e1000_hw *hw)
{
- u32 eecd = rd32(E1000_EECD);
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
- s32 ret_val = 0;
+ DEBUGFUNC("e1000_acquire_nvm_generic");
- wr32(E1000_EECD, eecd | E1000_EECD_REQ);
- eecd = rd32(E1000_EECD);
+ E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ);
+ eecd = E1000_READ_REG(hw, E1000_EECD);
while (timeout) {
if (eecd & E1000_EECD_GNT)
break;
- udelay(5);
- eecd = rd32(E1000_EECD);
+ usec_delay(5);
+ eecd = E1000_READ_REG(hw, E1000_EECD);
timeout--;
}
if (!timeout) {
eecd &= ~E1000_EECD_REQ;
- wr32(E1000_EECD, eecd);
- hw_dbg("Could not acquire NVM grant\n");
- ret_val = -E1000_ERR_NVM;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ DEBUGOUT("Could not acquire NVM grant\n");
+ return -E1000_ERR_NVM;
}
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_standby_nvm - Return EEPROM to standby state
+ * e1000_standby_nvm - Return EEPROM to standby state
* @hw: pointer to the HW structure
*
* Return the EEPROM to a standby state.
**/
-static void igb_standby_nvm(struct e1000_hw *hw)
+static void e1000_standby_nvm(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
- u32 eecd = rd32(E1000_EECD);
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+
+ DEBUGFUNC("e1000_standby_nvm");
if (nvm->type == e1000_nvm_eeprom_spi) {
/* Toggle CS to flush commands */
eecd |= E1000_EECD_CS;
- wr32(E1000_EECD, eecd);
- wrfl();
- udelay(nvm->delay_usec);
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(nvm->delay_usec);
eecd &= ~E1000_EECD_CS;
- wr32(E1000_EECD, eecd);
- wrfl();
- udelay(nvm->delay_usec);
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(nvm->delay_usec);
}
}
@@ -241,53 +309,57 @@ static void e1000_stop_nvm(struct e1000_hw *hw)
{
u32 eecd;
- eecd = rd32(E1000_EECD);
+ DEBUGFUNC("e1000_stop_nvm");
+
+ eecd = E1000_READ_REG(hw, E1000_EECD);
if (hw->nvm.type == e1000_nvm_eeprom_spi) {
/* Pull CS high */
eecd |= E1000_EECD_CS;
- igb_lower_eec_clk(hw, &eecd);
+ e1000_lower_eec_clk(hw, &eecd);
}
}
/**
- * igb_release_nvm - Release exclusive access to EEPROM
+ * e1000_release_nvm_generic - Release exclusive access to EEPROM
* @hw: pointer to the HW structure
*
* Stop any current commands to the EEPROM and clear the EEPROM request bit.
**/
-void igb_release_nvm(struct e1000_hw *hw)
+void e1000_release_nvm_generic(struct e1000_hw *hw)
{
u32 eecd;
+ DEBUGFUNC("e1000_release_nvm_generic");
+
e1000_stop_nvm(hw);
- eecd = rd32(E1000_EECD);
+ eecd = E1000_READ_REG(hw, E1000_EECD);
eecd &= ~E1000_EECD_REQ;
- wr32(E1000_EECD, eecd);
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
}
/**
- * igb_ready_nvm_eeprom - Prepares EEPROM for read/write
+ * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
* @hw: pointer to the HW structure
*
* Sets up the EEPROM for reading and writing.
**/
-static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
+static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
- u32 eecd = rd32(E1000_EECD);
- s32 ret_val = 0;
- u16 timeout = 0;
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
u8 spi_stat_reg;
+ DEBUGFUNC("e1000_ready_nvm_eeprom");
if (nvm->type == e1000_nvm_eeprom_spi) {
+ u16 timeout = NVM_MAX_RETRY_SPI;
+
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
- wr32(E1000_EECD, eecd);
- wrfl();
- udelay(1);
- timeout = NVM_MAX_RETRY_SPI;
+ E1000_WRITE_REG(hw, E1000_EECD, eecd);
+ E1000_WRITE_FLUSH(hw);
+ usec_delay(1);
/* Read "Status Register" repeatedly until the LSB is cleared.
* The EEPROM will signal that the command has been completed
@@ -295,30 +367,28 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
* not cleared within 'timeout', then error out.
*/
while (timeout) {
- igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
- hw->nvm.opcode_bits);
- spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8);
+ e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+ hw->nvm.opcode_bits);
+ spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
break;
- udelay(5);
- igb_standby_nvm(hw);
+ usec_delay(5);
+ e1000_standby_nvm(hw);
timeout--;
}
if (!timeout) {
- hw_dbg("SPI NVM Status error\n");
- ret_val = -E1000_ERR_NVM;
- goto out;
+ DEBUGOUT("SPI NVM Status error\n");
+ return -E1000_ERR_NVM;
}
}
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_read_nvm_spi - Read EEPROM's using SPI
+ * e1000_read_nvm_spi - Read EEPROM's using SPI
* @hw: pointer to the HW structure
* @offset: offset of word in the EEPROM to read
* @words: number of words to read
@@ -326,7 +396,7 @@ out:
*
* Reads a 16 bit word from the EEPROM.
**/
-s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 i = 0;
@@ -334,51 +404,51 @@ s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
u16 word_in;
u8 read_opcode = NVM_READ_OPCODE_SPI;
+ DEBUGFUNC("e1000_read_nvm_spi");
+
/* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
- hw_dbg("nvm parameter(s) out of bounds\n");
- ret_val = -E1000_ERR_NVM;
- goto out;
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
}
ret_val = nvm->ops.acquire(hw);
if (ret_val)
- goto out;
+ return ret_val;
- ret_val = igb_ready_nvm_eeprom(hw);
+ ret_val = e1000_ready_nvm_eeprom(hw);
if (ret_val)
goto release;
- igb_standby_nvm(hw);
+ e1000_standby_nvm(hw);
if ((nvm->address_bits == 8) && (offset >= 128))
read_opcode |= NVM_A8_OPCODE_SPI;
/* Send the READ command (opcode + addr) */
- igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
- igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
+ e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+ e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
/* Read the data. SPI NVMs increment the address with each byte
* read and will roll over if reading beyond the end. This allows
* us to read the whole NVM from any offset
*/
for (i = 0; i < words; i++) {
- word_in = igb_shift_in_eec_bits(hw, 16);
+ word_in = e1000_shift_in_eec_bits(hw, 16);
data[i] = (word_in >> 8) | (word_in << 8);
}
release:
nvm->ops.release(hw);
-out:
return ret_val;
}
/**
- * igb_read_nvm_eerd - Reads EEPROM using EERD register
+ * e1000_read_nvm_eerd - Reads EEPROM using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the EEPROM to read
* @words: number of words to read
@@ -386,41 +456,44 @@ out:
*
* Reads a 16 bit word from the EEPROM using the EERD register.
**/
-s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 i, eerd = 0;
- s32 ret_val = 0;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_read_nvm_eerd");
/* A check for invalid values: offset too large, too many words,
- * and not enough words.
+ * too many words for the offset, and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
- hw_dbg("nvm parameter(s) out of bounds\n");
- ret_val = -E1000_ERR_NVM;
- goto out;
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
}
for (i = 0; i < words; i++) {
eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
- E1000_NVM_RW_REG_START;
+ E1000_NVM_RW_REG_START;
- wr32(E1000_EERD, eerd);
- ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+ E1000_WRITE_REG(hw, E1000_EERD, eerd);
+ ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
if (ret_val)
break;
- data[i] = (rd32(E1000_EERD) >>
- E1000_NVM_RW_REG_DATA);
+ data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
+ E1000_NVM_RW_REG_DATA);
}
-out:
+ if (ret_val)
+ DEBUGOUT1("NVM read error: %d\n", ret_val);
+
return ret_val;
}
/**
- * igb_write_nvm_spi - Write to EEPROM using SPI
+ * e1000_write_nvm_spi - Write to EEPROM using SPI
* @hw: pointer to the HW structure
* @offset: offset within the EEPROM to be written to
* @words: number of words to write
@@ -429,21 +502,23 @@ out:
* Writes data to EEPROM at offset using SPI interface.
*
* If e1000_update_nvm_checksum is not called after this function , the
- * EEPROM will most likley contain an invalid checksum.
+ * EEPROM will most likely contain an invalid checksum.
**/
-s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
s32 ret_val = -E1000_ERR_NVM;
u16 widx = 0;
+ DEBUGFUNC("e1000_write_nvm_spi");
+
/* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
- hw_dbg("nvm parameter(s) out of bounds\n");
- return ret_val;
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ return -E1000_ERR_NVM;
}
while (widx < words) {
@@ -453,19 +528,19 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
if (ret_val)
return ret_val;
- ret_val = igb_ready_nvm_eeprom(hw);
+ ret_val = e1000_ready_nvm_eeprom(hw);
if (ret_val) {
nvm->ops.release(hw);
return ret_val;
}
- igb_standby_nvm(hw);
+ e1000_standby_nvm(hw);
/* Send the WRITE ENABLE command (8 bit opcode) */
- igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+ e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
nvm->opcode_bits);
- igb_standby_nvm(hw);
+ e1000_standby_nvm(hw);
/* Some SPI eeproms use the 8th address bit embedded in the
* opcode
@@ -474,23 +549,23 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
write_opcode |= NVM_A8_OPCODE_SPI;
/* Send the Write command (8-bit opcode + addr) */
- igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
- igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+ e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+ e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
nvm->address_bits);
/* Loop to allow for up to whole page write of eeprom */
while (widx < words) {
u16 word_out = data[widx];
word_out = (word_out >> 8) | (word_out << 8);
- igb_shift_out_eec_bits(hw, word_out, 16);
+ e1000_shift_out_eec_bits(hw, word_out, 16);
widx++;
if ((((offset + widx) * 2) % nvm->page_size) == 0) {
- igb_standby_nvm(hw);
+ e1000_standby_nvm(hw);
break;
}
}
- usleep_range(1000, 2000);
+ msec_delay(10);
nvm->ops.release(hw);
}
@@ -498,132 +573,196 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
}
/**
- * igb_read_part_string - Read device part number
+ * e1000_read_pba_string_generic - Read device part number
* @hw: pointer to the HW structure
- * @part_num: pointer to device part number
- * @part_num_size: size of part number buffer
+ * @pba_num: pointer to device part number
+ * @pba_num_size: size of part number buffer
*
* Reads the product board assembly (PBA) number from the EEPROM and stores
- * the value in part_num.
+ * the value in pba_num.
**/
-s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
{
s32 ret_val;
u16 nvm_data;
- u16 pointer;
+ u16 pba_ptr;
u16 offset;
u16 length;
- if (part_num == NULL) {
- hw_dbg("PBA string buffer was null\n");
- ret_val = E1000_ERR_INVALID_ARGUMENT;
- goto out;
+ DEBUGFUNC("e1000_read_pba_string_generic");
+
+ if ((hw->mac.type >= e1000_i210) &&
+ !e1000_get_flash_presence_i210(hw)) {
+ DEBUGOUT("Flashless no PBA string\n");
+ return -E1000_ERR_NVM_PBA_SECTION;
+ }
+
+ if (pba_num == NULL) {
+ DEBUGOUT("PBA string buffer was null\n");
+ return -E1000_ERR_INVALID_ARGUMENT;
}
ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
if (ret_val) {
- hw_dbg("NVM Read Error\n");
- goto out;
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
}
- ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer);
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
if (ret_val) {
- hw_dbg("NVM Read Error\n");
- goto out;
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
}
/* if nvm_data is not ptr guard the PBA must be in legacy format which
- * means pointer is actually our second data word for the PBA number
+ * means pba_ptr is actually our second data word for the PBA number
* and we can decode it into an ascii string
*/
if (nvm_data != NVM_PBA_PTR_GUARD) {
- hw_dbg("NVM PBA number is not stored as string\n");
+ DEBUGOUT("NVM PBA number is not stored as string\n");
- /* we will need 11 characters to store the PBA */
- if (part_num_size < 11) {
- hw_dbg("PBA string buffer too small\n");
+ /* make sure callers buffer is big enough to store the PBA */
+ if (pba_num_size < E1000_PBANUM_LENGTH) {
+ DEBUGOUT("PBA string buffer too small\n");
return E1000_ERR_NO_SPACE;
}
- /* extract hex string from data and pointer */
- part_num[0] = (nvm_data >> 12) & 0xF;
- part_num[1] = (nvm_data >> 8) & 0xF;
- part_num[2] = (nvm_data >> 4) & 0xF;
- part_num[3] = nvm_data & 0xF;
- part_num[4] = (pointer >> 12) & 0xF;
- part_num[5] = (pointer >> 8) & 0xF;
- part_num[6] = '-';
- part_num[7] = 0;
- part_num[8] = (pointer >> 4) & 0xF;
- part_num[9] = pointer & 0xF;
+ /* extract hex string from data and pba_ptr */
+ pba_num[0] = (nvm_data >> 12) & 0xF;
+ pba_num[1] = (nvm_data >> 8) & 0xF;
+ pba_num[2] = (nvm_data >> 4) & 0xF;
+ pba_num[3] = nvm_data & 0xF;
+ pba_num[4] = (pba_ptr >> 12) & 0xF;
+ pba_num[5] = (pba_ptr >> 8) & 0xF;
+ pba_num[6] = '-';
+ pba_num[7] = 0;
+ pba_num[8] = (pba_ptr >> 4) & 0xF;
+ pba_num[9] = pba_ptr & 0xF;
/* put a null character on the end of our string */
- part_num[10] = '\0';
+ pba_num[10] = '\0';
/* switch all the data but the '-' to hex char */
for (offset = 0; offset < 10; offset++) {
- if (part_num[offset] < 0xA)
- part_num[offset] += '0';
- else if (part_num[offset] < 0x10)
- part_num[offset] += 'A' - 0xA;
+ if (pba_num[offset] < 0xA)
+ pba_num[offset] += '0';
+ else if (pba_num[offset] < 0x10)
+ pba_num[offset] += 'A' - 0xA;
}
- goto out;
+ return E1000_SUCCESS;
}
- ret_val = hw->nvm.ops.read(hw, pointer, 1, &length);
+ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
if (ret_val) {
- hw_dbg("NVM Read Error\n");
- goto out;
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
}
if (length == 0xFFFF || length == 0) {
- hw_dbg("NVM PBA number section invalid length\n");
- ret_val = E1000_ERR_NVM_PBA_SECTION;
- goto out;
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ return -E1000_ERR_NVM_PBA_SECTION;
}
- /* check if part_num buffer is big enough */
- if (part_num_size < (((u32)length * 2) - 1)) {
- hw_dbg("PBA string buffer too small\n");
- ret_val = E1000_ERR_NO_SPACE;
- goto out;
+ /* check if pba_num buffer is big enough */
+ if (pba_num_size < (((u32)length * 2) - 1)) {
+ DEBUGOUT("PBA string buffer too small\n");
+ return -E1000_ERR_NO_SPACE;
}
/* trim pba length from start of string */
- pointer++;
+ pba_ptr++;
length--;
for (offset = 0; offset < length; offset++) {
- ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data);
+ ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data);
if (ret_val) {
- hw_dbg("NVM Read Error\n");
- goto out;
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
}
- part_num[offset * 2] = (u8)(nvm_data >> 8);
- part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+ pba_num[offset * 2] = (u8)(nvm_data >> 8);
+ pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
}
- part_num[offset * 2] = '\0';
+ pba_num[offset * 2] = '\0';
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_read_mac_addr - Read device MAC address
+ * e1000_read_pba_length_generic - Read device part number length
+ * @hw: pointer to the HW structure
+ * @pba_num_size: size of part number buffer
+ *
+ * Reads the product board assembly (PBA) number length from the EEPROM and
+ * stores the value in pba_num_size.
+ **/
+s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size)
+{
+ s32 ret_val;
+ u16 nvm_data;
+ u16 pba_ptr;
+ u16 length;
+
+ DEBUGFUNC("e1000_read_pba_length_generic");
+
+ if (pba_num_size == NULL) {
+ DEBUGOUT("PBA buffer size was null\n");
+ return -E1000_ERR_INVALID_ARGUMENT;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ /* if data is not ptr guard the PBA must be in legacy format */
+ if (nvm_data != NVM_PBA_PTR_GUARD) {
+ *pba_num_size = E1000_PBANUM_LENGTH;
+ return E1000_SUCCESS;
+ }
+
+ ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
+ }
+
+ if (length == 0xFFFF || length == 0) {
+ DEBUGOUT("NVM PBA number section invalid length\n");
+ return -E1000_ERR_NVM_PBA_SECTION;
+ }
+
+ /* Convert from length in u16 values to u8 chars, add 1 for NULL,
+ * and subtract 2 because length field is included in length.
+ */
+ *pba_num_size = ((u32)length * 2) - 1;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_mac_addr_generic - Read device MAC address
* @hw: pointer to the HW structure
*
* Reads the device MAC address from the EEPROM and stores the value.
* Since devices with two ports use the same EEPROM, we increment the
* last bit in the MAC address for the second port.
**/
-s32 igb_read_mac_addr(struct e1000_hw *hw)
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
{
u32 rar_high;
u32 rar_low;
u16 i;
- rar_high = rd32(E1000_RAH(0));
- rar_low = rd32(E1000_RAL(0));
+ rar_high = E1000_READ_REG(hw, E1000_RAH(0));
+ rar_low = E1000_READ_REG(hw, E1000_RAL(0));
for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
@@ -631,120 +770,148 @@ s32 igb_read_mac_addr(struct e1000_hw *hw)
for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
- for (i = 0; i < ETH_ALEN; i++)
+ for (i = 0; i < ETH_ADDR_LEN; i++)
hw->mac.addr[i] = hw->mac.perm_addr[i];
- return 0;
+ return E1000_SUCCESS;
}
/**
- * igb_validate_nvm_checksum - Validate EEPROM checksum
+ * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
**/
-s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
+s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw)
{
- s32 ret_val = 0;
+ s32 ret_val;
u16 checksum = 0;
u16 i, nvm_data;
+ DEBUGFUNC("e1000_validate_nvm_checksum_generic");
+
for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
if (ret_val) {
- hw_dbg("NVM Read Error\n");
- goto out;
+ DEBUGOUT("NVM Read Error\n");
+ return ret_val;
}
checksum += nvm_data;
}
if (checksum != (u16) NVM_SUM) {
- hw_dbg("NVM Checksum Invalid\n");
- ret_val = -E1000_ERR_NVM;
- goto out;
+ DEBUGOUT("NVM Checksum Invalid\n");
+ return -E1000_ERR_NVM;
}
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_update_nvm_checksum - Update EEPROM checksum
+ * e1000_update_nvm_checksum_generic - Update EEPROM checksum
* @hw: pointer to the HW structure
*
* Updates the EEPROM checksum by reading/adding each word of the EEPROM
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM.
**/
-s32 igb_update_nvm_checksum(struct e1000_hw *hw)
+s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
{
- s32 ret_val;
+ s32 ret_val;
u16 checksum = 0;
u16 i, nvm_data;
+ DEBUGFUNC("e1000_update_nvm_checksum");
+
for (i = 0; i < NVM_CHECKSUM_REG; i++) {
ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
if (ret_val) {
- hw_dbg("NVM Read Error while updating checksum.\n");
- goto out;
+ DEBUGOUT("NVM Read Error while updating checksum.\n");
+ return ret_val;
}
checksum += nvm_data;
}
checksum = (u16) NVM_SUM - checksum;
ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
- hw_dbg("NVM Write Error while updating checksum.\n");
+ DEBUGOUT("NVM Write Error while updating checksum.\n");
-out:
return ret_val;
}
/**
- * igb_get_fw_version - Get firmware version information
+ * e1000_reload_nvm_generic - Reloads EEPROM
* @hw: pointer to the HW structure
- * @fw_vers: pointer to output structure
*
- * unsupported MAC types will return all 0 version structure
+ * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ * extended control register.
**/
-void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
+static void e1000_reload_nvm_generic(struct e1000_hw *hw)
{
- u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
- u16 fw_version;
+ u32 ctrl_ext;
+
+ DEBUGFUNC("e1000_reload_nvm_generic");
+
+ usec_delay(10);
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_get_fw_version - Get firmware version information
+ * @hw: pointer to the HW structure
+ * @fw_vers: pointer to output version structure
+ *
+ * unsupported/not present features return 0 in version structure
+ **/
+void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
+{
+ u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
+ u8 q, hval, rem, result;
+ u16 comb_verh, comb_verl, comb_offset;
memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+ /* basic eeprom version numbers, bits used vary by part and by tool
+ * used to create the nvm images */
+ /* Check which data format we have */
switch (hw->mac.type) {
case e1000_i211:
- igb_read_invm_version(hw, fw_vers);
+ e1000_read_invm_version(hw, fw_vers);
return;
case e1000_82575:
case e1000_82576:
case e1000_82580:
case e1000_i354:
- case e1000_i350:
- case e1000_i210:
+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
+ /* Use this format, unless EETRACK ID exists,
+ * then use alternate format
+ */
+ if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+ fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
+ goto etrack_id;
+ }
break;
- default:
- return;
- }
- /* basic eeprom version numbers */
- hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
- fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
- fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK);
-
- /* etrack id */
- hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
- hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
- fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl;
-
- switch (hw->mac.type) {
case e1000_i210:
- case e1000_i354:
+ if (!(e1000_get_flash_presence_i210(hw))) {
+ e1000_read_invm_version(hw, fw_vers);
+ return;
+ }
+ /* fall through */
case e1000_i350:
+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
/* find combo image version */
hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
- if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) {
+ if ((comb_offset != 0x0) &&
+ (comb_offset != NVM_VER_INVALID)) {
hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ 1), 1, &comb_verh);
@@ -760,15 +927,48 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
fw_vers->or_major =
comb_verl >> NVM_COMB_VER_SHFT;
fw_vers->or_build =
- ((comb_verl << NVM_COMB_VER_SHFT)
- | (comb_verh >> NVM_COMB_VER_SHFT));
+ (comb_verl << NVM_COMB_VER_SHFT)
+ | (comb_verh >> NVM_COMB_VER_SHFT);
fw_vers->or_patch =
comb_verh & NVM_COMB_VER_MASK;
}
}
break;
default:
- break;
+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
+ return;
+ }
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+
+ /* check for old style version format in newer images */
+ if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
+ eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
+ } else {
+ eeprom_verl = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ }
+ /* Convert minor value to hex before assigning to output struct
+ * Val to be converted will not be higher than 99, per tool output
+ */
+ q = eeprom_verl / NVM_HEX_CONV;
+ hval = q * NVM_HEX_TENS;
+ rem = eeprom_verl % NVM_HEX_CONV;
+ result = hval + rem;
+ fw_vers->eep_minor = result;
+
+etrack_id:
+ if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
+ | eeprom_verl;
+ } else if ((etrack_test & NVM_ETRACK_VALID) == 0) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) |
+ eeprom_verl;
}
- return;
}
+
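The q/hval/rem arithmetic at the end of e1000_get_fw_version() above is easier to follow with concrete numbers. A standalone sketch, assuming NVM_HEX_CONV is 16 and NVM_HEX_TENS is 10 (their usual values in this driver family; the constants themselves are not shown in this patch):

/* Hedged sketch: the minor-version conversion, with assumed constants.
 * A raw field of 0x23 becomes decimal 23:
 *   q = 0x23 / 16 = 2,  hval = 2 * 10 = 20,  rem = 0x23 % 16 = 3  ->  23
 */
static u8 example_eep_minor(u16 raw_minor)
{
        u8 q    = raw_minor / 16;       /* NVM_HEX_CONV, assumed value */
        u8 hval = q * 10;               /* NVM_HEX_TENS, assumed value */
        u8 rem  = raw_minor % 16;

        return hval + rem;
}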
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 6bfc0c43aace..a4263113d72d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,14 +12,11 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -28,22 +25,11 @@
#ifndef _E1000_NVM_H_
#define _E1000_NVM_H_
-s32 igb_acquire_nvm(struct e1000_hw *hw);
-void igb_release_nvm(struct e1000_hw *hw);
-s32 igb_read_mac_addr(struct e1000_hw *hw);
-s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
-s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
- u32 part_num_size);
-s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
-s32 igb_update_nvm_checksum(struct e1000_hw *hw);
-
struct e1000_fw_version {
u32 etrack_id;
u16 eep_major;
u16 eep_minor;
+ u16 eep_build;
u8 invm_major;
u8 invm_minor;
@@ -54,6 +40,31 @@ struct e1000_fw_version {
u16 or_build;
u16 or_patch;
};
-void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers);
+
+void e1000_init_nvm_ops_generic(struct e1000_hw *hw);
+s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
+void e1000_null_nvm_generic(struct e1000_hw *hw);
+s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data);
+s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
+s32 e1000_acquire_nvm_generic(struct e1000_hw *hw);
+
+s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size);
+s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data);
+s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw);
+s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
+s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
+void e1000_release_nvm_generic(struct e1000_hw *hw);
+void e1000_get_fw_version(struct e1000_hw *hw,
+ struct e1000_fw_version *fw_vers);
+
+#define E1000_STM_OPCODE 0xDB00
#endif
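Since the header above only renames the NVM API and extends struct e1000_fw_version, here is a short hedged sketch of how a caller might consume it; the helper is hypothetical, uses only the fields visible above, and assumes snprintf(), size_t and struct e1000_hw from the surrounding driver.

/* Hypothetical caller-side sketch, not part of the patch: report the
 * firmware version filled in by e1000_get_fw_version() declared above.
 */
static void example_report_fw_version(struct e1000_hw *hw,
				      char *buf, size_t len)
{
	struct e1000_fw_version fw = {};

	e1000_get_fw_version(hw, &fw);

	/* eep_major, eep_minor and etrack_id are fields shown above */
	snprintf(buf, len, "%u.%u, 0x%08x",
		 fw.eep_major, fw.eep_minor, fw.etrack_id);
}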
diff --git a/drivers/net/ethernet/intel/igb/e1000_osdep.h b/drivers/net/ethernet/intel/igb/e1000_osdep.h
new file mode 100644
index 000000000000..422bba05bea0
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/e1000_osdep.h
@@ -0,0 +1,141 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2015 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* glue for the OS independent part of e1000
+ * includes register access macros
+ */
+
+#ifndef _E1000_OSDEP_H_
+#define _E1000_OSDEP_H_
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/sched.h>
+#include "kcompat.h"
+
+#define usec_delay(x) udelay(x)
+#define usec_delay_irq(x) udelay(x)
+#ifndef msec_delay
+#define msec_delay(x) do { \
+ /* Don't mdelay in interrupt context! */ \
+ if (in_interrupt()) \
+ BUG(); \
+ else \
+ msleep(x); \
+} while (0)
+
+/* Some workarounds require millisecond delays and are run during interrupt
+ * context. Most notably, when establishing link, the phy may need tweaking
+ * but cannot process phy register reads/writes faster than millisecond
+ * intervals...and we establish link due to a "link status change" interrupt.
+ */
+#define msec_delay_irq(x) mdelay(x)
+
+#define E1000_READ_REG(x, y) e1000_read_reg(x, y)
+#endif
+
+#define PCI_COMMAND_REGISTER PCI_COMMAND
+#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
+#define ETH_ADDR_LEN ETH_ALEN
+
+#ifdef __BIG_ENDIAN
+#define E1000_BIG_ENDIAN __BIG_ENDIAN
+#endif
+
+#ifdef DEBUG
+#define DEBUGOUT(S) pr_debug(S)
+#define DEBUGOUT1(S, A...) pr_debug(S, ## A)
+#else
+#define DEBUGOUT(S)
+#define DEBUGOUT1(S, A...)
+#endif
+
+#ifdef DEBUG_FUNC
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+#else
+#define DEBUGFUNC(F)
+#endif
+#define DEBUGOUT2 DEBUGOUT1
+#define DEBUGOUT3 DEBUGOUT2
+#define DEBUGOUT7 DEBUGOUT3
+
+#define E1000_REGISTER(a, reg) reg
+
+/* forward declaration */
+struct e1000_hw;
+
+/* write operations, indexed using DWORDS */
+#define E1000_WRITE_REG(hw, reg, val) \
+do { \
+ u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
+ if (!E1000_REMOVED(hw_addr)) \
+ writel((val), &hw_addr[(reg)]); \
+} while (0)
+
+u32 e1000_read_reg(struct e1000_hw *hw, u32 reg);
+
+#define E1000_WRITE_REG_ARRAY(hw, reg, idx, val) \
+ E1000_WRITE_REG((hw), (reg) + ((idx) << 2), (val))
+
+#define E1000_READ_REG_ARRAY(hw, reg, idx) ( \
+ e1000_read_reg((hw), (reg) + ((idx) << 2)))
+
+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
+#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
+ writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + \
+ ((offset) << 1))))
+
+#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
+ readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1)))
+
+#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
+ writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))))
+
+#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
+ readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))
+
+#define E1000_WRITE_REG_IO(a, reg, offset) do { \
+ outl(reg, ((a)->io_base)); \
+ outl(offset, ((a)->io_base + 4)); \
+ } while (0)
+
+#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
+
+#define E1000_WRITE_FLASH_REG(a, reg, value) ( \
+ writel((value), ((a)->flash_address + reg)))
+
+#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \
+ writew((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg))
+
+#define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg))
+
+#define E1000_REMOVED(h) unlikely(!(h))
+
+#endif /* _E1000_OSDEP_H_ */
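The accessors defined in this new header are only declared here, so what follows is a minimal usage sketch, not part of the patch: it illustrates that E1000_WRITE_REG() snapshots hw->hw_addr and silently skips the MMIO write when E1000_REMOVED() reports the mapping gone (surprise removal), and that msec_delay() must not be used in interrupt context. The helper name is hypothetical; E1000_CTRL, E1000_CTRL_RST and E1000_STATUS are assumed from the driver's existing register headers, and u32 from the kernel's types.

/* Hypothetical helper, kernel context assumed. */
static inline void example_soft_reset(struct e1000_hw *hw)
{
	u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);

	/* Safe even after surprise removal: the macro snapshots
	 * hw->hw_addr and skips the write when E1000_REMOVED() is true.
	 */
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
	E1000_WRITE_FLUSH(hw);	/* reads E1000_STATUS to post the write */
	msec_delay(1);		/* BUG()s if called in interrupt context */
}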
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 5dec66a96793..51726916215e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,140 +12,260 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
-#include <linux/if_ether.h>
-#include <linux/delay.h>
-
-#include "e1000_mac.h"
-#include "e1000_phy.h"
-
-static s32 igb_phy_setup_autoneg(struct e1000_hw *hw);
-static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
- u16 *phy_ctrl);
-static s32 igb_wait_autoneg(struct e1000_hw *hw);
-static s32 igb_set_master_slave_mode(struct e1000_hw *hw);
+#include "e1000_api.h"
+static s32 e1000_wait_autoneg(struct e1000_hw *hw);
/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] = {
0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
- (sizeof(e1000_m88_cable_length_table) / \
- sizeof(e1000_m88_cable_length_table[0]))
+ (sizeof(e1000_m88_cable_length_table) / \
+ sizeof(e1000_m88_cable_length_table[0]))
static const u16 e1000_igp_2_cable_length_table[] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
- 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
- 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
- 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
- 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
- 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
- 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
- 104, 109, 114, 118, 121, 124};
+ 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
+ 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
+ 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
+ 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
+ 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
+ 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
+ 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
+ 124};
#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
- (sizeof(e1000_igp_2_cable_length_table) / \
- sizeof(e1000_igp_2_cable_length_table[0]))
+ (sizeof(e1000_igp_2_cable_length_table) / \
+ sizeof(e1000_igp_2_cable_length_table[0]))
+
+/**
+ * e1000_init_phy_ops_generic - Initialize PHY function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the function pointers to no-op functions
+ **/
+void e1000_init_phy_ops_generic(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ DEBUGFUNC("e1000_init_phy_ops_generic");
+
+ /* Initialize function pointers */
+ phy->ops.init_params = e1000_null_ops_generic;
+ phy->ops.acquire = e1000_null_ops_generic;
+ phy->ops.check_polarity = e1000_null_ops_generic;
+ phy->ops.check_reset_block = e1000_null_ops_generic;
+ phy->ops.commit = e1000_null_ops_generic;
+ phy->ops.force_speed_duplex = e1000_null_ops_generic;
+ phy->ops.get_cfg_done = e1000_null_ops_generic;
+ phy->ops.get_cable_length = e1000_null_ops_generic;
+ phy->ops.get_info = e1000_null_ops_generic;
+ phy->ops.set_page = e1000_null_set_page;
+ phy->ops.read_reg = e1000_null_read_reg;
+ phy->ops.read_reg_locked = e1000_null_read_reg;
+ phy->ops.read_reg_page = e1000_null_read_reg;
+ phy->ops.release = e1000_null_phy_generic;
+ phy->ops.reset = e1000_null_ops_generic;
+ phy->ops.set_d0_lplu_state = e1000_null_lplu_state;
+ phy->ops.set_d3_lplu_state = e1000_null_lplu_state;
+ phy->ops.write_reg = e1000_null_write_reg;
+ phy->ops.write_reg_locked = e1000_null_write_reg;
+ phy->ops.write_reg_page = e1000_null_write_reg;
+ phy->ops.power_up = e1000_null_phy_generic;
+ phy->ops.power_down = e1000_null_phy_generic;
+ phy->ops.read_i2c_byte = e1000_read_i2c_byte_null;
+ phy->ops.write_i2c_byte = e1000_write_i2c_byte_null;
+}
+
+/**
+ * e1000_null_set_page - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_set_page(struct e1000_hw E1000_UNUSEDARG *hw,
+ u16 E1000_UNUSEDARG data)
+{
+ DEBUGFUNC("e1000_null_set_page");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_read_reg - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_read_reg(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG *data)
+{
+ DEBUGFUNC("e1000_null_read_reg");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_null_phy_generic - No-op function, return void
+ * @hw: pointer to the HW structure
+ **/
+void e1000_null_phy_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_null_phy_generic");
+ return;
+}
+
+/**
+ * e1000_null_lplu_state - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_lplu_state(struct e1000_hw E1000_UNUSEDARG *hw,
+ bool E1000_UNUSEDARG active)
+{
+ DEBUGFUNC("e1000_null_lplu_state");
+ return E1000_SUCCESS;
+}
/**
- * igb_check_reset_block - Check if PHY reset is blocked
+ * e1000_null_write_reg - No-op function, return 0
+ * @hw: pointer to the HW structure
+ **/
+s32 e1000_null_write_reg(struct e1000_hw E1000_UNUSEDARG *hw,
+ u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG data)
+{
+ DEBUGFUNC("e1000_null_write_reg");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_i2c_byte_null - No-op function, return 0
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: device address
+ * @data: data value read
+ *
+ **/
+s32 e1000_read_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG byte_offset,
+ u8 E1000_UNUSEDARG dev_addr,
+ u8 E1000_UNUSEDARG *data)
+{
+ DEBUGFUNC("e1000_read_i2c_byte_null");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_i2c_byte_null - No-op function, return 0
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+ * @data: data value to write
+ *
+ **/
+s32 e1000_write_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw,
+ u8 E1000_UNUSEDARG byte_offset,
+ u8 E1000_UNUSEDARG dev_addr,
+ u8 E1000_UNUSEDARG data)
+{
+ DEBUGFUNC("e1000_write_i2c_byte_null");
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_check_reset_block_generic - Check if PHY reset is blocked
* @hw: pointer to the HW structure
*
* Read the PHY management control register and check whether a PHY reset
- * is blocked. If a reset is not blocked return 0, otherwise
+ * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise
* return E1000_BLK_PHY_RESET (12).
**/
-s32 igb_check_reset_block(struct e1000_hw *hw)
+s32 e1000_check_reset_block_generic(struct e1000_hw *hw)
{
u32 manc;
- manc = rd32(E1000_MANC);
+ DEBUGFUNC("e1000_check_reset_block");
+
+ manc = E1000_READ_REG(hw, E1000_MANC);
- return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;
+ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+ E1000_BLK_PHY_RESET : E1000_SUCCESS;
}
/**
- * igb_get_phy_id - Retrieve the PHY ID and revision
+ * e1000_get_phy_id - Retrieve the PHY ID and revision
* @hw: pointer to the HW structure
*
* Reads the PHY registers and stores the PHY ID and possibly the PHY
* revision in the hardware structure.
**/
-s32 igb_get_phy_id(struct e1000_hw *hw)
+s32 e1000_get_phy_id(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
+ s32 ret_val = E1000_SUCCESS;
u16 phy_id;
+ DEBUGFUNC("e1000_get_phy_id");
+
+ if (!phy->ops.read_reg)
+ return E1000_SUCCESS;
+
ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
if (ret_val)
- goto out;
+ return ret_val;
phy->id = (u32)(phy_id << 16);
- udelay(20);
+ usec_delay(20);
ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
if (ret_val)
- goto out;
+ return ret_val;
phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_phy_reset_dsp - Reset PHY DSP
+ * e1000_phy_reset_dsp_generic - Reset PHY DSP
* @hw: pointer to the HW structure
*
* Reset the digital signal processor.
**/
-static s32 igb_phy_reset_dsp(struct e1000_hw *hw)
+s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw)
{
- s32 ret_val = 0;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_phy_reset_dsp_generic");
- if (!(hw->phy.ops.write_reg))
- goto out;
+ if (!hw->phy.ops.write_reg)
+ return E1000_SUCCESS;
ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
if (ret_val)
- goto out;
-
- ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
+ return ret_val;
-out:
- return ret_val;
+ return hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
}
/**
- * igb_read_phy_reg_mdic - Read MDI control register
+ * e1000_read_phy_reg_mdic - Read MDI control register
* @hw: pointer to the HW structure
* @offset: register offset to be read
* @data: pointer to the read data
*
- * Reads the MDI control regsiter in the PHY at offset and stores the
+ * Reads the MDI control register in the PHY at offset and stores the
* information read to data.
**/
-s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
{
struct e1000_phy_info *phy = &hw->phy;
u32 i, mdic = 0;
- s32 ret_val = 0;
+
+ DEBUGFUNC("e1000_read_phy_reg_mdic");
if (offset > MAX_PHY_REG_ADDRESS) {
- hw_dbg("PHY Address %d is out of range\n", offset);
- ret_val = -E1000_ERR_PARAM;
- goto out;
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ return -E1000_ERR_PARAM;
}
/* Set up Op-code, Phy Address, and register offset in the MDI
@@ -156,52 +276,55 @@ s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
(phy->addr << E1000_MDIC_PHY_SHIFT) |
(E1000_MDIC_OP_READ));
- wr32(E1000_MDIC, mdic);
+ E1000_WRITE_REG(hw, E1000_MDIC, mdic);
/* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with
* the lower time out
*/
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- udelay(50);
- mdic = rd32(E1000_MDIC);
+ usec_delay_irq(50);
+ mdic = E1000_READ_REG(hw, E1000_MDIC);
if (mdic & E1000_MDIC_READY)
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- hw_dbg("MDI Read did not complete\n");
- ret_val = -E1000_ERR_PHY;
- goto out;
+ DEBUGOUT("MDI Read did not complete\n");
+ return -E1000_ERR_PHY;
}
if (mdic & E1000_MDIC_ERROR) {
- hw_dbg("MDI Error\n");
- ret_val = -E1000_ERR_PHY;
- goto out;
+ DEBUGOUT("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ return -E1000_ERR_PHY;
}
*data = (u16) mdic;
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_write_phy_reg_mdic - Write MDI control register
+ * e1000_write_phy_reg_mdic - Write MDI control register
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write to register at offset
*
* Writes data to MDI control register in the PHY at offset.
**/
-s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
{
struct e1000_phy_info *phy = &hw->phy;
u32 i, mdic = 0;
- s32 ret_val = 0;
+
+ DEBUGFUNC("e1000_write_phy_reg_mdic");
if (offset > MAX_PHY_REG_ADDRESS) {
- hw_dbg("PHY Address %d is out of range\n", offset);
- ret_val = -E1000_ERR_PARAM;
- goto out;
+ DEBUGOUT1("PHY Address %d is out of range\n", offset);
+ return -E1000_ERR_PARAM;
}
/* Set up Op-code, Phy Address, and register offset in the MDI
@@ -213,35 +336,38 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
(phy->addr << E1000_MDIC_PHY_SHIFT) |
(E1000_MDIC_OP_WRITE));
- wr32(E1000_MDIC, mdic);
+ E1000_WRITE_REG(hw, E1000_MDIC, mdic);
/* Poll the ready bit to see if the MDI write completed
* Increasing the time out as testing showed failures with
* the lower time out
*/
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- udelay(50);
- mdic = rd32(E1000_MDIC);
+ usec_delay_irq(50);
+ mdic = E1000_READ_REG(hw, E1000_MDIC);
if (mdic & E1000_MDIC_READY)
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- hw_dbg("MDI Write did not complete\n");
- ret_val = -E1000_ERR_PHY;
- goto out;
+ DEBUGOUT("MDI Write did not complete\n");
+ return -E1000_ERR_PHY;
}
if (mdic & E1000_MDIC_ERROR) {
- hw_dbg("MDI Error\n");
- ret_val = -E1000_ERR_PHY;
- goto out;
+ DEBUGOUT("MDI Error\n");
+ return -E1000_ERR_PHY;
+ }
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ return -E1000_ERR_PHY;
}
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_read_phy_reg_i2c - Read PHY register using i2c
+ * e1000_read_phy_reg_i2c - Read PHY register using i2c
* @hw: pointer to the HW structure
* @offset: register offset to be read
* @data: pointer to the read data
@@ -249,11 +375,13 @@ out:
* Reads the PHY register at offset using the i2c interface and stores the
* retrieved information in data.
**/
-s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
+s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
{
struct e1000_phy_info *phy = &hw->phy;
u32 i, i2ccmd = 0;
+ DEBUGFUNC("e1000_read_phy_reg_i2c");
+
/* Set up Op-code, Phy Address, and register address in the I2CCMD
* register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
@@ -262,47 +390,49 @@ s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
(phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
(E1000_I2CCMD_OPCODE_READ));
- wr32(E1000_I2CCMD, i2ccmd);
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
/* Poll the ready bit to see if the I2C read completed */
for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
- udelay(50);
- i2ccmd = rd32(E1000_I2CCMD);
+ usec_delay(50);
+ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
if (i2ccmd & E1000_I2CCMD_READY)
break;
}
if (!(i2ccmd & E1000_I2CCMD_READY)) {
- hw_dbg("I2CCMD Read did not complete\n");
+ DEBUGOUT("I2CCMD Read did not complete\n");
return -E1000_ERR_PHY;
}
if (i2ccmd & E1000_I2CCMD_ERROR) {
- hw_dbg("I2CCMD Error bit set\n");
+ DEBUGOUT("I2CCMD Error bit set\n");
return -E1000_ERR_PHY;
}
/* Need to byte-swap the 16-bit value. */
*data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
- return 0;
+ return E1000_SUCCESS;
}
/**
- * igb_write_phy_reg_i2c - Write PHY register using i2c
+ * e1000_write_phy_reg_i2c - Write PHY register using i2c
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write at register offset
*
* Writes the data to PHY register at the offset using the i2c interface.
**/
-s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
+s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
{
struct e1000_phy_info *phy = &hw->phy;
u32 i, i2ccmd = 0;
u16 phy_data_swapped;
+ DEBUGFUNC("e1000_write_phy_reg_i2c");
+
/* Prevent overwriting SFP I2C EEPROM which is at A0 address. */
if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) {
- hw_dbg("PHY I2C Address %d is out of range.\n",
+ DEBUGOUT1("PHY I2C Address %d is out of range.\n",
hw->phy.addr);
return -E1000_ERR_CONFIG;
}
@@ -319,29 +449,157 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
E1000_I2CCMD_OPCODE_WRITE |
phy_data_swapped);
- wr32(E1000_I2CCMD, i2ccmd);
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
/* Poll the ready bit to see if the I2C write completed */
for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
- udelay(50);
- i2ccmd = rd32(E1000_I2CCMD);
+ usec_delay(50);
+ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
if (i2ccmd & E1000_I2CCMD_READY)
break;
}
if (!(i2ccmd & E1000_I2CCMD_READY)) {
- hw_dbg("I2CCMD Write did not complete\n");
+ DEBUGOUT("I2CCMD Write did not complete\n");
return -E1000_ERR_PHY;
}
if (i2ccmd & E1000_I2CCMD_ERROR) {
- hw_dbg("I2CCMD Error bit set\n");
+ DEBUGOUT("I2CCMD Error bit set\n");
return -E1000_ERR_PHY;
}
- return 0;
+ return E1000_SUCCESS;
}
/**
- * igb_read_phy_reg_igp - Read igp PHY register
+ * e1000_read_sfp_data_byte - Reads SFP module data.
+ * @hw: pointer to the HW structure
+ * @offset: byte location offset to be read
+ * @data: read data buffer pointer
+ *
+ * Reads one byte of SFP module data stored in the SFP's resident
+ * EEPROM memory or in the SFP diagnostic area.
+ * Function should be called with
+ * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
+ * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
+ * access
+ **/
+s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
+{
+ u32 i = 0;
+ u32 i2ccmd = 0;
+ u32 data_local = 0;
+
+ DEBUGFUNC("e1000_read_sfp_data_byte");
+
+ if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
+ DEBUGOUT("I2CCMD command address exceeds upper limit\n");
+ return -E1000_ERR_PHY;
+ }
+
+ /* Set up Op-code, EEPROM Address, in the I2CCMD
+ * register. The MAC will take care of interfacing with the
+ * EEPROM to retrieve the desired data.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_READ);
+
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+ /* Poll the ready bit to see if the I2C read completed */
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ usec_delay(50);
+ data_local = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (data_local & E1000_I2CCMD_READY)
+ break;
+ }
+ if (!(data_local & E1000_I2CCMD_READY)) {
+ DEBUGOUT("I2CCMD Read did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (data_local & E1000_I2CCMD_ERROR) {
+ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+ *data = (u8) data_local & 0xFF;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_sfp_data_byte - Writes SFP module data.
+ * @hw: pointer to the HW structure
+ * @offset: byte location offset to write to
+ * @data: data to write
+ *
+ * Writes one byte of SFP module data stored in the SFP's resident
+ * EEPROM memory or in the SFP diagnostic area.
+ * Function should be called with
+ * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
+ * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
+ * access
+ **/
+s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
+{
+ u32 i = 0;
+ u32 i2ccmd = 0;
+ u32 data_local = 0;
+
+ DEBUGFUNC("e1000_write_sfp_data_byte");
+
+ if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
+ DEBUGOUT("I2CCMD command address exceeds upper limit\n");
+ return -E1000_ERR_PHY;
+ }
+ /* The programming interface is 16 bits wide,
+ * so we need to read the whole word first,
+ * then update the appropriate byte lane and write
+ * the updated word back.
+ */
+ /* Set up Op-code, EEPROM Address, in the I2CCMD
+ * register. The MAC will take care of interfacing
+ * with an EEPROM to write the data given.
+ */
+ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_READ);
+ /* Set a command to read single word */
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+ for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+ usec_delay(50);
+ /* Poll the ready bit to see if the previously
+ * launched I2C operation completed
+ */
+ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+ if (i2ccmd & E1000_I2CCMD_READY) {
+ /* Check if this is READ or WRITE phase */
+ if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
+ E1000_I2CCMD_OPCODE_READ) {
+ /* Write the selected byte
+ * lane and update whole word
+ */
+ data_local = i2ccmd & 0xFF00;
+ data_local |= data;
+ i2ccmd = ((offset <<
+ E1000_I2CCMD_REG_ADDR_SHIFT) |
+ E1000_I2CCMD_OPCODE_WRITE | data_local);
+ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+ } else {
+ break;
+ }
+ }
+ }
+ if (!(i2ccmd & E1000_I2CCMD_READY)) {
+ DEBUGOUT("I2CCMD Write did not complete\n");
+ return -E1000_ERR_PHY;
+ }
+ if (i2ccmd & E1000_I2CCMD_ERROR) {
+ DEBUGOUT("I2CCMD Error bit set\n");
+ return -E1000_ERR_PHY;
+ }
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_phy_reg_m88 - Read m88 PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
* @data: pointer to the read data
@@ -350,38 +608,29 @@ s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
* and storing the retrieved information in data. Release any acquired
* semaphores before exiting.
**/
-s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
{
- s32 ret_val = 0;
+ s32 ret_val;
- if (!(hw->phy.ops.acquire))
- goto out;
+ DEBUGFUNC("e1000_read_phy_reg_m88");
+
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
- goto out;
-
- if (offset > MAX_PHY_MULTI_PAGE_REG) {
- ret_val = igb_write_phy_reg_mdic(hw,
- IGP01E1000_PHY_PAGE_SELECT,
- (u16)offset);
- if (ret_val) {
- hw->phy.ops.release(hw);
- goto out;
- }
- }
+ return ret_val;
- ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
+ ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
hw->phy.ops.release(hw);
-out:
return ret_val;
}
/**
- * igb_write_phy_reg_igp - Write igp PHY register
+ * e1000_write_phy_reg_m88 - Write m88 PHY register
* @hw: pointer to the HW structure
* @offset: register offset to write to
* @data: data to write at register offset
@@ -389,80 +638,413 @@ out:
* Acquires semaphore, if necessary, then writes the data to PHY register
* at the offset. Release any acquired semaphores before exiting.
**/
-s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
{
- s32 ret_val = 0;
+ s32 ret_val;
- if (!(hw->phy.ops.acquire))
- goto out;
+ DEBUGFUNC("e1000_write_phy_reg_m88");
+
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
- goto out;
+ return ret_val;
- if (offset > MAX_PHY_MULTI_PAGE_REG) {
- ret_val = igb_write_phy_reg_mdic(hw,
- IGP01E1000_PHY_PAGE_SELECT,
- (u16)offset);
- if (ret_val) {
- hw->phy.ops.release(hw);
- goto out;
- }
+ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
+
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_set_page_igp - Set page as on IGP-like PHY(s)
+ * @hw: pointer to the HW structure
+ * @page: page to set (shifted left when necessary)
+ *
+ * Sets PHY page required for PHY register access. Assumes semaphore is
+ * already acquired. Note, this function sets phy.addr to 1 so the caller
+ * must set it appropriately (if necessary) after this function returns.
+ **/
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
+{
+ DEBUGFUNC("e1000_set_page_igp");
+
+ DEBUGOUT1("Setting page 0x%x\n", page);
+
+ hw->phy.addr = 1;
+
+ return e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page);
+}
+
+/**
+ * __e1000_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphores before exiting.
+ **/
+static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("__e1000_read_phy_reg_igp");
+
+ if (!locked) {
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
}
- ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
+ if (offset > MAX_PHY_MULTI_PAGE_REG)
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (u16)offset);
+ if (!ret_val)
+ ret_val = e1000_read_phy_reg_mdic(hw,
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
+ if (!locked)
+ hw->phy.ops.release(hw);
- hw->phy.ops.release(hw);
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_igp - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores the
+ * retrieved information in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000_read_phy_reg_igp_locked - Read igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ * e1000_write_phy_reg_igp - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_write_phy_reg_igp");
+
+ if (!locked) {
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ if (offset > MAX_PHY_MULTI_PAGE_REG)
+ ret_val = e1000_write_phy_reg_mdic(hw,
+ IGP01E1000_PHY_PAGE_SELECT,
+ (u16)offset);
+ if (!ret_val)
+ ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS &
+ offset,
+ data);
+ if (!locked)
+ hw->phy.ops.release(hw);
-out:
return ret_val;
}
/**
- * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link
+ * e1000_write_phy_reg_igp - Write igp PHY register
* @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
*
- * Sets up Carrier-sense on Transmit and downshift values.
+ * Acquires semaphore then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
**/
-s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
+s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ * e1000_write_phy_reg_igp_locked - Write igp PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ * __e1000_read_kmrn_reg - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary. Then reads the PHY register at offset
+ * using the kumeran interface. The information retrieved is stored in data.
+ * Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked)
+{
+ u32 kmrnctrlsta;
+
+ DEBUGFUNC("__e1000_read_kmrn_reg");
+
+ if (!locked) {
+ s32 ret_val = E1000_SUCCESS;
+
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(2);
+
+ kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
+ *data = (u16)kmrnctrlsta;
+
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_read_kmrn_reg_generic - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset using the
+ * kumeran interface. The information retrieved is stored in data.
+ * Release the acquired semaphore before exiting.
+ **/
+s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ * e1000_read_kmrn_reg_locked - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the kumeran interface. The
+ * information retrieved is stored in data.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ * __e1000_write_kmrn_reg - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ *
+ * Acquires semaphore, if necessary. Then write the data to PHY register
+ * at the offset using the kumeran interface. Release any acquired semaphores
+ * before exiting.
+ **/
+static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
+ bool locked)
+{
+ u32 kmrnctrlsta;
+
+ DEBUGFUNC("e1000_write_kmrn_reg_generic");
+
+ if (!locked) {
+ s32 ret_val = E1000_SUCCESS;
+
+ if (!hw->phy.ops.acquire)
+ return E1000_SUCCESS;
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | data;
+ E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+ E1000_WRITE_FLUSH(hw);
+
+ usec_delay(2);
+
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_kmrn_reg_generic - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore then writes the data to the PHY register at the offset
+ * using the kumeran interface. Release the acquired semaphore before exiting.
+ **/
+s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ * e1000_write_kmrn_reg_locked - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Write the data to PHY register at the offset using the kumeran interface.
+ * Assumes semaphore already acquired.
+ **/
+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ * e1000_set_master_slave_mode - Setup PHY for Master/slave mode
+ * @hw: pointer to the HW structure
+ *
+ * Sets up Master/slave mode
+ **/
+static s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
{
- struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 phy_data;
- if (phy->reset_disable) {
- ret_val = 0;
- goto out;
+ /* Resolve Master/Slave mode */
+ ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* load defaults for future use */
+ hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
+ ((phy_data & CR_1000T_MS_VALUE) ?
+ e1000_ms_force_master :
+ e1000_ms_force_slave) : e1000_ms_auto;
+
+ switch (hw->phy.ms_type) {
+ case e1000_ms_force_master:
+ phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_force_slave:
+ phy_data |= CR_1000T_MS_ENABLE;
+ phy_data &= ~(CR_1000T_MS_VALUE);
+ break;
+ case e1000_ms_auto:
+ phy_data &= ~CR_1000T_MS_ENABLE;
+ /* fall-through */
+ default:
+ break;
}
- if (phy->type == e1000_phy_82580) {
+ return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data);
+}
+
+/**
+ * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up Carrier-sense on Transmit and downshift values.
+ **/
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ DEBUGFUNC("e1000_copper_link_setup_82577");
+
+ if (hw->phy.reset_disable)
+ return E1000_SUCCESS;
+
+ if (hw->phy.type == e1000_phy_82580) {
ret_val = hw->phy.ops.reset(hw);
if (ret_val) {
- hw_dbg("Error resetting the PHY.\n");
- goto out;
+ DEBUGOUT("Error resetting the PHY.\n");
+ return ret_val;
}
}
- /* Enable CRS on TX. This must be set for half-duplex operation. */
- ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data);
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
+ ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
- phy_data |= I82580_CFG_ASSERT_CRS_ON_TX;
+ phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
/* Enable downshift */
- phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
+ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
- ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
+ ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data);
if (ret_val)
- goto out;
+ return ret_val;
/* Set MDI/MDIX mode */
- ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
+ ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data);
if (ret_val)
- goto out;
- phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
+ return ret_val;
+ phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
/* Options:
* 0 - Auto (default)
* 1 - MDI mode
@@ -472,41 +1054,42 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
case 1:
break;
case 2:
- phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX;
+ phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
break;
case 0:
default:
- phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX;
+ phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
break;
}
- ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
+ ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data);
+ if (ret_val)
+ return ret_val;
-out:
- return ret_val;
+ return e1000_set_master_slave_mode(hw);
}
/**
- * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link
+ * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link
* @hw: pointer to the HW structure
*
* Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock
* and downshift values are set also.
**/
-s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
+s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 phy_data;
- if (phy->reset_disable) {
- ret_val = 0;
- goto out;
- }
+ DEBUGFUNC("e1000_copper_link_setup_m88");
+
+ if (phy->reset_disable)
+ return E1000_SUCCESS;
- /* Enable CRS on TX. This must be set for half-duplex operation. */
+ /* Enable CRS on Tx. This must be set for half-duplex operation. */
ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
@@ -542,12 +1125,12 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
* 1 - Enabled
*/
phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
- if (phy->disable_polarity_correction == 1)
+ if (phy->disable_polarity_correction)
phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
if (ret_val)
- goto out;
+ return ret_val;
if (phy->revision < E1000_REVISION_4) {
/* Force TX_CLK in the Extended PHY Specific Control Register
@@ -556,7 +1139,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
&phy_data);
if (ret_val)
- goto out;
+ return ret_val;
phy_data |= M88E1000_EPSCR_TX_CLK_25;
@@ -568,54 +1151,48 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
} else {
/* Configure Master and Slave downshift values */
phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
- M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+ M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
}
ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
phy_data);
if (ret_val)
- goto out;
+ return ret_val;
}
/* Commit the changes. */
- ret_val = igb_phy_sw_reset(hw);
+ ret_val = phy->ops.commit(hw);
if (ret_val) {
- hw_dbg("Error committing the PHY changes\n");
- goto out;
- }
- if (phy->type == e1000_phy_i210) {
- ret_val = igb_set_master_slave_mode(hw);
- if (ret_val)
- return ret_val;
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
}
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link
+ * e1000_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link
* @hw: pointer to the HW structure
*
* Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's.
* Also enables and sets the downshift parameters.
**/
-s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
+s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 phy_data;
- if (phy->reset_disable) {
- ret_val = 0;
- goto out;
- }
+ DEBUGFUNC("e1000_copper_link_setup_m88_gen2");
+
+ if (phy->reset_disable)
+ return E1000_SUCCESS;
/* Enable CRS on Tx. This must be set for half-duplex operation. */
ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
/* Options:
* MDI/MDI-X = 0 (default)
@@ -652,81 +1229,87 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
* 1 - Enabled
*/
phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
- if (phy->disable_polarity_correction == 1)
+ if (phy->disable_polarity_correction)
phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
/* Enable downshift and setting it to X6 */
+ if (phy->id == M88E1543_E_PHY_ID) {
+ phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE;
+ ret_val =
+ phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
+ }
+ }
+
phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
if (ret_val)
- goto out;
+ return ret_val;
/* Commit the changes. */
- ret_val = igb_phy_sw_reset(hw);
+ ret_val = phy->ops.commit(hw);
if (ret_val) {
- hw_dbg("Error committing the PHY changes\n");
- goto out;
+ DEBUGOUT("Error committing the PHY changes\n");
+ return ret_val;
}
-out:
- return ret_val;
+ ret_val = e1000_set_master_slave_mode(hw);
+ if (ret_val)
+ return ret_val;
+
+ return E1000_SUCCESS;
}
/**
- * igb_copper_link_setup_igp - Setup igp PHY's for copper link
+ * e1000_copper_link_setup_igp - Setup igp PHY's for copper link
* @hw: pointer to the HW structure
*
* Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
* igp PHY's.
**/
-s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
+s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 data;
- if (phy->reset_disable) {
- ret_val = 0;
- goto out;
- }
+ DEBUGFUNC("e1000_copper_link_setup_igp");
+
+ if (phy->reset_disable)
+ return E1000_SUCCESS;
- ret_val = phy->ops.reset(hw);
+ ret_val = hw->phy.ops.reset(hw);
if (ret_val) {
- hw_dbg("Error resetting the PHY.\n");
- goto out;
+ DEBUGOUT("Error resetting the PHY.\n");
+ return ret_val;
}
/* Wait 100ms for MAC to configure PHY from NVM settings, to avoid
* timeout issues when LFS is enabled.
*/
- msleep(100);
+ msec_delay(100);
- /* The NVM settings will configure LPLU in D3 for
- * non-IGP1 PHYs.
- */
- if (phy->type == e1000_phy_igp) {
- /* disable lplu d3 during driver init */
- if (phy->ops.set_d3_lplu_state)
- ret_val = phy->ops.set_d3_lplu_state(hw, false);
+ /* disable lplu d0 during driver init */
+ if (hw->phy.ops.set_d0_lplu_state) {
+ ret_val = hw->phy.ops.set_d0_lplu_state(hw, false);
if (ret_val) {
- hw_dbg("Error Disabling LPLU D3\n");
- goto out;
+ DEBUGOUT("Error Disabling LPLU D0\n");
+ return ret_val;
}
}
-
- /* disable lplu d0 during driver init */
- ret_val = phy->ops.set_d0_lplu_state(hw, false);
- if (ret_val) {
- hw_dbg("Error Disabling LPLU D0\n");
- goto out;
- }
/* Configure mdi-mdix settings */
ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
if (ret_val)
- goto out;
+ return ret_val;
data &= ~IGP01E1000_PSCR_AUTO_MDIX;
@@ -744,7 +1327,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
}
ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
if (ret_val)
- goto out;
+ return ret_val;
/* set auto-master slave resolution settings */
if (hw->mac.autoneg) {
@@ -758,125 +1341,34 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
IGP01E1000_PHY_PORT_CONFIG,
&data);
if (ret_val)
- goto out;
+ return ret_val;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = phy->ops.write_reg(hw,
IGP01E1000_PHY_PORT_CONFIG,
data);
if (ret_val)
- goto out;
+ return ret_val;
/* Set auto Master/Slave resolution process */
ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
if (ret_val)
- goto out;
+ return ret_val;
data &= ~CR_1000T_MS_ENABLE;
ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
if (ret_val)
- goto out;
+ return ret_val;
}
- ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
- if (ret_val)
- goto out;
-
- /* load defaults for future use */
- phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
- ((data & CR_1000T_MS_VALUE) ?
- e1000_ms_force_master :
- e1000_ms_force_slave) :
- e1000_ms_auto;
-
- switch (phy->ms_type) {
- case e1000_ms_force_master:
- data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
- break;
- case e1000_ms_force_slave:
- data |= CR_1000T_MS_ENABLE;
- data &= ~(CR_1000T_MS_VALUE);
- break;
- case e1000_ms_auto:
- data &= ~CR_1000T_MS_ENABLE;
- default:
- break;
- }
- ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
- if (ret_val)
- goto out;
- }
-
-out:
- return ret_val;
-}
-
-/**
- * igb_copper_link_autoneg - Setup/Enable autoneg for copper link
- * @hw: pointer to the HW structure
- *
- * Performs initial bounds checking on autoneg advertisement parameter, then
- * configure to advertise the full capability. Setup the PHY to autoneg
- * and restart the negotiation process between the link partner. If
- * autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
- **/
-static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
-{
- struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
- u16 phy_ctrl;
-
- /* Perform some bounds checking on the autoneg advertisement
- * parameter.
- */
- phy->autoneg_advertised &= phy->autoneg_mask;
-
- /* If autoneg_advertised is zero, we assume it was not defaulted
- * by the calling code so we set to advertise full capability.
- */
- if (phy->autoneg_advertised == 0)
- phy->autoneg_advertised = phy->autoneg_mask;
-
- hw_dbg("Reconfiguring auto-neg advertisement params\n");
- ret_val = igb_phy_setup_autoneg(hw);
- if (ret_val) {
- hw_dbg("Error Setting up Auto-Negotiation\n");
- goto out;
- }
- hw_dbg("Restarting Auto-Neg\n");
-
- /* Restart auto-negotiation by setting the Auto Neg Enable bit and
- * the Auto Neg Restart bit in the PHY control register.
- */
- ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
- if (ret_val)
- goto out;
-
- phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
- ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
- if (ret_val)
- goto out;
-
- /* Does the user want to wait for Auto-Neg to complete here, or
- * check at a later time (for example, callback routine).
- */
- if (phy->autoneg_wait_to_complete) {
- ret_val = igb_wait_autoneg(hw);
- if (ret_val) {
- hw_dbg("Error while waiting for "
- "autoneg to complete\n");
- goto out;
- }
+ ret_val = e1000_set_master_slave_mode(hw);
}
- hw->mac.get_link_status = true;
-
-out:
return ret_val;
}
/**
- * igb_phy_setup_autoneg - Configure PHY for auto-negotiation
+ * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
* @hw: pointer to the HW structure
*
* Reads the MII auto-neg advertisement register and/or the 1000T control
@@ -884,26 +1376,28 @@ out:
* return successful. Otherwise, setup advertisement and flow control to
* the appropriate values for the wanted auto-negotiation.
**/
-static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
+static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 mii_autoneg_adv_reg;
u16 mii_1000t_ctrl_reg = 0;
+ DEBUGFUNC("e1000_phy_setup_autoneg");
+
phy->autoneg_advertised &= phy->autoneg_mask;
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
if (ret_val)
- goto out;
+ return ret_val;
if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
/* Read the MII 1000Base-T Control Register (Address 9). */
ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
&mii_1000t_ctrl_reg);
if (ret_val)
- goto out;
+ return ret_val;
}
/* Need to parse both autoneg_advertised and fc and set up
@@ -923,39 +1417,39 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
NWAY_AR_10T_HD_CAPS);
mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
- hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
+ DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised);
/* Do we want to advertise 10 Mb Half Duplex? */
if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
- hw_dbg("Advertise 10mb Half duplex\n");
+ DEBUGOUT("Advertise 10mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
}
/* Do we want to advertise 10 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
- hw_dbg("Advertise 10mb Full duplex\n");
+ DEBUGOUT("Advertise 10mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
}
/* Do we want to advertise 100 Mb Half Duplex? */
if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
- hw_dbg("Advertise 100mb Half duplex\n");
+ DEBUGOUT("Advertise 100mb Half duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
}
/* Do we want to advertise 100 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
- hw_dbg("Advertise 100mb Full duplex\n");
+ DEBUGOUT("Advertise 100mb Full duplex\n");
mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
}
/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
- hw_dbg("Advertise 1000mb Half duplex request denied!\n");
+ DEBUGOUT("Advertise 1000mb Half duplex request denied!\n");
/* Do we want to advertise 1000 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
- hw_dbg("Advertise 1000mb Full duplex\n");
+ DEBUGOUT("Advertise 1000mb Full duplex\n");
mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
}
@@ -972,68 +1466,126 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
* but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
* but we do not support receiving pause frames).
- * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
* other: No software override. The flow control configuration
* in the EEPROM is used.
*/
switch (hw->fc.current_mode) {
case e1000_fc_none:
- /* Flow control (RX & TX) is completely disabled by a
+ /* Flow control (Rx & Tx) is completely disabled by a
* software over-ride.
*/
mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case e1000_fc_rx_pause:
- /* RX Flow control is enabled, and TX Flow control is
+ /* Rx Flow control is enabled, and Tx Flow control is
* disabled, by a software over-ride.
*
* Since there really isn't a way to advertise that we are
- * capable of RX Pause ONLY, we will advertise that we
- * support both symmetric and asymmetric RX PAUSE. Later
+ * capable of Rx Pause ONLY, we will advertise that we
+ * support both symmetric and asymmetric Rx PAUSE. Later
* (in e1000_config_fc_after_link_up) we will disable the
* hw's ability to send PAUSE frames.
*/
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case e1000_fc_tx_pause:
- /* TX Flow control is enabled, and RX Flow control is
+ /* Tx Flow control is enabled, and Rx Flow control is
* disabled, by a software over-ride.
*/
mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
break;
case e1000_fc_full:
- /* Flow control (both RX and TX) is enabled by a software
+ /* Flow control (both Rx and Tx) is enabled by a software
* over-ride.
*/
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
default:
- hw_dbg("Flow control param set incorrectly\n");
- ret_val = -E1000_ERR_CONFIG;
- goto out;
+ DEBUGOUT("Flow control param set incorrectly\n");
+ return -E1000_ERR_CONFIG;
}
ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
if (ret_val)
- goto out;
+ return ret_val;
- hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+ DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
- if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
- ret_val = phy->ops.write_reg(hw,
- PHY_1000T_CTRL,
+ if (phy->autoneg_mask & ADVERTISE_1000_FULL)
+ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL,
mii_1000t_ctrl_reg);
- if (ret_val)
- goto out;
+
+ return ret_val;
+}
+
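/* A condensed, hedged sketch (not part of this change) of the flow-control
 * mapping performed by the switch above: the requested fc mode selects which
 * of the PAUSE/ASM_DIR advertisement bits end up in the register. The helper
 * name fc_mode_to_nway() is hypothetical; the e1000_fc_* and NWAY_AR_*
 * symbols are the ones already used in this file.
 */
static u16 fc_mode_to_nway(enum e1000_fc_mode mode, u16 adv_reg)
{
        switch (mode) {
        case e1000_fc_none:
                /* Advertise neither symmetric nor asymmetric pause */
                return adv_reg & ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
        case e1000_fc_rx_pause:
        case e1000_fc_full:
                /* Rx-only and full both advertise PAUSE and ASM_DIR; the
                 * Rx-only case is narrowed after link-up by disabling the
                 * hw's transmission of PAUSE frames.
                 */
                return adv_reg | (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
        case e1000_fc_tx_pause:
                /* Asymmetric pause only */
                return (adv_reg | NWAY_AR_ASM_DIR) & ~NWAY_AR_PAUSE;
        default:
                /* The real routine treats this as a configuration error */
                return adv_reg;
        }
}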
+/**
+ * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Performs initial bounds checking on the autoneg advertisement parameter,
+ * then configures the PHY to advertise the full capability. Sets up the PHY
+ * for autoneg and restarts the negotiation process with the link partner. If
+ * autoneg_wait_to_complete is set, waits for autoneg to complete before exiting.
+ **/
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_ctrl;
+
+ DEBUGFUNC("e1000_copper_link_autoneg");
+
+ /* Perform some bounds checking on the autoneg advertisement
+ * parameter.
+ */
+ phy->autoneg_advertised &= phy->autoneg_mask;
+
+ /* If autoneg_advertised is zero, we assume it was not defaulted
+ * by the calling code so we set to advertise full capability.
+ */
+ if (!phy->autoneg_advertised)
+ phy->autoneg_advertised = phy->autoneg_mask;
+
+ DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+ ret_val = e1000_phy_setup_autoneg(hw);
+ if (ret_val) {
+ DEBUGOUT("Error Setting up Auto-Negotiation\n");
+ return ret_val;
}
+ DEBUGOUT("Restarting Auto-Neg\n");
+
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+ * the Auto Neg Restart bit in the PHY control register.
+ */
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+ if (ret_val)
+ return ret_val;
+
+ /* Does the user want to wait for Auto-Neg to complete here, or
+ * check at a later time (for example, from a callback routine).
+ */
+ if (phy->autoneg_wait_to_complete) {
+ ret_val = e1000_wait_autoneg(hw);
+ if (ret_val) {
+ DEBUGOUT("Error while waiting for autoneg to complete\n");
+ return ret_val;
+ }
+ }
+
+ hw->mac.get_link_status = true;
-out:
return ret_val;
}
/**
- * igb_setup_copper_link - Configure copper link settings
+ * e1000_setup_copper_link_generic - Configure copper link settings
* @hw: pointer to the HW structure
*
* Calls the appropriate function to configure the link for auto-neg or forced
@@ -1041,129 +1593,134 @@ out:
* to configure collision distance and flow control are called. If link is
* not established, we return -E1000_ERR_PHY (-2).
**/
-s32 igb_setup_copper_link(struct e1000_hw *hw)
+s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
{
s32 ret_val;
bool link;
+ DEBUGFUNC("e1000_setup_copper_link_generic");
+
if (hw->mac.autoneg) {
/* Setup autoneg and flow control advertisement and perform
* autonegotiation.
*/
- ret_val = igb_copper_link_autoneg(hw);
+ ret_val = e1000_copper_link_autoneg(hw);
if (ret_val)
- goto out;
+ return ret_val;
} else {
/* PHY will be set to 10H, 10F, 100H or 100F
* depending on user settings.
*/
- hw_dbg("Forcing Speed and Duplex\n");
+ DEBUGOUT("Forcing Speed and Duplex\n");
ret_val = hw->phy.ops.force_speed_duplex(hw);
if (ret_val) {
- hw_dbg("Error Forcing Speed and Duplex\n");
- goto out;
+ DEBUGOUT("Error Forcing Speed and Duplex\n");
+ return ret_val;
}
}
/* Check link status. Wait up to 100 microseconds for link to become
* valid.
*/
- ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
+ ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
+ &link);
if (ret_val)
- goto out;
+ return ret_val;
if (link) {
- hw_dbg("Valid link established!!!\n");
- igb_config_collision_dist(hw);
- ret_val = igb_config_fc_after_link_up(hw);
+ DEBUGOUT("Valid link established!!!\n");
+ hw->mac.ops.config_collision_dist(hw);
+ ret_val = e1000_config_fc_after_link_up_generic(hw);
} else {
- hw_dbg("Unable to establish link!!!\n");
+ DEBUGOUT("Unable to establish link!!!\n");
}
-out:
return ret_val;
}
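/* A hedged usage sketch (not part of this change) showing how a caller would
 * typically drive e1000_setup_copper_link_generic(): the mac.autoneg flag
 * selects the negotiated path, and the phy fields set below are the same ones
 * referenced in the function above. example_setup_link() itself is
 * hypothetical.
 */
static s32 example_setup_link(struct e1000_hw *hw)
{
        /* true: advertise and negotiate; false: force speed/duplex */
        hw->mac.autoneg = true;
        hw->phy.autoneg_advertised = ADVERTISE_1000_FULL | ADVERTISE_100_FULL;
        /* block inside the call until negotiation finishes */
        hw->phy.autoneg_wait_to_complete = true;

        return e1000_setup_copper_link_generic(hw);
}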
/**
- * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
* @hw: pointer to the HW structure
*
* Calls the PHY setup function to force speed and duplex. Clears the
* auto-crossover to force MDI manually. Waits for link and returns
* successful if link up is successful, else -E1000_ERR_PHY (-2).
**/
-s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 phy_data;
bool link;
+ DEBUGFUNC("e1000_phy_force_speed_duplex_igp");
+
ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
- igb_phy_force_speed_duplex_setup(hw, &phy_data);
+ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
if (ret_val)
- goto out;
+ return ret_val;
/* Clear Auto-Crossover to force MDI manually. IGP requires MDI
* forced whenever speed and duplex are forced.
*/
ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
if (ret_val)
- goto out;
+ return ret_val;
- hw_dbg("IGP PSCR: %X\n", phy_data);
+ DEBUGOUT1("IGP PSCR: %X\n", phy_data);
- udelay(1);
+ usec_delay(1);
if (phy->autoneg_wait_to_complete) {
- hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
+ DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n");
- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link);
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
if (ret_val)
- goto out;
+ return ret_val;
if (!link)
- hw_dbg("Link taking longer than expected.\n");
+ DEBUGOUT("Link taking longer than expected.\n");
/* Try once more */
- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link);
- if (ret_val)
- goto out;
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
}
-out:
return ret_val;
}
/**
- * igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
* @hw: pointer to the HW structure
*
* Calls the PHY setup function to force speed and duplex. Clears the
* auto-crossover to force MDI manually. Resets the PHY to commit the
* changes. If time expires while waiting for link up, we reset the DSP.
- * After reset, TX_CLK and CRS on TX must be set. Return successful upon
+ * After reset, TX_CLK and CRS on Tx must be set. Return successful upon
* successful completion, else return corresponding error code.
**/
-s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 phy_data;
bool link;
+ DEBUGFUNC("e1000_phy_force_speed_duplex_m88");
+
/* I210 and I211 devices support Auto-Crossover in forced operation. */
if (phy->type != e1000_phy_i210) {
/* Clear Auto-Crossover to force MDI manually. M88E1000
@@ -1172,45 +1729,49 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
&phy_data);
if (ret_val)
- goto out;
+ return ret_val;
phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
phy_data);
if (ret_val)
- goto out;
+ return ret_val;
- hw_dbg("M88E1000 PSCR: %X\n", phy_data);
+ DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
}
ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
- igb_phy_force_speed_duplex_setup(hw, &phy_data);
+ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
if (ret_val)
- goto out;
+ return ret_val;
/* Reset the phy to commit changes. */
- ret_val = igb_phy_sw_reset(hw);
+ ret_val = hw->phy.ops.commit(hw);
if (ret_val)
- goto out;
+ return ret_val;
if (phy->autoneg_wait_to_complete) {
- hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
+ DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n");
- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
if (ret_val)
- goto out;
+ return ret_val;
if (!link) {
bool reset_dsp = true;
switch (hw->phy.id) {
case I347AT4_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
case M88E1112_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
case I210_I_PHY_ID:
reset_dsp = false;
break;
@@ -1219,9 +1780,10 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
reset_dsp = false;
break;
}
- if (!reset_dsp)
- hw_dbg("Link taking longer than expected.\n");
- else {
+
+ if (!reset_dsp) {
+ DEBUGOUT("Link taking longer than expected.\n");
+ } else {
/* We didn't get link.
* Reset the DSP and cross our fingers.
*/
@@ -1229,29 +1791,35 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
M88E1000_PHY_PAGE_SELECT,
0x001d);
if (ret_val)
- goto out;
- ret_val = igb_phy_reset_dsp(hw);
+ return ret_val;
+ ret_val = e1000_phy_reset_dsp_generic(hw);
if (ret_val)
- goto out;
+ return ret_val;
}
}
/* Try once more */
- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT,
- 100000, &link);
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
if (ret_val)
- goto out;
+ return ret_val;
}
- if (hw->phy.type != e1000_phy_m88 ||
- hw->phy.id == I347AT4_E_PHY_ID ||
- hw->phy.id == M88E1112_E_PHY_ID ||
- hw->phy.id == I210_I_PHY_ID)
- goto out;
+ if (hw->phy.type != e1000_phy_m88)
+ return E1000_SUCCESS;
+ if (hw->phy.id == I347AT4_E_PHY_ID ||
+ hw->phy.id == M88E1340M_E_PHY_ID ||
+ hw->phy.id == M88E1112_E_PHY_ID)
+ return E1000_SUCCESS;
+ if (hw->phy.id == I210_I_PHY_ID)
+ return E1000_SUCCESS;
+ if ((hw->phy.id == M88E1543_E_PHY_ID) ||
+ (hw->phy.id == M88E1512_E_PHY_ID))
+ return E1000_SUCCESS;
ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
/* Resetting the phy means we need to re-force TX_CLK in the
* Extended PHY Specific Control Register to 25MHz clock from
@@ -1260,24 +1828,87 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
phy_data |= M88E1000_EPSCR_TX_CLK_25;
ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
if (ret_val)
- goto out;
+ return ret_val;
/* In addition, we must re-enable CRS on Tx for both half and full
* duplex.
*/
ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
-out:
return ret_val;
}
/**
- * igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
+ * @hw: pointer to the HW structure
+ *
+ * Forces the speed and duplex settings of the PHY.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
+ **/
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ DEBUGFUNC("e1000_phy_force_speed_duplex_ife");
+
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+
+ e1000_phy_force_speed_duplex_setup(hw, &data);
+
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data);
+ if (ret_val)
+ return ret_val;
+
+ /* Disable MDI-X support for 10/100 */
+ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~IFE_PMC_AUTO_MDIX;
+ data &= ~IFE_PMC_FORCE_MDIX;
+
+ ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data);
+ if (ret_val)
+ return ret_val;
+
+ DEBUGOUT1("IFE PMC: %X\n", data);
+
+ usec_delay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");
+
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ DEBUGOUT("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
* @hw: pointer to the HW structure
* @phy_ctrl: pointer to current value of PHY_CONTROL
*
@@ -1288,17 +1919,18 @@ out:
* caller must write to the PHY_CONTROL register for these settings to
* take effect.
**/
-static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
- u16 *phy_ctrl)
+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
{
struct e1000_mac_info *mac = &hw->mac;
u32 ctrl;
+ DEBUGFUNC("e1000_phy_force_speed_duplex_setup");
+
/* Turn off flow control when forcing speed/duplex */
hw->fc.current_mode = e1000_fc_none;
/* Force speed/duplex on the mac */
- ctrl = rd32(E1000_CTRL);
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
ctrl &= ~E1000_CTRL_SPD_SEL;
@@ -1312,33 +1944,32 @@ static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
ctrl &= ~E1000_CTRL_FD;
*phy_ctrl &= ~MII_CR_FULL_DUPLEX;
- hw_dbg("Half Duplex\n");
+ DEBUGOUT("Half Duplex\n");
} else {
ctrl |= E1000_CTRL_FD;
*phy_ctrl |= MII_CR_FULL_DUPLEX;
- hw_dbg("Full Duplex\n");
+ DEBUGOUT("Full Duplex\n");
}
/* Forcing 10mb or 100mb? */
if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
ctrl |= E1000_CTRL_SPD_100;
*phy_ctrl |= MII_CR_SPEED_100;
- *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
- hw_dbg("Forcing 100mb\n");
+ *phy_ctrl &= ~MII_CR_SPEED_1000;
+ DEBUGOUT("Forcing 100mb\n");
} else {
ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
- *phy_ctrl |= MII_CR_SPEED_10;
*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
- hw_dbg("Forcing 10mb\n");
+ DEBUGOUT("Forcing 10mb\n");
}
- igb_config_collision_dist(hw);
+ hw->mac.ops.config_collision_dist(hw);
- wr32(E1000_CTRL, ctrl);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
}
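/* An illustrative sketch (not part of this change) of the bit pattern the
 * routine above produces when 100 Mb/s full duplex is forced. Flow-control
 * disabling and the 10 Mb/half-duplex branches are omitted; the helper name
 * is hypothetical.
 */
static void example_force_100_full(struct e1000_hw *hw, u16 *phy_ctrl)
{
        u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);

        ctrl |= E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX;  /* force on the MAC */
        ctrl &= ~E1000_CTRL_SPD_SEL;                    /* clear old speed */
        ctrl |= E1000_CTRL_SPD_100 | E1000_CTRL_FD;     /* 100 Mb, full duplex */

        *phy_ctrl |= MII_CR_SPEED_100 | MII_CR_FULL_DUPLEX;
        *phy_ctrl &= ~MII_CR_SPEED_1000;

        E1000_WRITE_REG(hw, E1000_CTRL, ctrl);          /* commit MAC side */
        /* the caller still writes *phy_ctrl to PHY_CONTROL afterwards */
}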
/**
- * igb_set_d3_lplu_state - Sets low power link up state for D3
+ * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3
* @hw: pointer to the HW structure
* @active: boolean used to enable/disable lplu
*
@@ -1351,25 +1982,27 @@ static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
* During driver activity, SmartSpeed should be enabled so performance is
* maintained.
**/
-s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
+ s32 ret_val;
u16 data;
- if (!(hw->phy.ops.read_reg))
- goto out;
+ DEBUGFUNC("e1000_set_d3_lplu_state_generic");
+
+ if (!hw->phy.ops.read_reg)
+ return E1000_SUCCESS;
ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
if (ret_val)
- goto out;
+ return ret_val;
if (!active) {
data &= ~IGP02E1000_PM_D3_LPLU;
ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
data);
if (ret_val)
- goto out;
+ return ret_val;
/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
@@ -1380,121 +2013,121 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
IGP01E1000_PHY_PORT_CONFIG,
&data);
if (ret_val)
- goto out;
+ return ret_val;
data |= IGP01E1000_PSCFR_SMART_SPEED;
ret_val = phy->ops.write_reg(hw,
IGP01E1000_PHY_PORT_CONFIG,
data);
if (ret_val)
- goto out;
+ return ret_val;
} else if (phy->smart_speed == e1000_smart_speed_off) {
ret_val = phy->ops.read_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- &data);
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
if (ret_val)
- goto out;
+ return ret_val;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = phy->ops.write_reg(hw,
IGP01E1000_PHY_PORT_CONFIG,
data);
if (ret_val)
- goto out;
+ return ret_val;
}
} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
(phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
(phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
data |= IGP02E1000_PM_D3_LPLU;
ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
- data);
+ data);
if (ret_val)
- goto out;
+ return ret_val;
/* When LPLU is enabled, we should disable SmartSpeed */
ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
&data);
if (ret_val)
- goto out;
+ return ret_val;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
data);
}
-out:
return ret_val;
}
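/* A hedged sketch (not part of this change) of the LPLU/SmartSpeed rule the
 * function above enforces: the two features are mutually exclusive, so
 * enabling one clears the other. The helper name is hypothetical and the
 * per-read early returns of the real code are collapsed.
 */
static s32 example_set_lplu(struct e1000_hw *hw, bool lplu_on)
{
        struct e1000_phy_info *phy = &hw->phy;
        u16 pm, cfg;
        s32 ret_val;

        ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &pm);
        if (ret_val)
                return ret_val;
        ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &cfg);
        if (ret_val)
                return ret_val;

        if (lplu_on) {
                pm |= IGP02E1000_PM_D3_LPLU;            /* low power link up */
                cfg &= ~IGP01E1000_PSCFR_SMART_SPEED;   /* SmartSpeed off */
        } else {
                pm &= ~IGP02E1000_PM_D3_LPLU;
                if (phy->smart_speed == e1000_smart_speed_on)
                        cfg |= IGP01E1000_PSCFR_SMART_SPEED;
        }

        ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, pm);
        if (ret_val)
                return ret_val;
        return phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, cfg);
}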
/**
- * igb_check_downshift - Checks whether a downshift in speed occurred
+ * e1000_check_downshift_generic - Checks whether a downshift in speed occurred
* @hw: pointer to the HW structure
*
* Success returns 0, Failure returns 1
*
* A downshift is detected by querying the PHY link health.
**/
-s32 igb_check_downshift(struct e1000_hw *hw)
+s32 e1000_check_downshift_generic(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 phy_data, offset, mask;
+ DEBUGFUNC("e1000_check_downshift_generic");
+
switch (phy->type) {
case e1000_phy_i210:
case e1000_phy_m88:
case e1000_phy_gg82563:
- offset = M88E1000_PHY_SPEC_STATUS;
- mask = M88E1000_PSSR_DOWNSHIFT;
+ offset = M88E1000_PHY_SPEC_STATUS;
+ mask = M88E1000_PSSR_DOWNSHIFT;
break;
case e1000_phy_igp_2:
- case e1000_phy_igp:
case e1000_phy_igp_3:
- offset = IGP01E1000_PHY_LINK_HEALTH;
- mask = IGP01E1000_PLHR_SS_DOWNGRADE;
+ offset = IGP01E1000_PHY_LINK_HEALTH;
+ mask = IGP01E1000_PLHR_SS_DOWNGRADE;
break;
default:
/* speed downshift not supported */
phy->speed_downgraded = false;
- ret_val = 0;
- goto out;
+ return E1000_SUCCESS;
}
ret_val = phy->ops.read_reg(hw, offset, &phy_data);
if (!ret_val)
- phy->speed_downgraded = (phy_data & mask) ? true : false;
+ phy->speed_downgraded = !!(phy_data & mask);
-out:
return ret_val;
}
/**
- * igb_check_polarity_m88 - Checks the polarity.
+ * e1000_check_polarity_m88 - Checks the polarity.
* @hw: pointer to the HW structure
*
* Success returns 0, Failure returns -E1000_ERR_PHY (-2)
*
* Polarity is determined based on the PHY specific status register.
**/
-s32 igb_check_polarity_m88(struct e1000_hw *hw)
+s32 e1000_check_polarity_m88(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 data;
+ DEBUGFUNC("e1000_check_polarity_m88");
+
ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
if (!ret_val)
- phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
return ret_val;
}
/**
- * igb_check_polarity_igp - Checks the polarity.
+ * e1000_check_polarity_igp - Checks the polarity.
* @hw: pointer to the HW structure
*
* Success returns 0, Failure returns -E1000_ERR_PHY (-2)
@@ -1502,54 +2135,94 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw)
* Polarity is determined based on the PHY port status register, and the
* current speed (since there is no polarity at 100Mbps).
**/
-static s32 igb_check_polarity_igp(struct e1000_hw *hw)
+s32 e1000_check_polarity_igp(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 data, offset, mask;
+ DEBUGFUNC("e1000_check_polarity_igp");
+
/* Polarity is determined based on the speed of
* our connection.
*/
ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
if (ret_val)
- goto out;
+ return ret_val;
if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
- offset = IGP01E1000_PHY_PCS_INIT_REG;
- mask = IGP01E1000_PHY_POLARITY_MASK;
+ offset = IGP01E1000_PHY_PCS_INIT_REG;
+ mask = IGP01E1000_PHY_POLARITY_MASK;
} else {
/* This really only applies to 10Mbps since
* there is no polarity for 100Mbps (always 0).
*/
- offset = IGP01E1000_PHY_PORT_STATUS;
- mask = IGP01E1000_PSSR_POLARITY_REVERSED;
+ offset = IGP01E1000_PHY_PORT_STATUS;
+ mask = IGP01E1000_PSSR_POLARITY_REVERSED;
}
ret_val = phy->ops.read_reg(hw, offset, &data);
if (!ret_val)
- phy->cable_polarity = (data & mask)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
-out:
return ret_val;
}
/**
- * igb_wait_autoneg - Wait for auto-neg completion
+ * e1000_check_polarity_ife - Check cable polarity for IFE PHY
+ * @hw: pointer to the HW structure
+ *
+ * Polarity is determined on the polarity reversal feature being enabled.
+ **/
+s32 e1000_check_polarity_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data, offset, mask;
+
+ DEBUGFUNC("e1000_check_polarity_ife");
+
+ /* Polarity is determined based on the reversal feature being enabled.
+ */
+ if (phy->polarity_correction) {
+ offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
+ mask = IFE_PESC_POLARITY_REVERSED;
+ } else {
+ offset = IFE_PHY_SPECIAL_CONTROL;
+ mask = IFE_PSC_FORCE_POLARITY;
+ }
+
+ ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((phy_data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_wait_autoneg - Wait for auto-neg completion
* @hw: pointer to the HW structure
*
* Waits for auto-negotiation to complete or for the auto-negotiation time
* limit to expire, whichever happens first.
**/
-static s32 igb_wait_autoneg(struct e1000_hw *hw)
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
{
- s32 ret_val = 0;
+ s32 ret_val = E1000_SUCCESS;
u16 i, phy_status;
+ DEBUGFUNC("e1000_wait_autoneg");
+
+ if (!hw->phy.ops.read_reg)
+ return E1000_SUCCESS;
+
/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
@@ -1560,7 +2233,7 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw)
break;
if (phy_status & MII_SR_AUTONEG_COMPLETE)
break;
- msleep(100);
+ msec_delay(100);
}
/* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
@@ -1570,7 +2243,7 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw)
}
/**
- * igb_phy_has_link - Polls PHY for link
+ * e1000_phy_has_link_generic - Polls PHY for link
* @hw: pointer to the HW structure
* @iterations: number of times to poll for link
* @usec_interval: delay between polling attempts
@@ -1578,27 +2251,32 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw)
*
* Polls the PHY status register for link, 'iterations' number of times.
**/
-s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
- u32 usec_interval, bool *success)
+s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success)
{
- s32 ret_val = 0;
+ s32 ret_val = E1000_SUCCESS;
u16 i, phy_status;
+ DEBUGFUNC("e1000_phy_has_link_generic");
+
+ if (!hw->phy.ops.read_reg)
+ return E1000_SUCCESS;
+
for (i = 0; i < iterations; i++) {
/* Some PHYs require the PHY_STATUS register to be read
* twice due to the link bit being sticky. No harm doing
* it across the board.
*/
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
- if (ret_val && usec_interval > 0) {
+ if (ret_val) {
/* If the first read fails, another entity may have
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
if (usec_interval >= 1000)
- mdelay(usec_interval/1000);
+ msec_delay(usec_interval/1000);
else
- udelay(usec_interval);
+ usec_delay(usec_interval);
}
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
@@ -1606,18 +2284,18 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
if (phy_status & MII_SR_LINK_STATUS)
break;
if (usec_interval >= 1000)
- mdelay(usec_interval/1000);
+ msec_delay(usec_interval/1000);
else
- udelay(usec_interval);
+ usec_delay(usec_interval);
}
- *success = (i < iterations) ? true : false;
+ *success = (i < iterations);
return ret_val;
}
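/* A hedged usage sketch (not part of this change): the copper link setup
 * above polls COPPER_LINK_UP_LIMIT times with a 10 us interval, while the
 * get_phy_info routines later in this file use a single iteration with no
 * delay for a quick "is link up right now?" check, as below.
 */
static s32 example_quick_link_check(struct e1000_hw *hw, bool *link_up)
{
        /* one status read (done twice internally), no retry delay */
        return e1000_phy_has_link_generic(hw, 1, 0, link_up);
}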
/**
- * igb_get_cable_length_m88 - Determine cable length for m88 PHY
+ * e1000_get_cable_length_m88 - Determine cable length for m88 PHY
* @hw: pointer to the HW structure
*
* Reads the PHY specific status register to retrieve the cable length
@@ -1631,37 +2309,40 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
* 3 110 - 140 meters
* 4 > 140 meters
**/
-s32 igb_get_cable_length_m88(struct e1000_hw *hw)
+s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 phy_data, index;
+ DEBUGFUNC("e1000_get_cable_length_m88");
+
ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
- ret_val = -E1000_ERR_PHY;
- goto out;
- }
+ index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+
+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
+ return -E1000_ERR_PHY;
phy->min_cable_length = e1000_m88_cable_length_table[index];
phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
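/* An illustrative sketch (not part of this change) of the table lookup the
 * function above performs: the cable-length field of the PHY specific status
 * register indexes e1000_m88_cable_length_table[], and that entry together
 * with the next one bounds the estimate. The helper is hypothetical and only
 * works inside this file, where the table is defined.
 */
static void example_m88_cable_length(struct e1000_phy_info *phy, u16 phy_data)
{
        u16 index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
                    M88E1000_PSSR_CABLE_LENGTH_SHIFT;

        if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
                return;                         /* out of range: no estimate */

        phy->min_cable_length = e1000_m88_cable_length_table[index];
        phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
        phy->cable_length = (phy->min_cable_length +
                             phy->max_cable_length) / 2;
}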
-s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
+s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
- u16 phy_data, phy_data2, index, default_page, is_cm;
+ u16 phy_data, phy_data2, is_cm;
+ u16 index, default_page;
+
+ DEBUGFUNC("e1000_get_cable_length_m88_gen2");
switch (hw->phy.id) {
case I210_I_PHY_ID:
@@ -1685,28 +2366,30 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
phy->cable_length = phy_data / (is_cm ? 100 : 1);
break;
- case M88E1545_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
case I347AT4_E_PHY_ID:
/* Remember the original page select and set it to 7 */
ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
&default_page);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
if (ret_val)
- goto out;
+ return ret_val;
/* Get cable length from PHY Cable Diagnostics Control Reg */
ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
&phy_data);
if (ret_val)
- goto out;
+ return ret_val;
/* Check if the unit of cable length is meters or cm */
ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
if (ret_val)
- goto out;
+ return ret_val;
is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
@@ -1715,34 +2398,34 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
phy->cable_length = phy_data / (is_cm ? 100 : 1);
- /* Reset the page selec to its original value */
+ /* Reset the page select to its original value */
ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
default_page);
if (ret_val)
- goto out;
+ return ret_val;
break;
+
case M88E1112_E_PHY_ID:
/* Remember the original page select and set it to 5 */
ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
&default_page);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
&phy_data);
if (ret_val)
- goto out;
+ return ret_val;
index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
M88E1000_PSSR_CABLE_LENGTH_SHIFT;
- if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
- ret_val = -E1000_ERR_PHY;
- goto out;
- }
+
+ if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
+ return -E1000_ERR_PHY;
phy->min_cable_length = e1000_m88_cable_length_table[index];
phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
@@ -1754,20 +2437,18 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
default_page);
if (ret_val)
- goto out;
+ return ret_val;
break;
default:
- ret_val = -E1000_ERR_PHY;
- goto out;
+ return -E1000_ERR_PHY;
}
-out:
return ret_val;
}
/**
- * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY
* @hw: pointer to the HW structure
*
* The automatic gain control (agc) normalizes the amplitude of the
@@ -1777,10 +2458,10 @@ out:
* into a lookup table to obtain the approximate cable length
* for each channel.
**/
-s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
+s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
+ s32 ret_val;
u16 phy_data, i, agc_value = 0;
u16 cur_agc_index, max_agc_index = 0;
u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
@@ -1791,26 +2472,26 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
IGP02E1000_PHY_AGC_D
};
+ DEBUGFUNC("e1000_get_cable_length_igp_2");
+
/* Read the AGC registers for all channels */
for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
/* Getting bits 15:9, which represent the combination of
* coarse and fine gain values. The result is a number
* that can be put into the lookup table to obtain the
* approximate cable length.
*/
- cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
- IGP02E1000_AGC_LENGTH_MASK;
+ cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+ IGP02E1000_AGC_LENGTH_MASK);
/* Array index bound check. */
if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
- (cur_agc_index == 0)) {
- ret_val = -E1000_ERR_PHY;
- goto out;
- }
+ (cur_agc_index == 0))
+ return -E1000_ERR_PHY;
/* Remove min & max AGC values from calculation. */
if (e1000_igp_2_cable_length_table[min_agc_index] >
@@ -1828,18 +2509,17 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
/* Calculate cable length with the error range of +/- 10 meters. */
- phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
- (agc_value - IGP02E1000_AGC_RANGE) : 0;
+ phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+ (agc_value - IGP02E1000_AGC_RANGE) : 0);
phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
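/* A worked example (not part of this change) of the averaging done above: if
 * the four AGC channel readings map through e1000_igp_2_cable_length_table[]
 * to 20, 40, 50 and 110 meters, the minimum (20) and maximum (110) are
 * discarded, the remaining two average to 45, and the reported range becomes
 * 45 - IGP02E1000_AGC_RANGE to 45 + IGP02E1000_AGC_RANGE meters (the lower
 * bound clamped at 0), with cable_length set to the midpoint, 45.
 */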
/**
- * igb_get_phy_info_m88 - Retrieve PHY information
+ * e1000_get_phy_info_m88 - Retrieve PHY information
* @hw: pointer to the HW structure
*
* Valid for only copper links. Read the PHY status register (sticky read)
@@ -1848,54 +2528,54 @@ out:
* special status register to determine MDI/MDIx and current speed. If
* speed is 1000, then determine cable length, local and remote receiver.
**/
-s32 igb_get_phy_info_m88(struct e1000_hw *hw)
+s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 phy_data;
bool link;
+ DEBUGFUNC("e1000_get_phy_info_m88");
+
if (phy->media_type != e1000_media_type_copper) {
- hw_dbg("Phy info is only valid for copper media\n");
- ret_val = -E1000_ERR_CONFIG;
- goto out;
+ DEBUGOUT("Phy info is only valid for copper media\n");
+ return -E1000_ERR_CONFIG;
}
- ret_val = igb_phy_has_link(hw, 1, 0, &link);
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
if (ret_val)
- goto out;
+ return ret_val;
if (!link) {
- hw_dbg("Phy info is only valid if link is up\n");
- ret_val = -E1000_ERR_CONFIG;
- goto out;
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
}
ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
- phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
- ? true : false;
+ phy->polarity_correction = !!(phy_data &
+ M88E1000_PSCR_POLARITY_REVERSAL);
- ret_val = igb_check_polarity_m88(hw);
+ ret_val = e1000_check_polarity_m88(hw);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
- phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false;
+ phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX);
if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
- ret_val = phy->ops.get_cable_length(hw);
+ ret_val = hw->phy.ops.get_cable_length(hw);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
? e1000_1000t_rx_status_ok
@@ -1911,12 +2591,11 @@ s32 igb_get_phy_info_m88(struct e1000_hw *hw)
phy->remote_rx = e1000_1000t_rx_status_undefined;
}
-out:
return ret_val;
}
/**
- * igb_get_phy_info_igp - Retrieve igp PHY information
+ * e1000_get_phy_info_igp - Retrieve igp PHY information
* @hw: pointer to the HW structure
*
* Read PHY status to determine if link is up. If link is up, then
@@ -1924,44 +2603,45 @@ out:
* PHY port status to determine MDI/MDIx and speed. Based on the speed,
* determine the cable length, local and remote receiver.
**/
-s32 igb_get_phy_info_igp(struct e1000_hw *hw)
+s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 data;
bool link;
- ret_val = igb_phy_has_link(hw, 1, 0, &link);
+ DEBUGFUNC("e1000_get_phy_info_igp");
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
if (ret_val)
- goto out;
+ return ret_val;
if (!link) {
- hw_dbg("Phy info is only valid if link is up\n");
- ret_val = -E1000_ERR_CONFIG;
- goto out;
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
}
phy->polarity_correction = true;
- ret_val = igb_check_polarity_igp(hw);
+ ret_val = e1000_check_polarity_igp(hw);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
if (ret_val)
- goto out;
+ return ret_val;
- phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false;
+ phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX);
if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
ret_val = phy->ops.get_cable_length(hw);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
if (ret_val)
- goto out;
+ return ret_val;
phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
? e1000_1000t_rx_status_ok
@@ -1976,93 +2656,162 @@ s32 igb_get_phy_info_igp(struct e1000_hw *hw)
phy->remote_rx = e1000_1000t_rx_status_undefined;
}
-out:
return ret_val;
}
/**
- * igb_phy_sw_reset - PHY software reset
+ * e1000_get_phy_info_ife - Retrieves various IFE PHY states
+ * @hw: pointer to the HW structure
+ *
+ * Populates "phy" structure with various feature states.
+ **/
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ DEBUGFUNC("e1000_get_phy_info_ife");
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+ phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE);
+
+ if (phy->polarity_correction) {
+ ret_val = e1000_check_polarity_ife(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ /* Polarity is forced */
+ phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+ }
+
+ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS);
+
+ /* The following parameters are undefined for 10/100 operation. */
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_sw_reset_generic - PHY software reset
* @hw: pointer to the HW structure
*
* Does a software reset of the PHY by reading the PHY control register and
* setting/writing the control register reset bit to the PHY.
**/
-s32 igb_phy_sw_reset(struct e1000_hw *hw)
+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
{
- s32 ret_val = 0;
+ s32 ret_val;
u16 phy_ctrl;
- if (!(hw->phy.ops.read_reg))
- goto out;
+ DEBUGFUNC("e1000_phy_sw_reset_generic");
+
+ if (!hw->phy.ops.read_reg)
+ return E1000_SUCCESS;
ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
if (ret_val)
- goto out;
+ return ret_val;
phy_ctrl |= MII_CR_RESET;
ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
if (ret_val)
- goto out;
+ return ret_val;
- udelay(1);
+ usec_delay(1);
-out:
return ret_val;
}
/**
- * igb_phy_hw_reset - PHY hardware reset
+ * e1000_phy_hw_reset_generic - PHY hardware reset
* @hw: pointer to the HW structure
*
* Verify the reset block is not blocking us from resetting. Acquire
* semaphore (if necessary) and read/set/write the device control reset
* bit in the PHY. Wait the appropriate delay time for the device to
- * reset and relase the semaphore (if necessary).
+ * reset and release the semaphore (if necessary).
**/
-s32 igb_phy_hw_reset(struct e1000_hw *hw)
+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val;
+ s32 ret_val;
u32 ctrl;
- ret_val = igb_check_reset_block(hw);
- if (ret_val) {
- ret_val = 0;
- goto out;
+ DEBUGFUNC("e1000_phy_hw_reset_generic");
+
+ if (phy->ops.check_reset_block) {
+ ret_val = phy->ops.check_reset_block(hw);
+ if (ret_val)
+ return E1000_SUCCESS;
}
ret_val = phy->ops.acquire(hw);
if (ret_val)
- goto out;
+ return ret_val;
- ctrl = rd32(E1000_CTRL);
- wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
- wrfl();
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+ E1000_WRITE_FLUSH(hw);
- udelay(phy->reset_delay_us);
+ usec_delay(phy->reset_delay_us);
- wr32(E1000_CTRL, ctrl);
- wrfl();
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ E1000_WRITE_FLUSH(hw);
- udelay(150);
+ usec_delay(150);
phy->ops.release(hw);
- ret_val = phy->ops.get_cfg_done(hw);
+ return phy->ops.get_cfg_done(hw);
+}
-out:
- return ret_val;
+/**
+ * e1000_get_cfg_done_generic - Generic configuration done
+ * @hw: pointer to the HW structure
+ *
+ * Generic function to wait 10 milli-seconds for configuration to complete
+ * and return success.
+ **/
+s32 e1000_get_cfg_done_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+ DEBUGFUNC("e1000_get_cfg_done_generic");
+
+ msec_delay_irq(10);
+
+ return E1000_SUCCESS;
}
/**
- * igb_phy_init_script_igp3 - Inits the IGP3 PHY
+ * e1000_phy_init_script_igp3 - Inits the IGP3 PHY
* @hw: pointer to the HW structure
*
* Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
**/
-s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
+s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
{
- hw_dbg("Running IGP 3 PHY init script\n");
+ DEBUGOUT("Running IGP 3 PHY init script\n");
/* PHY init IGP 3 */
/* Enable rise/fall, 10-mode work in class-A */
@@ -2073,7 +2822,7 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
/* Increase Hybrid poly bias */
hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
- /* Add 4% to TX amplitude in Giga mode */
+ /* Add 4% to Tx amplitude in Gig mode */
hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
/* Disable trimming (TTT) */
hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
@@ -2134,147 +2883,207 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
/* Restart AN, Speed selection is 1000 */
hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
- return 0;
+ return E1000_SUCCESS;
}
/**
- * igb_power_up_phy_copper - Restore copper link in case of PHY power down
+ * e1000_get_phy_type_from_id - Get PHY type from id
+ * @phy_id: phy_id read from the phy
+ *
+ * Returns the phy type from the id.
+ **/
+enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
+{
+ enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+ switch (phy_id) {
+ case M88E1000_I_PHY_ID:
+ case M88E1000_E_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ case M88E1011_I_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ phy_type = e1000_phy_m88;
+ break;
+ case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
+ phy_type = e1000_phy_igp_2;
+ break;
+ case GG82563_E_PHY_ID:
+ phy_type = e1000_phy_gg82563;
+ break;
+ case IGP03E1000_E_PHY_ID:
+ phy_type = e1000_phy_igp_3;
+ break;
+ case IFE_E_PHY_ID:
+ case IFE_PLUS_E_PHY_ID:
+ case IFE_C_E_PHY_ID:
+ phy_type = e1000_phy_ife;
+ break;
+ case I82580_I_PHY_ID:
+ phy_type = e1000_phy_82580;
+ break;
+ case I210_I_PHY_ID:
+ phy_type = e1000_phy_i210;
+ break;
+ default:
+ phy_type = e1000_phy_unknown;
+ break;
+ }
+ return phy_type;
+}
+
+/**
+ * e1000_determine_phy_address - Determines PHY address.
+ * @hw: pointer to the HW structure
+ *
+ * This uses a trial and error method to loop through possible PHY
+ * addresses. It tests each by reading the PHY ID registers and
+ * checking for a match.
+ **/
+s32 e1000_determine_phy_address(struct e1000_hw *hw)
+{
+ u32 phy_addr = 0;
+ u32 i;
+ enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+ hw->phy.id = phy_type;
+
+ for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
+ hw->phy.addr = phy_addr;
+ i = 0;
+
+ do {
+ e1000_get_phy_id(hw);
+ phy_type = e1000_get_phy_type_from_id(hw->phy.id);
+
+ /* If phy_type is valid, break - we found our
+ * PHY address
+ */
+ if (phy_type != e1000_phy_unknown)
+ return E1000_SUCCESS;
+
+ msec_delay(1);
+ i++;
+ } while (i < 10);
+ }
+
+ return -E1000_ERR_PHY_TYPE;
+}
+
+/**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
* @hw: pointer to the HW structure
*
* In the case of a PHY power down to save power, or to turn off link during a
- * driver unload, restore the link to previous settings.
+ * driver unload, or when wake on lan is not enabled, restore the link to its
+ * previous settings.
**/
-void igb_power_up_phy_copper(struct e1000_hw *hw)
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
- u16 power_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN;
- if (hw->phy.type == e1000_phy_i210) {
- hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
- power_reg &= ~GS40G_CS_POWER_DOWN;
- hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
- }
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
}
/**
- * igb_power_down_phy_copper - Power down copper PHY
+ * e1000_power_down_phy_copper - Power down copper PHY
* @hw: pointer to the HW structure
*
- * Power down PHY to save power when interface is down and wake on lan
- * is not enabled.
+ * Power down the PHY to save power when the interface is down, or to turn
+ * off link during a driver unload, when wake on lan is not enabled. The PHY
+ * retains its settings across the power down/up cycle.
**/
-void igb_power_down_phy_copper(struct e1000_hw *hw)
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
- u16 power_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
-
- /* i210 Phy requires an additional bit for power up/down */
- if (hw->phy.type == e1000_phy_i210) {
- hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
- power_reg |= GS40G_CS_POWER_DOWN;
- hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
- }
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
- msleep(1);
+ msec_delay(1);
}
/**
- * igb_check_polarity_82580 - Checks the polarity.
+ * e1000_check_polarity_82577 - Checks the polarity.
* @hw: pointer to the HW structure
*
* Success returns 0, Failure returns -E1000_ERR_PHY (-2)
*
* Polarity is determined based on the PHY specific status register.
**/
-static s32 igb_check_polarity_82580(struct e1000_hw *hw)
+s32 e1000_check_polarity_82577(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 data;
+ DEBUGFUNC("e1000_check_polarity_82577");
- ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
+ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
if (!ret_val)
- phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
return ret_val;
}
/**
- * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY
+ * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
* @hw: pointer to the HW structure
*
- * Calls the PHY setup function to force speed and duplex. Clears the
- * auto-crossover to force MDI manually. Waits for link and returns
- * successful if link up is successful, else -E1000_ERR_PHY (-2).
+ * Calls the PHY setup function to force speed and duplex.
**/
-s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 phy_data;
bool link;
+ DEBUGFUNC("e1000_phy_force_speed_duplex_82577");
+
ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
- igb_phy_force_speed_duplex_setup(hw, &phy_data);
+ e1000_phy_force_speed_duplex_setup(hw, &phy_data);
ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
if (ret_val)
- goto out;
-
- /* Clear Auto-Crossover to force MDI manually. 82580 requires MDI
- * forced whenever speed and duplex are forced.
- */
- ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
- if (ret_val)
- goto out;
-
- phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
-
- ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
- if (ret_val)
- goto out;
-
- hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data);
+ return ret_val;
- udelay(1);
+ usec_delay(1);
if (phy->autoneg_wait_to_complete) {
- hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n");
+ DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n");
- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
if (ret_val)
- goto out;
+ return ret_val;
if (!link)
- hw_dbg("Link taking longer than expected.\n");
+ DEBUGOUT("Link taking longer than expected.\n");
/* Try once more */
- ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
- if (ret_val)
- goto out;
+ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
}
-out:
return ret_val;
}
/**
- * igb_get_phy_info_82580 - Retrieve I82580 PHY information
+ * e1000_get_phy_info_82577 - Retrieve I82577 PHY information
* @hw: pointer to the HW structure
*
* Read PHY status to determine if link is up. If link is up, then
@@ -2282,44 +3091,45 @@ out:
* PHY port status to determine MDI/MDIx and speed. Based on the speed,
* determine the cable length, local and remote receiver.
**/
-s32 igb_get_phy_info_82580(struct e1000_hw *hw)
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 data;
bool link;
- ret_val = igb_phy_has_link(hw, 1, 0, &link);
+ DEBUGFUNC("e1000_get_phy_info_82577");
+
+ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
if (ret_val)
- goto out;
+ return ret_val;
if (!link) {
- hw_dbg("Phy info is only valid if link is up\n");
- ret_val = -E1000_ERR_CONFIG;
- goto out;
+ DEBUGOUT("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
}
phy->polarity_correction = true;
- ret_val = igb_check_polarity_82580(hw);
+ ret_val = e1000_check_polarity_82577(hw);
if (ret_val)
- goto out;
+ return ret_val;
- ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
+ ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
if (ret_val)
- goto out;
+ return ret_val;
- phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false;
+ phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX);
- if ((data & I82580_PHY_STATUS2_SPEED_MASK) ==
- I82580_PHY_STATUS2_SPEED_1000MBPS) {
+ if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
+ I82577_PHY_STATUS2_SPEED_1000MBPS) {
ret_val = hw->phy.ops.get_cable_length(hw);
if (ret_val)
- goto out;
+ return ret_val;
ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
if (ret_val)
- goto out;
+ return ret_val;
phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
? e1000_1000t_rx_status_ok
@@ -2334,63 +3144,64 @@ s32 igb_get_phy_info_82580(struct e1000_hw *hw)
phy->remote_rx = e1000_1000t_rx_status_undefined;
}
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_get_cable_length_82580 - Determine cable length for 82580 PHY
+ * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
* @hw: pointer to the HW structure
*
* Reads the diagnostic status register and verifies result is valid before
* placing it in the phy_cable_length field.
**/
-s32 igb_get_cable_length_82580(struct e1000_hw *hw)
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 phy_data, length;
- ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data);
+ DEBUGFUNC("e1000_get_cable_length_82577");
+
+ ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
- length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >>
- I82580_DSTATUS_CABLE_LENGTH_SHIFT;
+ length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+ I82577_DSTATUS_CABLE_LENGTH_SHIFT);
if (length == E1000_CABLE_LENGTH_UNDEFINED)
- ret_val = -E1000_ERR_PHY;
+ return -E1000_ERR_PHY;
phy->cable_length = length;
-out:
- return ret_val;
+ return E1000_SUCCESS;
}
/**
- * igb_write_phy_reg_gs40g - Write GS40G PHY register
+ * e1000_write_phy_reg_gs40g - Write GS40G PHY register
* @hw: pointer to the HW structure
- * @offset: lower half is register offset to write to
- * upper half is page to use.
+ * @offset: register offset to write to
* @data: data to write at register offset
*
* Acquires semaphore, if necessary, then writes the data to PHY register
* at the offset. Release any acquired semaphores before exiting.
**/
-s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data)
+s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data)
{
s32 ret_val;
u16 page = offset >> GS40G_PAGE_SHIFT;
+ DEBUGFUNC("e1000_write_phy_reg_gs40g");
+
offset = offset & GS40G_OFFSET_MASK;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
if (ret_val)
goto release;
- ret_val = igb_write_phy_reg_mdic(hw, offset, data);
+ ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
release:
hw->phy.ops.release(hw);
@@ -2398,7 +3209,7 @@ release:
}
/**
- * igb_read_phy_reg_gs40g - Read GS40G PHY register
+ * e1000_read_phy_reg_gs40g - Read GS40G PHY register
* @hw: pointer to the HW structure
* @offset: lower half is register offset to read from
* upper half is page to use.
@@ -2407,20 +3218,22 @@ release:
* Acquires semaphore, if necessary, then reads the data in the PHY register
* at the offset. Release any acquired semaphores before exiting.
**/
-s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
+s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
{
s32 ret_val;
u16 page = offset >> GS40G_PAGE_SHIFT;
+ DEBUGFUNC("e1000_read_phy_reg_gs40g");
+
offset = offset & GS40G_OFFSET_MASK;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+ ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
if (ret_val)
goto release;
- ret_val = igb_read_phy_reg_mdic(hw, offset, data);
+ ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
release:
hw->phy.ops.release(hw);
@@ -2428,41 +3241,156 @@ release:
}
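/* A hedged sketch (not part of this change) of how the GS40G accessors above
 * expect their 'offset' argument to be built: the page number goes in the
 * upper half and the register number in the lower half. The page and register
 * values below are placeholders, not documented GS40G registers.
 */
static s32 example_read_gs40g(struct e1000_hw *hw, u16 *data)
{
        u32 page = 0;                   /* hypothetical page */
        u32 reg = 0x10;                 /* hypothetical register on that page */
        u32 offset = (page << GS40G_PAGE_SHIFT) | (reg & GS40G_OFFSET_MASK);

        return e1000_read_phy_reg_gs40g(hw, offset, data);
}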
/**
- * igb_set_master_slave_mode - Setup PHY for Master/slave mode
+ * e1000_read_phy_reg_mphy - Read mPHY control register
* @hw: pointer to the HW structure
+ * @address: address to be read
+ * @data: pointer to the read data
*
- * Sets up Master/slave mode
+ * Reads the mPHY control register in the PHY at offset and stores the
+ * information read to data.
**/
-static s32 igb_set_master_slave_mode(struct e1000_hw *hw)
+s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data)
{
- s32 ret_val;
- u16 phy_data;
+ u32 mphy_ctrl = 0;
+ bool locked = false;
+ bool ready;
- /* Resolve Master/Slave mode */
- ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data);
- if (ret_val)
- return ret_val;
+ DEBUGFUNC("e1000_read_phy_reg_mphy");
- /* load defaults for future use */
- hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
- ((phy_data & CR_1000T_MS_VALUE) ?
- e1000_ms_force_master :
- e1000_ms_force_slave) : e1000_ms_auto;
+ /* Check if mPHY is ready to read/write operations */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
- switch (hw->phy.ms_type) {
- case e1000_ms_force_master:
- phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
- break;
- case e1000_ms_force_slave:
- phy_data |= CR_1000T_MS_ENABLE;
- phy_data &= ~(CR_1000T_MS_VALUE);
- break;
- case e1000_ms_auto:
- phy_data &= ~CR_1000T_MS_ENABLE;
- /* fall-through */
- default:
+ /* Check if mPHY access is disabled and enable it if so */
+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
+ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
+ locked = true;
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+ }
+
+ /* Set the address that we want to read */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* Mask the address so that only the current lane is used */
+ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK &
+ ~E1000_MPHY_ADDRESS_FNC_OVERRIDE) |
+ (address & E1000_MPHY_ADDRESS_MASK);
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+
+ /* Read data from the address */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ *data = E1000_READ_REG(hw, E1000_MPHY_DATA);
+
+ /* Disable access to mPHY if it was originally disabled */
+ if (locked)
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
+ E1000_MPHY_DIS_ACCESS);
+
+ return E1000_SUCCESS;
+}
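/* A hedged usage sketch (not part of this change) of the mPHY accessor above;
 * the address is a placeholder, not a documented mPHY register, and the call
 * fails with -E1000_ERR_PHY if the mPHY stays busy.
 */
static s32 example_read_mphy(struct e1000_hw *hw, u32 *val)
{
        u32 addr = 0x0;         /* hypothetical address on the current lane */

        return e1000_read_phy_reg_mphy(hw, addr, val);
}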
+
+/**
+ * e1000_write_phy_reg_mphy - Write mPHY control register
+ * @hw: pointer to the HW structure
+ * @address: address to write to
+ * @data: data to write to register at offset
+ * @line_override: used when we want to use a different line than the default one
+ *
+ * Writes data to mPHY control register.
+ **/
+s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
+ bool line_override)
+{
+ u32 mphy_ctrl = 0;
+ bool locked = false;
+ bool ready;
+
+ DEBUGFUNC("e1000_write_phy_reg_mphy");
+
+ /* Check if mPHY is ready to read/write operations */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* Check if mPHY access is disabled and enable it if so */
+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
+ if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
+ locked = true;
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+ }
+
+ /* Set the address that we want to write to */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+
+ /* Mask the address so that only the current lane is used */
+ if (line_override)
+ mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE;
+ else
+ mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE;
+ mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) |
+ (address & E1000_MPHY_ADDRESS_MASK);
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+
+ /* Write data to the address */
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ E1000_WRITE_REG(hw, E1000_MPHY_DATA, data);
+
+ /* Disable access to mPHY if it was originally disabled */
+ if (locked)
+ ready = e1000_is_mphy_ready(hw);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
+ E1000_MPHY_DIS_ACCESS);
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_is_mphy_ready - Check if mPHY control register is not busy
+ * @hw: pointer to the HW structure
+ *
+ * Returns mPHY control register status.
+ **/
+bool e1000_is_mphy_ready(struct e1000_hw *hw)
+{
+ u16 retry_count = 0;
+ u32 mphy_ctrl = 0;
+ bool ready = false;
+
+ while (retry_count < 2) {
+ mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
+ if (mphy_ctrl & E1000_MPHY_BUSY) {
+ usec_delay(20);
+ retry_count++;
+ continue;
+ }
+ ready = true;
break;
}
- return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data);
+ if (!ready)
+ DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n");
+
+ return ready;
}
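
[Editor's note, not part of the patch: the two mPHY accessors above can be used as a pair in a read-modify-write sequence, with the line_override argument of the write selecting whether the fnc_override bit is set so a line other than the default one is targeted. The sketch below shows one plausible caller under that assumption; EXAMPLE_MPHY_ADDR and EXAMPLE_BIT are hypothetical values chosen purely for illustration, and error handling simply propagates the helpers' return codes.]

/* Illustrative caller sketch -- not driver code from this patch. */
#define EXAMPLE_MPHY_ADDR	0x0000		/* hypothetical mPHY register address */
#define EXAMPLE_BIT		0x00000001	/* hypothetical bit to set */

static s32 example_mphy_set_bit(struct e1000_hw *hw)
{
	u32 val;
	s32 ret;

	/* Read the current value on the currently selected lane */
	ret = e1000_read_phy_reg_mphy(hw, EXAMPLE_MPHY_ADDR, &val);
	if (ret != E1000_SUCCESS)
		return ret;

	val |= EXAMPLE_BIT;

	/* Write it back; false keeps the default lane (no fnc_override) */
	return e1000_write_phy_reg_mphy(hw, EXAMPLE_MPHY_ADDR, val, false);
}
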
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 784fd1c40989..a109c914ce8d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,14 +12,11 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -28,122 +25,91 @@
#ifndef _E1000_PHY_H_
#define _E1000_PHY_H_
-enum e1000_ms_type {
- e1000_ms_hw_default = 0,
- e1000_ms_force_master,
- e1000_ms_force_slave,
- e1000_ms_auto
-};
-
-enum e1000_smart_speed {
- e1000_smart_speed_default = 0,
- e1000_smart_speed_on,
- e1000_smart_speed_off
-};
-
-s32 igb_check_downshift(struct e1000_hw *hw);
-s32 igb_check_reset_block(struct e1000_hw *hw);
-s32 igb_copper_link_setup_igp(struct e1000_hw *hw);
-s32 igb_copper_link_setup_m88(struct e1000_hw *hw);
-s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw);
-s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
-s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw);
-s32 igb_get_cable_length_m88(struct e1000_hw *hw);
-s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw);
-s32 igb_get_cable_length_igp_2(struct e1000_hw *hw);
-s32 igb_get_phy_id(struct e1000_hw *hw);
-s32 igb_get_phy_info_igp(struct e1000_hw *hw);
-s32 igb_get_phy_info_m88(struct e1000_hw *hw);
-s32 igb_phy_sw_reset(struct e1000_hw *hw);
-s32 igb_phy_hw_reset(struct e1000_hw *hw);
-s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active);
-s32 igb_setup_copper_link(struct e1000_hw *hw);
-s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
-s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
+void e1000_init_phy_ops_generic(struct e1000_hw *hw);
+s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+void e1000_null_phy_generic(struct e1000_hw *hw);
+s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active);
+s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_null_set_page(struct e1000_hw *hw, u16 data);
+s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
+s32 e1000_check_downshift_generic(struct e1000_hw *hw);
+s32 e1000_check_polarity_m88(struct e1000_hw *hw);
+s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+s32 e1000_check_polarity_ife(struct e1000_hw *hw);
+s32 e1000_check_reset_block_generic(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_igp(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_m88(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+s32 e1000_get_cable_length_m88(struct e1000_hw *hw);
+s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw);
+s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw);
+s32 e1000_get_cfg_done_generic(struct e1000_hw *hw);
+s32 e1000_get_phy_id(struct e1000_hw *hw);
+s32 e1000_get_phy_info_igp(struct e1000_hw *hw);
+s32 e1000_get_phy_info_m88(struct e1000_hw *hw);
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw);
+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw);
+s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw);
+s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
+s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
+s32 e1000_setup_copper_link_generic(struct e1000_hw *hw);
+s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
u32 usec_interval, bool *success);
-void igb_power_up_phy_copper(struct e1000_hw *hw);
-void igb_power_down_phy_copper(struct e1000_hw *hw);
-s32 igb_phy_init_script_igp3(struct e1000_hw *hw);
-s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
-s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
-s32 igb_copper_link_setup_82580(struct e1000_hw *hw);
-s32 igb_get_phy_info_82580(struct e1000_hw *hw);
-s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
-s32 igb_get_cable_length_82580(struct e1000_hw *hw);
-s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data);
-s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data);
-s32 igb_check_polarity_m88(struct e1000_hw *hw);
+s32 e1000_phy_init_script_igp3(struct e1000_hw *hw);
+enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id);
+s32 e1000_determine_phy_address(struct e1000_hw *hw);
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+void e1000_power_up_phy_copper(struct e1000_hw *hw);
+void e1000_power_down_phy_copper(struct e1000_hw *hw);
+s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
+s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
+s32 e1000_check_polarity_82577(struct e1000_hw *hw);
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
+s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data);
+s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
+ bool line_override);
+bool e1000_is_mphy_ready(struct e1000_hw *hw);
+
+#define E1000_MAX_PHY_ADDR 8
/* IGP01E1000 Specific Registers */
-#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
-#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
-#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
-#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
-#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
-#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
-#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
-#define IGP01E1000_PHY_POLARITY_MASK 0x0078
-#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
-#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
-#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
-
-#define I82580_ADDR_REG 16
-#define I82580_CFG_REG 22
-#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15)
-#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
-#define I82580_CTRL_REG 23
-#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10)
-
-/* 82580 specific PHY registers */
-#define I82580_PHY_CTRL_2 18
-#define I82580_PHY_LBK_CTRL 19
-#define I82580_PHY_STATUS_2 26
-#define I82580_PHY_DIAG_STATUS 31
-
-/* I82580 PHY Status 2 */
-#define I82580_PHY_STATUS2_REV_POLARITY 0x0400
-#define I82580_PHY_STATUS2_MDIX 0x0800
-#define I82580_PHY_STATUS2_SPEED_MASK 0x0300
-#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200
-#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100
-
-/* I82580 PHY Control 2 */
-#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200
-#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
-#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600
-
-/* I82580 PHY Diagnostics Status */
-#define I82580_DSTATUS_CABLE_LENGTH 0x03FC
-#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2
-
-/* 82580 PHY Power Management */
-#define E1000_82580_PHY_POWER_MGMT 0xE14
-#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */
-#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */
-#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */
-#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */
-
-/* Enable flexible speed on link-up */
-#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
-#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
-#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
-#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
-#define IGP01E1000_PSSR_MDIX 0x0800
-#define IGP01E1000_PSSR_SPEED_MASK 0xC000
-#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
-#define IGP02E1000_PHY_CHANNEL_NUM 4
-#define IGP02E1000_PHY_AGC_A 0x11B1
-#define IGP02E1000_PHY_AGC_B 0x12B1
-#define IGP02E1000_PHY_AGC_C 0x14B1
-#define IGP02E1000_PHY_AGC_D 0x18B1
-#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */
-#define IGP02E1000_AGC_LENGTH_MASK 0x7F
-#define IGP02E1000_AGC_RANGE 15
-
-#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
+#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
+#define IGP_PAGE_SHIFT 5
+#define PHY_REG_MASK 0x1F
/* GS40G - I210 PHY defines */
#define GS40G_PAGE_SELECT 0x16
@@ -154,7 +120,133 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
#define GS40G_MAC_LB 0x4140
#define GS40G_MAC_SPEED_1G 0X0006
#define GS40G_COPPER_SPEC 0x0010
-#define GS40G_CS_POWER_DOWN 0x0002
-#define GS40G_LINE_LB 0x4000
+
+#define HV_INTC_FC_PAGE_START 768
+#define I82578_ADDR_REG 29
+#define I82577_ADDR_REG 16
+#define I82577_CFG_REG 22
+#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
+#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
+#define I82577_CTRL_REG 23
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2 18
+#define I82577_PHY_LBK_CTRL 19
+#define I82577_PHY_STATUS_2 26
+#define I82577_PHY_DIAG_STATUS 31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
+#define I82577_PHY_STATUS2_MDIX 0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* 82580 PHY Power Management */
+#define E1000_82580_PHY_POWER_MGMT 0xE14
+#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */
+#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */
+#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */
+#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */
+
+#define E1000_MPHY_DIS_ACCESS 0x80000000 /* disable_access bit */
+#define E1000_MPHY_ENA_ACCESS 0x40000000 /* enable_access bit */
+#define E1000_MPHY_BUSY 0x00010000 /* busy bit */
+#define E1000_MPHY_ADDRESS_FNC_OVERRIDE 0x20000000 /* fnc_override bit */
+#define E1000_MPHY_ADDRESS_MASK 0x0000FFFF /* address mask */
+
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK 0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
+
+#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX 0x0800
+#define IGP01E1000_PSSR_SPEED_MASK 0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM 4
+#define IGP02E1000_PHY_AGC_A 0x11B1
+#define IGP02E1000_PHY_AGC_B 0x12B1
+#define IGP02E1000_PHY_AGC_C 0x14B1
+#define IGP02E1000_PHY_AGC_D 0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse=15:13, Fine=12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK 0x7F
+#define IGP02E1000_AGC_RANGE 15
+
+#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
+
+#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
+#define E1000_KMRNCTRLSTA_REN 0x00200000
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */
+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED 0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
+#define IFE_PSC_FORCE_POLARITY 0x0020
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE 0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */
+
+/* SFP modules ID memory locations */
+#define E1000_SFF_IDENTIFIER_OFFSET 0x00
+#define E1000_SFF_IDENTIFIER_SFF 0x02
+#define E1000_SFF_IDENTIFIER_SFP 0x03
+
+#define E1000_SFF_ETH_FLAGS_OFFSET 0x06
+/* Flags for SFP modules compatible with ETH up to 1Gb */
+struct sfp_e1000_flags {
+ u8 e1000_base_sx:1;
+ u8 e1000_base_lx:1;
+ u8 e1000_base_cx:1;
+ u8 e1000_base_t:1;
+ u8 e100_base_lx:1;
+ u8 e100_base_fx:1;
+ u8 e10_base_bx10:1;
+ u8 e10_base_px:1;
+};
+
+/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
+#define E1000_SFF_VENDOR_OUI_TYCO 0x00407600
+#define E1000_SFF_VENDOR_OUI_FTL 0x00906500
+#define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00
+#define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100
#endif
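
[Editor's note, not part of the patch: to put the new SFP additions in context (e1000_read_sfp_data_byte(), the E1000_SFF_* defines and struct sfp_e1000_flags), the sketch below shows one plausible way they could be combined to detect a 1Gb optical module. The check logic, and the assumption that the raw SFF byte offsets can be passed directly to e1000_read_sfp_data_byte(), are illustrative assumptions rather than code taken from the driver.]

/* Illustrative sketch -- not driver code from this patch. */
static bool example_sfp_is_1g_optical(struct e1000_hw *hw)
{
	u8 identifier = 0;
	u8 flags_byte = 0;
	struct sfp_e1000_flags *flags;

	/* Byte 0 of the module ID memory identifies an SFF/SFP module */
	if (e1000_read_sfp_data_byte(hw, E1000_SFF_IDENTIFIER_OFFSET,
				     &identifier) != E1000_SUCCESS)
		return false;
	if (identifier != E1000_SFF_IDENTIFIER_SFP &&
	    identifier != E1000_SFF_IDENTIFIER_SFF)
		return false;

	/* Byte 6 carries the Ethernet compliance flags, which map onto
	 * the bitfield struct defined above
	 */
	if (e1000_read_sfp_data_byte(hw, E1000_SFF_ETH_FLAGS_OFFSET,
				     &flags_byte) != E1000_SUCCESS)
		return false;

	flags = (struct sfp_e1000_flags *)&flags_byte;
	return flags->e1000_base_sx || flags->e1000_base_lx;
}
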
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 82632c6c53af..d488c516a72f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,14 +12,11 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -28,121 +25,172 @@
#ifndef _E1000_REGS_H_
#define _E1000_REGS_H_
-#define E1000_CTRL 0x00000 /* Device Control - RW */
-#define E1000_STATUS 0x00008 /* Device Status - RO */
-#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
-#define E1000_EERD 0x00014 /* EEPROM Read - RW */
-#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
-#define E1000_MDIC 0x00020 /* MDI Control - RW */
-#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */
-#define E1000_SCTL 0x00024 /* SerDes Control - RW */
-#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
-#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
-#define E1000_FCT 0x00030 /* Flow Control Type - RW */
-#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
-#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
-#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
-#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
-#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
-#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
-#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
-#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
-#define E1000_RCTL 0x00100 /* RX Control - RW */
-#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
-#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
-#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
-#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
-#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */
-#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
-#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
-#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
-#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
-#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */
-#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
-#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
-#define E1000_TCTL 0x00400 /* TX Control - RW */
-#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */
-#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */
-#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
-#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
-#define E1000_LEDMUX 0x08130 /* LED MUX Control */
-#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
-#define E1000_PBS 0x01008 /* Packet Buffer Size */
-#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
-#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
-#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
-#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
-#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */
-#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
-#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
-#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
-#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
-#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */
-#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */
-#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */
-#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */
-#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */
-#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */
-#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD 0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_FLA 0x0001C /* Flash Access - RW */
+#define E1000_MDIC 0x00020 /* MDI Control - RW */
+#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */
+#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */
+#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */
+#define E1000_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */
+#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */
+#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */
+#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */
#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */
#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */
#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */
+#define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */
+#define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */
+#define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/
+#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+#define E1000_FCT 0x00030 /* Flow Control Type - RW */
+#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
+#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL 0x00100 /* Rx Control - RW */
+#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */
+#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */
+#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
+#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - WO */
+#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */
+#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */
+#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+#define E1000_TCTL 0x00400 /* Tx Control - RW */
+#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */
+#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */
+#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_LEDMUX 0x08130 /* LED MUX Control */
+#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
+#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
+#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
+#define E1000_PBS 0x01008 /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEMNGCTL_I210 0x01010 /* i210 MNG EEprom Mode Control */
+#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
+#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
+#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
+#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
+#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
+#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
+#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */
+#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */
+#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */
+#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */
+#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */
+#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */
+#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */
+#define E1000_I2C_CLK_STRETCH_DIS 0x00008000 /* I2C- Dis Clk Stretching */
+#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */
+#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */
+#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
+#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */
+#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */
+#define E1000_ICR_V2 0x01500 /* Intr Cause - new location - RC */
+#define E1000_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */
+#define E1000_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */
+#define E1000_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */
+#define E1000_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */
+#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
+#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */
+#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
+/* Split and Replication Rx Control - RW */
+#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */
+#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */
+#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */
+#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */
+#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */
+#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */
+#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */
+#define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */
+#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */
+#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
+#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */
+#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */
+#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */
+#define E1000_I210_FLMNGCTL 0x12038
+#define E1000_I210_FLMNGDATA 0x1203C
+#define E1000_I210_FLMNGCNT 0x12040
-/* IEEE 1588 TIMESYNCH */
-#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
-#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
-#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
-#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
-#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
-#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
-#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
-#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
-#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
-#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
-#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
-#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
-#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
-#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
-#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
-#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
+#define E1000_I210_FLSWCTL 0x12048
+#define E1000_I210_FLSWDATA 0x1204C
+#define E1000_I210_FLSWCNT 0x12050
-/* Filtering Registers */
-#define E1000_SAQF(_n) (0x5980 + 4 * (_n))
-#define E1000_DAQF(_n) (0x59A0 + 4 * (_n))
-#define E1000_SPQF(_n) (0x59C0 + 4 * (_n))
-#define E1000_FTQF(_n) (0x59E0 + 4 * (_n))
-#define E1000_SAQF0 E1000_SAQF(0)
-#define E1000_DAQF0 E1000_DAQF(0)
-#define E1000_SPQF0 E1000_SPQF(0)
-#define E1000_FTQF0 E1000_FTQF(0)
-#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
-#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
-
-#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
+#define E1000_I210_FLA 0x1201C
-/* DMA Coalescing registers */
-#define E1000_DMACR 0x02508 /* Control Register */
-#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
-#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
-#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
-#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
-#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
-#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
+#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
+#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
-/* TX Rate Limit Registers */
-#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
-#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */
-#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
+/* QAV Tx mode control register */
+#define E1000_I210_TQAVCTRL 0x3570
-/* Split and Replication RX Control - RW */
-#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+/* QAV Tx mode control register bitfields masks */
+/* QAV enable */
+#define E1000_TQAVCTRL_MODE (1 << 0)
+/* Fetching arbitration type */
+#define E1000_TQAVCTRL_FETCH_ARB (1 << 4)
+/* Fetching timer enable */
+#define E1000_TQAVCTRL_FETCH_TIMER_ENABLE (1 << 5)
+/* Launch arbitration type */
+#define E1000_TQAVCTRL_LAUNCH_ARB (1 << 8)
+/* Launch timer enable */
+#define E1000_TQAVCTRL_LAUNCH_TIMER_ENABLE (1 << 9)
+/* SP waits for SR enable */
+#define E1000_TQAVCTRL_SP_WAIT_SR (1 << 10)
+/* Fetching timer correction */
+#define E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET 16
+#define E1000_TQAVCTRL_FETCH_TIMER_DELTA \
+ (0xFFFF << E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET)
-/* Thermal sensor configuration and status registers */
-#define E1000_THMJT 0x08100 /* Junction Temperature */
-#define E1000_THLOWTC 0x08104 /* Low Threshold Control */
-#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */
-#define E1000_THHIGHTC 0x0810C /* High Threshold Control */
-#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
+/* High credit registers where _n can be 0 or 1. */
+#define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n))
+
+/* Queues fetch arbitration priority control register */
+#define E1000_I210_TQAVARBCTRL 0x3574
+/* Queues priority masks where _n and _p can be 0-3. */
+#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * (_n)))
+/* QAV Tx mode control registers where _n can be 0 or 1. */
+#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n))
+
+/* QAV Tx mode control register bitfields masks */
+#define E1000_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */
+#define E1000_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */
+#define E1000_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. SR Tx mode */
+
+/* Good transmitted packets counter registers */
+#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n)))
+
+/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */
+#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * (_n)))
+
+#define E1000_MMDAC 13 /* MMD Access Control */
+#define E1000_MMDAAD 14 /* MMD Access Address/Data */
/* Convenience macros
*
@@ -151,250 +199,442 @@
* Example usage:
* E1000_RDBAL_REG(current_rx_queue)
*/
-#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \
- : (0x0C000 + ((_n) * 0x40)))
-#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \
- : (0x0C004 + ((_n) * 0x40)))
-#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \
- : (0x0C008 + ((_n) * 0x40)))
-#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \
- : (0x0C00C + ((_n) * 0x40)))
-#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \
- : (0x0C010 + ((_n) * 0x40)))
-#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \
- : (0x0C018 + ((_n) * 0x40)))
-#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \
- : (0x0C028 + ((_n) * 0x40)))
-#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \
- : (0x0E000 + ((_n) * 0x40)))
-#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \
- : (0x0E004 + ((_n) * 0x40)))
-#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \
- : (0x0E008 + ((_n) * 0x40)))
-#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \
- : (0x0E010 + ((_n) * 0x40)))
-#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \
- : (0x0E018 + ((_n) * 0x40)))
-#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
- : (0x0E028 + ((_n) * 0x40)))
-#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
- (0x0C014 + ((_n) * 0x40)))
+#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+ (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+ (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+ (0x0C008 + ((_n) * 0x40)))
+#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
+ (0x0C00C + ((_n) * 0x40)))
+#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+ (0x0C010 + ((_n) * 0x40)))
+#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
+ (0x0C014 + ((_n) * 0x40)))
#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
-#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
- (0x0E014 + ((_n) * 0x40)))
+#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+ (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+ (0x0C028 + ((_n) * 0x40)))
+#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \
+ (0x0C030 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+ (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+ (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+ (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+ (0x0E010 + ((_n) * 0x40)))
+#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
+ (0x0E014 + ((_n) * 0x40)))
#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
-#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
- : (0x0E038 + ((_n) * 0x40)))
-#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
- : (0x0E03C + ((_n) * 0x40)))
-#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
-#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
-#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
-#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
-#define E1000_DTXCTL 0x03590 /* DMA TX Control - RW */
-#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
-#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
-#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
-#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
-#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
-#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
-#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
-#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
-#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
-#define E1000_COLC 0x04028 /* Collision Count - R/clr */
-#define E1000_DC 0x04030 /* Defer Count - R/clr */
-#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
-#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
-#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
-#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
-#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
-#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
-#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
-#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
-#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count- R/clr */
-#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
-#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
-#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
-#define E1000_PRC511 0x04068 /* Packets RX (255-511 bytes) - R/clr */
-#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
-#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
-#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
-#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
-#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
-#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
-#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
-#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
-#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
-#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
-#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
-#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
-#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
-#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
-#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
-#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
-#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
-#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
-#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
-#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
-#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
-#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
-#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
-#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
-#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
-#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
-#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
-#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
-#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
-#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 Bytes) - R/clr */
-#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
-#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
-#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
-#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
-#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
-/* Interrupt Cause Rx Packet Timer Expire Count */
-#define E1000_ICRXPTC 0x04104
-/* Interrupt Cause Rx Absolute Timer Expire Count */
-#define E1000_ICRXATC 0x04108
-/* Interrupt Cause Tx Packet Timer Expire Count */
-#define E1000_ICTXPTC 0x0410C
-/* Interrupt Cause Tx Absolute Timer Expire Count */
-#define E1000_ICTXATC 0x04110
-/* Interrupt Cause Tx Queue Empty Count */
-#define E1000_ICTXQEC 0x04118
-/* Interrupt Cause Tx Queue Minimum Threshold Count */
-#define E1000_ICTXQMTC 0x0411C
-/* Interrupt Cause Rx Descriptor Minimum Threshold Count */
-#define E1000_ICRXDMTC 0x04120
-#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
-#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */
-#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
-#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
-#define E1000_CBTMPC 0x0402C /* Circuit Breaker TX Packet Count */
-#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
-#define E1000_CBRMPC 0x040FC /* Circuit Breaker RX Packet Count */
-#define E1000_RPTHC 0x04104 /* Rx Packets To Host */
-#define E1000_HGPTC 0x04118 /* Host Good Packets TX Count */
-#define E1000_HTCBDPC 0x04124 /* Host TX Circuit Breaker Dropped Count */
-#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */
-#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */
-#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
-#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
-#define E1000_LENERRS 0x04138 /* Length Errors Count */
-#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
-#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
-#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
-#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */
-#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */
-#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
-#define E1000_RLPML 0x05004 /* RX Long Packet Max Length */
-#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
-#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
-#define E1000_RA 0x05400 /* Receive Address - RW Array */
-#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
-#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
-#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
- (0x054E0 + ((_i - 16) * 8)))
-#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
- (0x054E4 + ((_i - 16) * 8)))
-#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
-#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
-#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
-#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
-#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
-#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
-#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
-#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
-#define E1000_WUC 0x05800 /* Wakeup Control - RW */
-#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
-#define E1000_WUS 0x05810 /* Wakeup Status - RO */
-#define E1000_MANC 0x05820 /* Management Control - RW */
-#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
-#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
-
-#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
-#define E1000_CCMCTL 0x05B48 /* CCM Control Register */
-#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */
-#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */
-#define E1000_GCR 0x05B00 /* PCI-Ex Control */
-#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
-#define E1000_SWSM 0x05B50 /* SW Semaphore */
-#define E1000_FWSM 0x05B54 /* FW Semaphore */
-#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
+#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+ (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+ (0x0E028 + ((_n) * 0x40)))
+#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \
+ (0x0E038 + ((_n) * 0x40)))
+#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \
+ (0x0E03C + ((_n) * 0x40)))
+#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100))
+#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */
+#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
+#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x054E4 + ((_i - 16) * 8)))
+#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
+#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
+#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
+#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
+#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
+#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
+#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
+#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
+#define E1000_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */
+#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */
+#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+/* Same as TXPBS, renamed for newer Si - RW */
+#define E1000_ITPBS 0x03404
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */
+#define E1000_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */
+#define E1000_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */
+#define E1000_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */
+#define E1000_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */
+#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */
+#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */
+#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */
+/* DMA Tx Max Total Allow Size Reqs - RW */
+#define E1000_DTXMXSZRQ 0x03540
+#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
+#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */
+#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
+#define E1000_PRC511 0x04068 /* Packets Rx (256-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
+#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
+#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
+#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
+#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
+#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
+#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
+#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
+#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
+#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
+#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */
+#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */
+#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */
+#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */
+#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
+#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
+#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */
+#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
+#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */
+#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */
+#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */
+#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
+#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
+#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
+#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
+#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
+#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
+#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
+#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */
+#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */
+#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
+#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
+#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
+#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
-/* RSS registers */
-#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
-#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
-#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/
-#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */
-/* MSI-X Allocation Register (_i) - RW */
-#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4))
-/* Redirection Table - RW Array */
-#define E1000_RETA(_i) (0x05C00 + ((_i) * 4))
-#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
+/* Virtualization statistical counters */
+#define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n)))
+#define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n)))
+#define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n)))
+#define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n)))
+#define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n)))
+#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n)))
+#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n)))
+#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n)))
+#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n)))
+
+/* LinkSec */
+#define E1000_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */
+#define E1000_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */
+#define E1000_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */
+#define E1000_LSECTXOCTE 0x0430C /* Encrypted Tx Octets Cnt */
+#define E1000_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */
+#define E1000_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */
+#define E1000_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */
+#define E1000_LSECRXOCTV 0x04320 /* Rx Octets Validated */
+#define E1000_LSECRXBAD 0x04324 /* Rx Bad Tag */
+#define E1000_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */
+#define E1000_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */
+#define E1000_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */
+#define E1000_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */
+#define E1000_LSECRXLATE 0x04350 /* Rx Late Packets Count */
+#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */
+#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */
+#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */
+#define E1000_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */
+#define E1000_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */
+#define E1000_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */
+#define E1000_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */
+#define E1000_LSECTXCTRL 0x0B004 /* Tx Control - RW */
+#define E1000_LSECRXCTRL 0x0B304 /* Rx Control - RW */
+#define E1000_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */
+#define E1000_LSECTXSCH 0x0B00C /* Tx SCI High - RW */
+#define E1000_LSECTXSA 0x0B010 /* Tx SA0 - RW */
+#define E1000_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */
+#define E1000_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */
+#define E1000_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */
+#define E1000_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */
+/* LinkSec Tx 128-bit Key 0 - WO */
+#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n)))
+/* LinkSec Tx 128-bit Key 1 - WO */
+#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n)))
+#define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */
+#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */
+/* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit
+ * key - RW.
+ */
+#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m)))
+
+#define E1000_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */
+#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */
+#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */
+#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */
+/* IPSec Rx IPv4/v6 Address - RW */
+#define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n)))
+/* IPSec Rx 128-bit Key - RW */
+#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n)))
+#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */
+#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */
+/* IPSec Tx 128-bit Key - RW */
+#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n)))
+#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */
+#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */
+#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */
+#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
+#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */
+#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */
+#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */
+#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */
+#define E1000_RPTHC 0x04104 /* Rx Packets To Host */
+#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */
+#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */
+#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */
+#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */
+#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */
+#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */
+#define E1000_LENERRS 0x04138 /* Length Errors Count */
+#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */
+#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */
+#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
+#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
+#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */
+#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Pg - RW */
+#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */
+#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */
+#define E1000_RFCTL 0x05008 /* Receive Filter Control */
+#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA 0x05400 /* Receive Address - RW Array */
+#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
+#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
+#define E1000_CIAA 0x05B88 /* Config Indirect Access Address - RW */
+#define E1000_CIAD 0x05B8C /* Config Indirect Access Data - RW */
+#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */
+#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */
+#define E1000_WUC 0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_MANC 0x05820 /* Management Control - RW */
+#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
+#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */
+#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
+#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */
+#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */
+#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF 0x08800 /* Host Interface */
+#define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */
+/* Flexible Host Filter Table */
+#define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100))
+/* Ext Flexible Host Filter Table */
+#define E1000_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100))
+#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
+/* Management Decision Filters */
+#define E1000_MDEF(_n) (0x05890 + (4 * (_n)))
+#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
+#define E1000_CCMCTL 0x05B48 /* CCM Control Register */
+#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */
+#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */
+#define E1000_GCR 0x05B00 /* PCI-Ex Control */
+#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */
+#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM 0x05B50 /* SW Semaphore */
+#define E1000_FWSM 0x05B54 /* FW Semaphore */
+/* Driver-only SW semaphore (not used by BOOT agents) */
+#define E1000_SWSM2 0x05B58
+#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */
+#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
+#define E1000_UFUSE 0x05B78 /* UFUSE - RO */
+#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
+#define E1000_HICR 0x08F00 /* Host Interface Control */
+#define E1000_FWSTS 0x08F0C /* FW Status */
+
+/* RSS registers */
+#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
+#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */
+#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/
+#define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */
+#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */
+#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
+#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
+#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
/* VT Registers */
-#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */
-#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */
-#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */
-#define E1000_VFRE 0x00C8C /* VF Receive Enables */
-#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
-#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
-#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
-#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */
-#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
-#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
-#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
-#define E1000_TXSWC 0x05ACC /* Tx Switch Control */
+#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */
+#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */
+#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */
+#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */
+#define E1000_VFRE 0x00C8C /* VF Receive Enables */
+#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
+#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
+#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
+#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */
+#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
+#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
+#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
+#define E1000_VMRCTL 0x05D80 /* Virtual Mirror Rule Control */
+#define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */
+#define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */
+#define E1000_MDFB 0x03558 /* Malicious Driver free block */
+#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */
+#define E1000_TXSWC 0x05ACC /* Tx Switch Control */
+#define E1000_SCCRL 0x05DB0 /* Storm Control Control */
+#define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */
+#define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */
/* These act per VF so an array friendly macro is used */
-#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
-#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
-#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
-#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
- * Filter - RW */
-#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
-
-#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
-#define rd32(reg) (readl(hw->hw_addr + reg))
-#define wrfl() ((void)rd32(E1000_STATUS))
-
-#define array_wr32(reg, offset, value) \
- (writel(value, hw->hw_addr + reg + ((offset) << 2)))
-#define array_rd32(reg, offset) \
- (readl(hw->hw_addr + reg + ((offset) << 2)))
+#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n)))
+#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
+#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
+#define E1000_VFVMBMEM(_n) (0x00800 + (_n))
+#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
+/* VLAN Virtual Machine Filter - RW */
+#define E1000_VLVF(_n) (0x05D00 + (4 * (_n)))
+#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
+#define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */
+#define E1000_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control */
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
+#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
+#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
+#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
+#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */
+#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */
+#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
+#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
+#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
+#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
+
+/* Filtering Registers */
+#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */
+#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */
+#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */
+#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
+#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */
+#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
+#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+
+#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */
+#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */
+#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */
+#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */
+#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */
+/* Tx Desc plane TC Rate-scheduler config */
+#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4))
+/* Tx Packet plane TC Rate-Scheduler Config */
+#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4))
+/* Rx Packet plane TC Rate-Scheduler Config */
+#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4))
+/* Tx Desc Plane TC Rate-Scheduler Status */
+#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4))
+/* Tx Desc Plane TC Rate-Scheduler MMW */
+#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4))
+/* Tx Packet plane TC Rate-Scheduler Status */
+#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4))
+/* Tx Packet plane TC Rate-scheduler MMW */
+#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4))
+/* Rx Packet plane TC Rate-Scheduler Status */
+#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4))
+/* Rx Packet plane TC Rate-Scheduler MMW */
+#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4))
+/* Tx Desc plane VM Rate-Scheduler MMW*/
+#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4))
+/* Tx BCN Rate-Scheduler MMW */
+#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4))
+#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */
+#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */
+#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */
+#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */
+#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */
+#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */
+#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */
+#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */
+#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */
+#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */
+#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */
+#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */
+#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */
+#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */
/* DMA Coalescing registers */
+#define E1000_DMACR 0x02508 /* Control Register */
+#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */
+#define E1000_DMCTLX 0x02514 /* Time to Lx Request */
+#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
+#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
+#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
-/* Energy Efficient Ethernet "EEE" register */
-#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
-#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */
-#define E1000_EEE_SU 0X0E34 /* EEE Setup */
-#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */
-#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */
-#define E1000_MMDAC 13 /* MMD Access Control */
-#define E1000_MMDAAD 14 /* MMD Access Address/Data */
-
-/* Thermal Sensor Register */
+/* PCIe Parity Status Register */
+#define E1000_PCIEERRSTS 0x05BA8
+
+#define E1000_PROXYS 0x5F64 /* Proxying Status */
+#define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */
+/* Thermal sensor configuration and status registers */
+#define E1000_THMJT 0x08100 /* Junction Temperature */
+#define E1000_THLOWTC 0x08104 /* Low Threshold Control */
+#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */
+#define E1000_THHIGHTC 0x0810C /* High Threshold Control */
#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
+/* Energy Efficient Ethernet "EEE" registers */
+#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */
+#define E1000_LTRC 0x01A0 /* Latency Tolerance Reporting Control */
+#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE" */
+#define E1000_EEE_SU 0x0E34 /* EEE Setup */
+#define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */
+#define E1000_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */
+
/* OS2BMC Registers */
#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
-#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */
-#define E1000_I210_FLMNGCTL 0x12038
-#define E1000_I210_FLMNGDATA 0x1203C
-#define E1000_I210_FLMNGCNT 0x12040
-
-#define E1000_I210_FLSWCTL 0x12048
-#define E1000_I210_FLSWDATA 0x1204C
-#define E1000_I210_FLSWCNT 0x12050
-
-#define E1000_I210_FLA 0x1201C
-
-#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
-#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
-
#endif
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 9d6c075e232d..21a2e28fe065 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,98 +12,138 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
-
/* Linux PRO/1000 Ethernet Driver main header file */
#ifndef _IGB_H_
#define _IGB_H_
-#include "e1000_mac.h"
+#include <linux/kobject.h>
+
+#ifndef IGB_NO_LRO
+#include <net/tcp.h>
+#endif
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+#endif
+
+struct igb_adapter;
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#define IGB_DCA
+#endif
+#ifdef IGB_DCA
+#include <linux/dca.h>
+#endif
+
+#include "kcompat.h"
+
+#ifdef HAVE_SCTP
+#include <linux/sctp.h>
+#endif
+
+#include "e1000_api.h"
#include "e1000_82575.h"
+#include "e1000_manage.h"
+#include "e1000_mbx.h"
+
+#define IGB_ERR(args...) pr_err("igb: " args)
+
+#define PFX "igb: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+ (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+ __func__ , ## args))
+#ifdef HAVE_PTP_1588_CLOCK
+#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#include <linux/timecounter.h>
+#else
#include <linux/clocksource.h>
+#endif /* HAVE_INCLUDE_LINUX_TIMECOUNTER_H */
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
-#include <linux/bitops.h>
-#include <linux/if_vlan.h>
+#endif /* HAVE_PTP_1588_CLOCK */
+
+#ifdef HAVE_I2C_SUPPORT
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-
-struct igb_adapter;
-
-#define E1000_PCS_CFG_IGN_SD 1
+#endif /* HAVE_I2C_SUPPORT */
/* Interrupt defines */
-#define IGB_START_ITR 648 /* ~6000 ints/sec */
-#define IGB_4K_ITR 980
-#define IGB_20K_ITR 196
-#define IGB_70K_ITR 56
+#define IGB_START_ITR 648 /* ~6000 ints/sec */
+#define IGB_4K_ITR 980
+#define IGB_20K_ITR 196
+#define IGB_70K_ITR 56
+
+/* Interrupt modes, as used by the IntMode parameter */
+#define IGB_INT_MODE_LEGACY 0
+#define IGB_INT_MODE_MSI 1
+#define IGB_INT_MODE_MSIX 2
/* TX/RX descriptor defines */
-#define IGB_DEFAULT_TXD 256
-#define IGB_DEFAULT_TX_WORK 128
-#define IGB_MIN_TXD 80
-#define IGB_MAX_TXD 4096
+#define IGB_DEFAULT_TXD 256
+#define IGB_DEFAULT_TX_WORK 128
+#define IGB_MIN_TXD 80
+#define IGB_MAX_TXD 4096
+
+#define IGB_DEFAULT_RXD 256
+#define IGB_MIN_RXD 80
+#define IGB_MAX_RXD 4096
-#define IGB_DEFAULT_RXD 256
-#define IGB_MIN_RXD 80
-#define IGB_MAX_RXD 4096
+#define IGB_MIN_ITR_USECS 10 /* 100k irq/sec */
+#define IGB_MAX_ITR_USECS 8191 /* 120 irq/sec */
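+/* interrupt rate ~= 1000000 / ITR_USECS, e.g. 8191 us works out to roughly 122 irq/sec */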
-#define IGB_DEFAULT_ITR 3 /* dynamic */
-#define IGB_MAX_ITR_USECS 10000
-#define IGB_MIN_ITR_USECS 10
-#define NON_Q_VECTORS 1
-#define MAX_Q_VECTORS 8
+#define NON_Q_VECTORS 1
+#define MAX_Q_VECTORS 10
/* Transmit and receive queues */
-#define IGB_MAX_RX_QUEUES 8
-#define IGB_MAX_RX_QUEUES_82575 4
-#define IGB_MAX_RX_QUEUES_I211 2
-#define IGB_MAX_TX_QUEUES 8
-#define IGB_MAX_VF_MC_ENTRIES 30
-#define IGB_MAX_VF_FUNCTIONS 8
-#define IGB_MAX_VFTA_ENTRIES 128
-#define IGB_82576_VF_DEV_ID 0x10CA
-#define IGB_I350_VF_DEV_ID 0x1520
-
-/* NVM version defines */
-#define IGB_MAJOR_MASK 0xF000
-#define IGB_MINOR_MASK 0x0FF0
-#define IGB_BUILD_MASK 0x000F
-#define IGB_COMB_VER_MASK 0x00FF
-#define IGB_MAJOR_SHIFT 12
-#define IGB_MINOR_SHIFT 4
-#define IGB_COMB_VER_SHFT 8
-#define IGB_NVM_VER_INVALID 0xFFFF
-#define IGB_ETRACK_SHIFT 16
-#define NVM_ETRACK_WORD 0x0042
-#define NVM_COMB_VER_OFF 0x0083
-#define NVM_COMB_VER_PTR 0x003d
+#define IGB_MAX_RX_QUEUES 16
+#define IGB_MAX_RX_QUEUES_82575 4
+#define IGB_MAX_RX_QUEUES_I211 2
+#define IGB_MAX_TX_QUEUES 16
+
+#define IGB_MAX_VF_MC_ENTRIES 30
+#define IGB_MAX_VF_FUNCTIONS 8
+#define IGB_82576_VF_DEV_ID 0x10CA
+#define IGB_I350_VF_DEV_ID 0x1520
+#define IGB_MAX_UTA_ENTRIES 128
+#define MAX_EMULATION_MAC_ADDRS 16
+#define OUI_LEN 3
+#define IGB_MAX_VMDQ_QUEUES 8
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
u16 num_vf_mc_hashes;
+ u16 default_vf_vlan_id;
u16 vlans_enabled;
+ unsigned char em_mac_addresses[MAX_EMULATION_MAC_ADDRS * ETH_ALEN];
+ u32 uta_table_copy[IGB_MAX_UTA_ENTRIES];
u32 flags;
unsigned long last_nack;
+#ifdef IFLA_VF_MAX
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
u16 tx_rate;
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
bool spoofchk_enabled;
+#endif
+#endif
};
#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
@@ -128,30 +168,96 @@ struct vf_data_storage {
#define IGB_TX_HTHRESH 1
#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
adapter->msix_entries) ? 1 : 4)
-#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 1 : 16)
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+/* NOTE: netdev_alloc_skb reserves 16 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 more,
+ * this adds roughly 448 bytes of extra data meaning the smallest
+ * allocation we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
/* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_256 256
-#define IGB_RXBUFFER_2048 2048
-#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
-#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
+#define IGB_RXBUFFER_256 256
+#define IGB_RXBUFFER_2048 2048
+#define IGB_RXBUFFER_16384 16384
+#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
+#if MAX_SKB_FRAGS < 8
+#define IGB_RX_BUFSZ ALIGN(MAX_JUMBO_FRAME_SIZE / MAX_SKB_FRAGS, 1024)
+#else
+#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
+#endif
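+/* e.g. assuming the driver's MAX_JUMBO_FRAME_SIZE of 0x3F00 (16128 bytes) and a
+ * hypothetical build with only 4 skb frags, this works out to
+ * ALIGN(16128 / 4, 1024) = 4096, so a full jumbo frame still fits in the
+ * available page fragments; common builds (8 or more frags) keep 2048.
+ */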
+
+
+/* Packet Buffer allocations */
+#define IGB_PBA_BYTES_SHIFT 0xA
+#define IGB_TX_HEAD_ADDR_SHIFT 7
+#define IGB_PBA_TX_MASK 0xFFFF0000
+
+#define IGB_FC_PAUSE_TIME 0x0680 /* 858 usec */
/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-#define AUTO_ALL_MODES 0
-#define IGB_EEPROM_APME 0x0400
+#define IGB_EEPROM_APME 0x0400
+#define AUTO_ALL_MODES 0
#ifndef IGB_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define IGB_MASTER_SLAVE e1000_ms_hw_default
#endif
-#define IGB_MNG_VLAN_NONE -1
+#define IGB_MNG_VLAN_NONE -1
+
+#ifndef IGB_NO_LRO
+#define IGB_LRO_MAX 32 /* Maximum number of LRO descriptors */
+struct igb_lro_stats {
+ u32 flushed;
+ u32 coal;
+};
+
+/*
+ * igb_lrohdr - header format to be aggregated by LRO
+ * @iph: IP header without options
+ * @th: TCP header
+ * @ts: Optional TCP timestamp data in TCP options
+ *
+ * This structure relies on the check above that verifies that the header
+ * is IPv4 and does not contain any options.
+ */
+struct igb_lrohdr {
+ struct iphdr iph;
+ struct tcphdr th;
+ __be32 ts[0];
+};
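+/* When the TCP timestamp option is present, ts[] typically covers three dwords:
+ * two NOP pad bytes followed by the 10-byte timestamp option.
+ */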
+
+struct igb_lro_list {
+ struct sk_buff_head active;
+ struct igb_lro_stats stats;
+};
+
+#endif /* IGB_NO_LRO */
+struct igb_cb {
+#ifndef IGB_NO_LRO
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ union { /* Union defining head/tail partner */
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ };
+#endif
+ __be32 tsecr; /* timestamp echo response */
+ u32 tsval; /* timestamp value in host order */
+ u32 next_seq; /* next expected sequence number */
+ u16 free; /* 65521 minus total size */
+ u16 mss; /* size of data portion of packet */
+ u16 append_cnt; /* number of skb's appended */
+#endif /* IGB_NO_LRO */
+#ifdef HAVE_VLAN_RX_REGISTER
+ u16 vid; /* VLAN tag */
+#endif
+};
+#define IGB_CB(skb) ((struct igb_cb *)(skb)->cb)
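+/* e.g. IGB_CB(skb)->next_seq gives the per-skb LRO state; struct igb_cb must
+ * fit within the 48-byte skb->cb[] scratch area.
+ */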
enum igb_tx_flags {
/* cmd_type flags */
@@ -165,30 +271,28 @@ enum igb_tx_flags {
};
/* VLAN info */
-#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
-#define IGB_TX_FLAGS_VLAN_SHIFT 16
+#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IGB_TX_FLAGS_VLAN_SHIFT 16
-/* The largest size we can write to the descriptor is 65535. In order to
+/*
+ * The largest size we can write to the descriptor is 65535. In order to
* maintain a power of two alignment we have to limit ourselves to 32K.
*/
-#define IGB_MAX_TXD_PWR 15
+#define IGB_MAX_TXD_PWR 15
#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
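+/* i.e. IGB_MAX_DATA_PER_TXD = 1 << 15 = 32768 bytes of data per descriptor */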
/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
-
-/* EEPROM byte offsets */
-#define IGB_SFF_8472_SWAP 0x5C
-#define IGB_SFF_8472_COMP 0x5E
-
-/* Bitmasks */
-#define IGB_SFF_ADDRESSING_MODE 0x4
-#define IGB_SFF_8472_UNSUP 0x00
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
+#ifndef MAX_SKB_FRAGS
+#define DESC_NEEDED 4
+#elif (MAX_SKB_FRAGS < 16)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#else
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+#endif
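+/* e.g. on a typical 4 KiB-page build MAX_SKB_FRAGS is 17, so the last branch
+ * applies and DESC_NEEDED = 17 + 4 = 21 descriptors per worst-case frame.
+ */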
/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer
- */
+ * so a DMA handle can be stored along with the buffer */
struct igb_tx_buffer {
union e1000_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
@@ -196,6 +300,7 @@ struct igb_tx_buffer {
unsigned int bytecount;
u16 gso_segs;
__be16 protocol;
+
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
@@ -203,15 +308,18 @@ struct igb_tx_buffer {
struct igb_rx_buffer {
dma_addr_t dma;
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ struct sk_buff *skb;
+#else
struct page *page;
- unsigned int page_offset;
+ u32 page_offset;
+#endif
};
struct igb_tx_queue_stats {
u64 packets;
u64 bytes;
u64 restart_queue;
- u64 restart_queue2;
};
struct igb_rx_queue_stats {
@@ -222,6 +330,18 @@ struct igb_rx_queue_stats {
u64 alloc_failed;
};
+struct igb_rx_packet_stats {
+ u64 ipv4_packets; /* IPv4 headers processed */
+ u64 ipv4e_packets; /* IPv4E headers with extensions processed */
+ u64 ipv6_packets; /* IPv6 headers processed */
+ u64 ipv6e_packets; /* IPv6E headers with extensions processed */
+ u64 tcp_packets; /* TCP headers processed */
+ u64 udp_packets; /* UDP headers processed */
+ u64 sctp_packets; /* SCTP headers processed */
+ u64 nfs_packets; /* NFS headers processed */
+ u64 other_packets;
+};
+
struct igb_ring_container {
struct igb_ring *ring; /* pointer to linked list of rings */
unsigned int total_bytes; /* total bytes processed this int */
@@ -232,23 +352,22 @@ struct igb_ring_container {
};
struct igb_ring {
- struct igb_q_vector *q_vector; /* backlink to q_vector */
- struct net_device *netdev; /* back pointer to net_device */
- struct device *dev; /* device pointer for dma mapping */
+ struct igb_q_vector *q_vector; /* backlink to q_vector */
+ struct net_device *netdev; /* back pointer to net_device */
+ struct device *dev; /* device for dma mapping */
union { /* array of buffer info structs */
struct igb_tx_buffer *tx_buffer_info;
struct igb_rx_buffer *rx_buffer_info;
};
- unsigned long last_rx_timestamp;
- void *desc; /* descriptor ring memory */
- unsigned long flags; /* ring specific flags */
- void __iomem *tail; /* pointer to ring tail register */
+ void *desc; /* descriptor ring memory */
+ unsigned long flags; /* ring specific flags */
+ void __iomem *tail; /* pointer to ring tail register */
dma_addr_t dma; /* phys address of the ring */
- unsigned int size; /* length of desc. ring in bytes */
+ unsigned int size; /* length of desc. ring in bytes */
- u16 count; /* number of desc. in the ring */
- u8 queue_index; /* logical index of the ring*/
- u8 reg_idx; /* physical index of the ring */
+ u16 count; /* number of desc. in the ring */
+ u8 queue_index; /* logical index of the ring*/
+ u8 reg_idx; /* physical index of the ring */
/* everything past this point are written often */
u16 next_to_clean;
@@ -259,16 +378,22 @@ struct igb_ring {
/* TX */
struct {
struct igb_tx_queue_stats tx_stats;
- struct u64_stats_sync tx_syncp;
- struct u64_stats_sync tx_syncp2;
};
/* RX */
struct {
- struct sk_buff *skb;
struct igb_rx_queue_stats rx_stats;
- struct u64_stats_sync rx_syncp;
+ struct igb_rx_packet_stats pkt_stats;
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ u16 rx_buffer_len;
+#else
+ struct sk_buff *skb;
+#endif
};
};
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ struct net_device *vmdq_netdev;
+ int vqueue_index; /* queue index for virtual netdev */
+#endif
} ____cacheline_internodealigned_in_smp;
struct igb_q_vector {
@@ -283,29 +408,57 @@ struct igb_q_vector {
struct igb_ring_container rx, tx;
struct napi_struct napi;
+#ifndef IGB_NO_LRO
+ struct igb_lro_list lrolist; /* LRO list for queue vector */
+#endif
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
+#ifndef HAVE_NETDEV_NAPI_LIST
+ struct net_device poll_dev;
+#endif
/* for dynamic allocation of rings associated with this q_vector */
struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
};
enum e1000_ring_flags_t {
+#if defined(HAVE_RHEL6_NET_DEVICE_OPS_EXT) || !defined(HAVE_NDO_SET_FEATURES)
+ IGB_RING_FLAG_RX_CSUM,
+#endif
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_TX_CTX_IDX,
- IGB_RING_FLAG_TX_DETECT_HANG
+ IGB_RING_FLAG_TX_DETECT_HANG,
+};
+
+struct igb_mac_addr {
+ u8 addr[ETH_ALEN];
+ u16 queue;
+ u16 state; /* bitmask */
};
+#define IGB_MAC_STATE_DEFAULT 0x1
+#define IGB_MAC_STATE_MODIFIED 0x2
+#define IGB_MAC_STATE_IN_USE 0x4
#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
-#define IGB_RX_DESC(R, i) \
+#define IGB_RX_DESC(R, i) \
(&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
-#define IGB_TX_DESC(R, i) \
+#define IGB_TX_DESC(R, i) \
(&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
-#define IGB_TX_CTXTDESC(R, i) \
+#define IGB_TX_CTXTDESC(R, i) \
(&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+#define netdev_ring(ring) \
+ ((ring->vmdq_netdev ? ring->vmdq_netdev : ring->netdev))
+#define ring_queue_index(ring) \
+ ((ring->vmdq_netdev ? ring->vqueue_index : ring->queue_index))
+#else
+#define netdev_ring(ring) (ring->netdev)
+#define ring_queue_index(ring) (ring->queue_index)
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
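+/* Callers use these to address the VMDq child netdev and its local queue index
+ * when a ring has been assigned to one, falling back to the PF netdev otherwise.
+ */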
+
/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
const u32 stat_err_bits)
@@ -314,21 +467,27 @@ static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
}
/* igb_desc_unused - calculate if we have unused descriptors */
-static inline int igb_desc_unused(struct igb_ring *ring)
+static inline u16 igb_desc_unused(const struct igb_ring *ring)
{
- if (ring->next_to_clean > ring->next_to_use)
- return ring->next_to_clean - ring->next_to_use - 1;
+ u16 ntc = ring->next_to_clean;
+ u16 ntu = ring->next_to_use;
- return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
-struct igb_i2c_client_list {
- struct i2c_client *client;
- struct igb_i2c_client_list *next;
-};
+#ifdef CONFIG_BQL
+static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
+{
+ return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
+}
+#endif /* CONFIG_BQL */
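+/* txring_txq() resolves the netdev_queue backing a Tx ring, used for byte
+ * queue limit (BQL) accounting.
+ */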
-#ifdef CONFIG_IGB_HWMON
+struct igb_therm_proc_data {
+ struct e1000_hw *hw;
+ struct e1000_thermal_diode_data *sensor_data;
+};
+#ifdef IGB_HWMON
#define IGB_HWMON_TYPE_LOC 0
#define IGB_HWMON_TYPE_TEMP 1
#define IGB_HWMON_TYPE_CAUTION 2
@@ -346,12 +505,19 @@ struct hwmon_buff {
struct hwmon_attr *hwmon_list;
unsigned int n_hwmon;
};
-#endif
+#endif /* IGB_HWMON */
+#ifdef ETHTOOL_GRXFHINDIR
+#define IGB_RETA_SIZE 128
+#endif /* ETHTOOL_GRXFHINDIR */
/* board specific private data structure */
struct igb_adapter {
+#ifdef HAVE_VLAN_RX_REGISTER
+ /* vlgrp must be first member of structure */
+ struct vlan_group *vlgrp;
+#else
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-
+#endif
struct net_device *netdev;
unsigned long state;
@@ -360,47 +526,54 @@ struct igb_adapter {
unsigned int num_q_vectors;
struct msix_entry *msix_entries;
- /* Interrupt Throttle Rate */
- u32 rx_itr_setting;
- u32 tx_itr_setting;
- u16 tx_itr;
- u16 rx_itr;
/* TX */
u16 tx_work_limit;
u32 tx_timeout_count;
int num_tx_queues;
- struct igb_ring *tx_ring[16];
+ struct igb_ring *tx_ring[IGB_MAX_TX_QUEUES];
/* RX */
int num_rx_queues;
- struct igb_ring *rx_ring[16];
-
- u32 max_frame_size;
- u32 min_frame_size;
+ struct igb_ring *rx_ring[IGB_MAX_RX_QUEUES];
struct timer_list watchdog_timer;
+ struct timer_list dma_err_timer;
struct timer_list phy_info_timer;
-
u16 mng_vlan_id;
u32 bd_number;
u32 wol;
u32 en_mng_pt;
u16 link_speed;
u16 link_duplex;
+ u8 port_num;
+
+ u8 __iomem *io_addr; /* for iounmap */
+
+ /* Interrupt Throttle Rate */
+ u32 rx_itr_setting;
+ u32 tx_itr_setting;
struct work_struct reset_task;
struct work_struct watchdog_task;
+ struct work_struct dma_err_task;
bool fc_autoneg;
u8 tx_timeout_factor;
- struct timer_list blink_timer;
- unsigned long led_status;
+
+#ifdef DEBUG
+ bool tx_hang_detected;
+ bool disable_hw_reset;
+#endif
+ u32 max_frame_size;
/* OS defined structs */
struct pci_dev *pdev;
-
- spinlock_t stats64_lock;
- struct rtnl_link_stats64 stats64;
+#ifndef HAVE_NETDEV_STATS_IN_NETDEV
+ struct net_device_stats net_stats;
+#endif
+#ifndef IGB_NO_LRO
+ struct igb_lro_stats lro_stats;
+#endif
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
@@ -408,9 +581,11 @@ struct igb_adapter {
struct e1000_phy_info phy_info;
struct e1000_phy_stats phy_stats;
+#ifdef ETHTOOL_TEST
u32 test_icr;
struct igb_ring test_tx_ring;
struct igb_ring test_rx_ring;
+#endif
int msg_enable;
@@ -419,73 +594,198 @@ struct igb_adapter {
u32 eims_other;
/* to not mess up cache alignment, always add to the bottom */
+ u32 *config_space;
u16 tx_ring_count;
u16 rx_ring_count;
- unsigned int vfs_allocated_count;
struct vf_data_storage *vf_data;
+#ifdef IFLA_VF_MAX
int vf_rate_link_speed;
+#endif
+ u32 lli_port;
+ u32 lli_size;
+ unsigned int vfs_allocated_count;
+ /* Malicious Driver Detection flag. Valid only when SR-IOV is enabled */
+ bool mdd;
+ int int_mode;
u32 rss_queues;
+ u32 tss_queues;
+ u32 vmdq_pools;
+ char fw_version[32];
u32 wvbr;
+ struct igb_mac_addr *mac_table;
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ struct net_device *vmdq_netdev[IGB_MAX_VMDQ_QUEUES];
+#endif
+ int vferr_refcount;
+ int dmac;
u32 *shadow_vfta;
+ /* External Thermal Sensor support flag */
+ bool ets;
+#ifdef IGB_HWMON
+ struct hwmon_buff igb_hwmon_buff;
+#else /* IGB_HWMON */
+#ifdef IGB_PROCFS
+ struct proc_dir_entry *eth_dir;
+ struct proc_dir_entry *info_dir;
+ struct proc_dir_entry *therm_dir[E1000_MAX_SENSORS];
+ struct igb_therm_proc_data therm_data[E1000_MAX_SENSORS];
+ bool old_lsc;
+#endif /* IGB_PROCFS */
+#endif /* IGB_HWMON */
+ u32 etrack_id;
+
+#ifdef HAVE_PTP_1588_CLOCK
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
struct delayed_work ptp_overflow_work;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
+ struct hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
unsigned long last_rx_ptp_check;
+ unsigned long last_rx_timestamp;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
u32 tx_hwtstamp_timeouts;
u32 rx_hwtstamp_cleared;
+#endif /* HAVE_PTP_1588_CLOCK */
- char fw_version[32];
-#ifdef CONFIG_IGB_HWMON
- struct hwmon_buff igb_hwmon_buff;
- bool ets;
-#endif
+#ifdef HAVE_I2C_SUPPORT
struct i2c_algo_bit_data i2c_algo;
struct i2c_adapter i2c_adap;
struct i2c_client *i2c_client;
+#endif /* HAVE_I2C_SUPPORT */
+ unsigned long link_check_timeout;
+
+ int devrc;
+
+ int copper_tries;
+ u16 eee_advert;
+#ifdef ETHTOOL_GRXFHINDIR
+ u32 rss_indir_tbl_init;
+ u8 rss_indir_tbl[IGB_RETA_SIZE];
+#endif
};
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+struct igb_vmdq_adapter {
+#ifdef HAVE_VLAN_RX_REGISTER
+ /* vlgrp must be first member of structure */
+ struct vlan_group *vlgrp;
+#else
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+#endif
+ struct igb_adapter *real_adapter;
+ struct net_device *vnetdev;
+ struct net_device_stats net_stats;
+ struct igb_ring *tx_ring;
+ struct igb_ring *rx_ring;
+};
+#endif
+
#define IGB_FLAG_HAS_MSI (1 << 0)
#define IGB_FLAG_DCA_ENABLED (1 << 1)
-#define IGB_FLAG_QUAD_PORT_A (1 << 2)
-#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
-#define IGB_FLAG_DMAC (1 << 4)
-#define IGB_FLAG_PTP (1 << 5)
-#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6)
-#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
-#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
+#define IGB_FLAG_LLI_PUSH (1 << 2)
+#define IGB_FLAG_QUAD_PORT_A (1 << 3)
+#define IGB_FLAG_QUEUE_PAIRS (1 << 4)
+#define IGB_FLAG_EEE (1 << 5)
+#define IGB_FLAG_DMAC (1 << 6)
+#define IGB_FLAG_DETECT_BAD_DMA (1 << 7)
+#define IGB_FLAG_PTP (1 << 8)
+#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 9)
+#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 10)
+#define IGB_FLAG_WOL_SUPPORTED (1 << 11)
+#define IGB_FLAG_NEED_LINK_UPDATE (1 << 12)
+#define IGB_FLAG_LOOPBACK_ENABLE (1 << 13)
+#define IGB_FLAG_MEDIA_RESET (1 << 14)
+#define IGB_FLAG_MAS_ENABLE (1 << 15)
+
+/* Media Auto Sense */
+#define IGB_MAS_ENABLE_0 0x0001
+#define IGB_MAS_ENABLE_1 0x0002
+#define IGB_MAS_ENABLE_2 0x0004
+#define IGB_MAS_ENABLE_3 0x0008
+
+#define IGB_MIN_TXPBSIZE 20408
+#define IGB_TX_BUF_4096 4096
+
+#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
/* DMA Coalescing defines */
-#define IGB_MIN_TXPBSIZE 20408
-#define IGB_TX_BUF_4096 4096
-#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
+#define IGB_DMAC_DISABLE 0
+#define IGB_DMAC_MIN 250
+#define IGB_DMAC_500 500
+#define IGB_DMAC_EN_DEFAULT 1000
+#define IGB_DMAC_2000 2000
+#define IGB_DMAC_3000 3000
+#define IGB_DMAC_4000 4000
+#define IGB_DMAC_5000 5000
+#define IGB_DMAC_6000 6000
+#define IGB_DMAC_7000 7000
+#define IGB_DMAC_8000 8000
+#define IGB_DMAC_9000 9000
+#define IGB_DMAC_MAX 10000
+
+#define IGB_82576_TSYNC_SHIFT 19
+#define IGB_82580_TSYNC_SHIFT 24
+#define IGB_TS_HDR_LEN 16
+
+/* CEM Support */
+#define FW_HDR_LEN 0x4
+#define FW_CMD_DRV_INFO 0xDD
+#define FW_CMD_DRV_INFO_LEN 0x5
+#define FW_CMD_RESERVED 0x0
+#define FW_RESP_SUCCESS 0x1
+#define FW_UNUSED_VER 0x0
+#define FW_MAX_RETRIES 3
+#define FW_STATUS_SUCCESS 0x1
+#define FW_FAMILY_DRV_VER 0xffffffff
+
+#define IGB_MAX_LINK_TRIES 20
+
+struct e1000_fw_hdr {
+ u8 cmd;
+ u8 buf_len;
+ union {
+ u8 cmd_resv;
+ u8 ret_status;
+ } cmd_or_resp;
+ u8 checksum;
+};
+
+#pragma pack(push, 1)
+struct e1000_fw_drv_info {
+ struct e1000_fw_hdr hdr;
+ u8 port_num;
+ u32 drv_version;
+ u16 pad; /* end spacing to ensure length is mult. of dword */
+ u8 pad2; /* additional end spacing to keep the total length a dword multiple */
+};
+#pragma pack(pop)
-#define IGB_82576_TSYNC_SHIFT 19
-#define IGB_TS_HDR_LEN 16
enum e1000_state_t {
__IGB_TESTING,
__IGB_RESETTING,
- __IGB_DOWN
-};
-
-enum igb_boards {
- board_82575,
+ __IGB_DOWN,
+ __IGB_PTP_TX_IN_PROGRESS,
};
extern char igb_driver_name[];
extern char igb_driver_version[];
+extern int igb_open(struct net_device *netdev);
+extern int igb_close(struct net_device *netdev);
extern int igb_up(struct igb_adapter *);
extern void igb_down(struct igb_adapter *);
extern void igb_reinit_locked(struct igb_adapter *);
extern void igb_reset(struct igb_adapter *);
-extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
+extern int igb_reinit_queues(struct igb_adapter *);
+#ifdef ETHTOOL_SRXFHINDIR
+extern void igb_write_rss_indir_tbl(struct igb_adapter *);
+#endif
+extern int igb_set_spd_dplx(struct igb_adapter *, u16);
extern int igb_setup_tx_resources(struct igb_ring *);
extern int igb_setup_rx_resources(struct igb_ring *);
extern void igb_free_tx_resources(struct igb_ring *);
@@ -498,11 +798,14 @@ extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
struct igb_tx_buffer *);
extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
-extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
+extern void igb_clean_rx_ring(struct igb_ring *);
+extern int igb_setup_queues(struct igb_adapter *adapter);
+extern void igb_update_stats(struct igb_adapter *);
extern bool igb_has_link(struct igb_adapter *adapter);
extern void igb_set_ethtool_ops(struct net_device *);
+extern void igb_check_options(struct igb_adapter *);
extern void igb_power_up_link(struct igb_adapter *);
-extern void igb_set_fw_version(struct igb_adapter *);
+#ifdef HAVE_PTP_1588_CLOCK
extern void igb_ptp_init(struct igb_adapter *adapter);
extern void igb_ptp_stop(struct igb_adapter *adapter);
extern void igb_ptp_reset(struct igb_adapter *adapter);
@@ -514,56 +817,37 @@ extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
unsigned char *va,
struct sk_buff *skb);
-static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
- !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
- igb_ptp_rx_rgtstamp(q_vector, skb);
-}
-
extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd);
-#ifdef CONFIG_IGB_HWMON
-extern void igb_sysfs_exit(struct igb_adapter *adapter);
-extern int igb_sysfs_init(struct igb_adapter *adapter);
+#endif /* HAVE_PTP_1588_CLOCK */
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *);
+#endif
+extern int igb_write_mc_addr_list(struct net_device *netdev);
+extern int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue);
+extern int igb_del_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue);
+extern int igb_available_rars(struct igb_adapter *adapter);
+extern s32 igb_vlvf_set(struct igb_adapter *, u32, bool, u32);
+extern void igb_configure_vt_default_pool(struct igb_adapter *adapter);
+extern void igb_enable_vlan_tags(struct igb_adapter *adapter);
+#ifndef HAVE_VLAN_RX_REGISTER
+extern void igb_vlan_mode(struct net_device *, u32);
#endif
-static inline s32 igb_reset_phy(struct e1000_hw *hw)
-{
- if (hw->phy.ops.reset)
- return hw->phy.ops.reset(hw);
-
- return 0;
-}
-
-static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
-{
- if (hw->phy.ops.read_reg)
- return hw->phy.ops.read_reg(hw, offset, data);
-
- return 0;
-}
-
-static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
-{
- if (hw->phy.ops.write_reg)
- return hw->phy.ops.write_reg(hw, offset, data);
-
- return 0;
-}
-
-static inline s32 igb_get_phy_info(struct e1000_hw *hw)
-{
- if (hw->phy.ops.get_phy_info)
- return hw->phy.ops.get_phy_info(hw);
- return 0;
-}
+#define E1000_PCS_CFG_IGN_SD 1
-static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
-{
- return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
-}
+int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
+int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+#ifdef IGB_HWMON
+void igb_sysfs_exit(struct igb_adapter *adapter);
+int igb_sysfs_init(struct igb_adapter *adapter);
+#else
+#ifdef IGB_PROCFS
+int igb_procfs_init(struct igb_adapter *adapter);
+void igb_procfs_exit(struct igb_adapter *adapter);
+int igb_procfs_topdir_init(void);
+void igb_procfs_topdir_exit(void);
+#endif /* IGB_PROCFS */
+#endif /* IGB_HWMON */
#endif /* _IGB_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_debugfs.c b/drivers/net/ethernet/intel/igb/igb_debugfs.c
new file mode 100644
index 000000000000..3f8ecb6fae9b
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_debugfs.c
@@ -0,0 +1,26 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2015 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "igb.h"
+
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7876240fa74e..fe29a1e08f32 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,14 +12,11 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -27,21 +24,27 @@
/* ethtool support for igb */
-#include <linux/vmalloc.h>
#include <linux/netdevice.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
+#include <linux/vmalloc.h>
+
+#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
+#ifdef CONFIG_PM_RUNTIME
#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
#include <linux/highmem.h>
-#include <linux/mdio.h>
#include "igb.h"
+#include "igb_regtest.h"
+#include <linux/if_vlan.h>
+#ifdef ETHTOOL_GEEE
+#include <linux/mdio.h>
+#endif
+#ifdef ETHTOOL_OPS_COMPAT
+#include "kcompat_ethtool.c"
+#endif
+#ifdef ETHTOOL_GSTATS
struct igb_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -53,6 +56,7 @@ struct igb_stats {
.sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
.stat_offset = offsetof(struct igb_adapter, _stat) \
}
+
static const struct igb_stats igb_gstrings_stats[] = {
IGB_STAT("rx_packets", stats.gprc),
IGB_STAT("tx_packets", stats.gptc),
@@ -86,6 +90,10 @@ static const struct igb_stats igb_gstrings_stats[] = {
IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
IGB_STAT("rx_long_byte_count", stats.gorc),
IGB_STAT("tx_dma_out_of_sync", stats.doosync),
+#ifndef IGB_NO_LRO
+ IGB_STAT("lro_aggregated", lro_stats.coal),
+ IGB_STAT("lro_flushed", lro_stats.flushed),
+#endif /* IGB_NO_LRO */
IGB_STAT("tx_smbus", stats.mgptc),
IGB_STAT("rx_smbus", stats.mgprc),
IGB_STAT("dropped_smbus", stats.mgpdc),
@@ -93,15 +101,18 @@ static const struct igb_stats igb_gstrings_stats[] = {
IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
+#ifdef HAVE_PTP_1588_CLOCK
IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+#endif /* HAVE_PTP_1588_CLOCK */
};
#define IGB_NETDEV_STAT(_net_stat) { \
- .stat_string = __stringify(_net_stat), \
- .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
- .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
+ .stat_string = #_net_stat, \
+ .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+ .stat_offset = offsetof(struct net_device_stats, _net_stat) \
}
+
static const struct igb_stats igb_gstrings_net_stats[] = {
IGB_NETDEV_STAT(rx_errors),
IGB_NETDEV_STAT(tx_errors),
@@ -114,15 +125,12 @@ static const struct igb_stats igb_gstrings_net_stats[] = {
IGB_NETDEV_STAT(tx_heartbeat_errors)
};
-#define IGB_GLOBAL_STATS_LEN \
- (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
-#define IGB_NETDEV_STATS_LEN \
- (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
+#define IGB_GLOBAL_STATS_LEN ARRAY_SIZE(igb_gstrings_stats)
+#define IGB_NETDEV_STATS_LEN ARRAY_SIZE(igb_gstrings_net_stats)
#define IGB_RX_QUEUE_STATS_LEN \
(sizeof(struct igb_rx_queue_stats) / sizeof(u64))
-
-#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */
-
+#define IGB_TX_QUEUE_STATS_LEN \
+ (sizeof(struct igb_tx_queue_stats) / sizeof(u64))
#define IGB_QUEUE_STATS_LEN \
((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
IGB_RX_QUEUE_STATS_LEN) + \
@@ -131,12 +139,16 @@ static const struct igb_stats igb_gstrings_net_stats[] = {
#define IGB_STATS_LEN \
(IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)", "Eeprom test (offline)",
"Interrupt test (offline)", "Loopback test (offline)",
"Link test (on/offline)"
};
+
#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
+#endif /* ETHTOOL_TEST */
static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
@@ -162,24 +174,10 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->advertising |= hw->phy.autoneg_advertised;
}
- if (hw->mac.autoneg != 1)
- ecmd->advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
-
- if (hw->fc.requested_mode == e1000_fc_full)
- ecmd->advertising |= ADVERTISED_Pause;
- else if (hw->fc.requested_mode == e1000_fc_rx_pause)
- ecmd->advertising |= (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- else if (hw->fc.requested_mode == e1000_fc_tx_pause)
- ecmd->advertising |= ADVERTISED_Asym_Pause;
- else
- ecmd->advertising &= ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
-
ecmd->port = PORT_TP;
ecmd->phy_address = hw->phy.addr;
ecmd->transceiver = XCVR_INTERNAL;
+
} else {
ecmd->supported = (SUPPORTED_1000baseT_Full |
SUPPORTED_100baseT_Full |
@@ -187,7 +185,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
SUPPORTED_Autoneg |
SUPPORTED_Pause);
if (hw->mac.type == e1000_i354)
- ecmd->supported |= SUPPORTED_2500baseX_Full;
+ ecmd->supported |= (SUPPORTED_2500baseX_Full);
ecmd->advertising = ADVERTISED_FIBRE;
@@ -212,26 +210,43 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->transceiver = XCVR_EXTERNAL;
}
- status = rd32(E1000_STATUS);
+ if (hw->mac.autoneg != 1)
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+
+ if (hw->fc.requested_mode == e1000_fc_full)
+ ecmd->advertising |= ADVERTISED_Pause;
+ else if (hw->fc.requested_mode == e1000_fc_rx_pause)
+ ecmd->advertising |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ else if (hw->fc.requested_mode == e1000_fc_tx_pause)
+ ecmd->advertising |= ADVERTISED_Asym_Pause;
+ else
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+
+ status = E1000_READ_REG(hw, E1000_STATUS);
if (status & E1000_STATUS_LU) {
if ((hw->mac.type == e1000_i354) &&
(status & E1000_STATUS_2P5_SKU) &&
!(status & E1000_STATUS_2P5_SKU_OVER))
- ecmd->speed = SPEED_2500;
+ ethtool_cmd_speed_set(ecmd, SPEED_2500);
else if (status & E1000_STATUS_SPEED_1000)
- ecmd->speed = SPEED_1000;
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
else if (status & E1000_STATUS_SPEED_100)
- ecmd->speed = SPEED_100;
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
else
- ecmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
+
if ((status & E1000_STATUS_FD) ||
hw->phy.media_type != e1000_media_type_copper)
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
+
} else {
- ecmd->speed = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
ecmd->duplex = -1;
}
@@ -240,6 +255,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->autoneg = AUTONEG_ENABLE;
else
ecmd->autoneg = AUTONEG_DISABLE;
+#ifdef ETH_TP_MDI_X
/* MDI-X => 2; MDI =>1; Invalid =>0 */
if (hw->phy.media_type == e1000_media_type_copper)
@@ -248,11 +264,14 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
else
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+#ifdef ETH_TP_MDI_AUTO
if (hw->phy.mdix == AUTO_ALL_MODES)
ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
else
ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+#endif
+#endif /* ETH_TP_MDI_X */
return 0;
}
@@ -261,16 +280,26 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ if (ecmd->duplex == DUPLEX_HALF) {
+ if (!hw->dev_spec._82575.eee_disable)
+ dev_info(pci_dev_to_dev(adapter->pdev), "EEE disabled: not supported with half duplex\n");
+ hw->dev_spec._82575.eee_disable = true;
+ } else {
+ if (hw->dev_spec._82575.eee_disable)
+ dev_info(pci_dev_to_dev(adapter->pdev), "EEE enabled\n");
+ hw->dev_spec._82575.eee_disable = false;
+ }
+
/* When SoL/IDER sessions are active, autoneg/speed/duplex
- * cannot be changed
- */
- if (igb_check_reset_block(hw)) {
- dev_err(&adapter->pdev->dev,
- "Cannot change link characteristics when SoL/IDER is active.\n");
+ * cannot be changed */
+ if (e1000_check_reset_block(hw)) {
+ dev_err(pci_dev_to_dev(adapter->pdev), "Cannot change link characteristics when SoL/IDER is active.\n");
return -EINVAL;
}
- /* MDI setting is only allowed when autoneg enabled because
+#ifdef ETH_TP_MDI_AUTO
+ /*
+ * MDI setting is only allowed when autoneg enabled because
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
@@ -285,8 +314,9 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
}
+#endif /* ETH_TP_MDI_AUTO */
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (ecmd->autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
@@ -319,14 +349,13 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
if (adapter->fc_autoneg)
hw->fc.requested_mode = e1000_fc_default;
} else {
- u32 speed = ethtool_cmd_speed(ecmd);
- /* calling this overrides forced MDI setting */
- if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+ if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
clear_bit(__IGB_RESETTING, &adapter->state);
return -EINVAL;
}
}
+#ifdef ETH_TP_MDI_AUTO
/* MDI-X => 2; MDI => 1; Auto => 3 */
if (ecmd->eth_tp_mdix_ctrl) {
/* fix up the value for auto (3 => 0) as zero is mapped
@@ -338,6 +367,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
}
+#endif /* ETH_TP_MDI_AUTO */
/* reset the link */
if (netif_running(adapter->netdev)) {
igb_down(adapter);
@@ -354,7 +384,8 @@ static u32 igb_get_link(struct net_device *netdev)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_mac_info *mac = &adapter->hw.mac;
- /* If the link is not reported up to netdev, interrupts are disabled,
+ /*
+ * If the link is not reported up to netdev, interrupts are disabled,
* and so the physical link state may have changed since we last
* looked. Set get_link_status to make sure that the true link
* state is interrogated, rather than pulling a cached and possibly
@@ -395,7 +426,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
adapter->fc_autoneg = pause->autoneg;
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
@@ -417,10 +448,18 @@ static int igb_set_pauseparam(struct net_device *netdev,
hw->fc.current_mode = hw->fc.requested_mode;
- retval = ((hw->phy.media_type == e1000_media_type_copper) ?
- igb_force_mac_fc(hw) : igb_setup_link(hw));
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ retval = hw->mac.ops.setup_link(hw);
+ /* implicit goto out */
+ } else {
+ retval = e1000_force_mac_fc(hw);
+ if (retval)
+ goto out;
+ e1000_set_fc_watermarks_generic(hw);
+ }
}
+out:
clear_bit(__IGB_RESETTING, &adapter->state);
return retval;
}
@@ -439,7 +478,7 @@ static void igb_set_msglevel(struct net_device *netdev, u32 data)
static int igb_get_regs_len(struct net_device *netdev)
{
-#define IGB_REGS_LEN 739
+#define IGB_REGS_LEN 555
return IGB_REGS_LEN * sizeof(u32);
}
@@ -456,80 +495,78 @@ static void igb_get_regs(struct net_device *netdev,
regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
/* General Registers */
- regs_buff[0] = rd32(E1000_CTRL);
- regs_buff[1] = rd32(E1000_STATUS);
- regs_buff[2] = rd32(E1000_CTRL_EXT);
- regs_buff[3] = rd32(E1000_MDIC);
- regs_buff[4] = rd32(E1000_SCTL);
- regs_buff[5] = rd32(E1000_CONNSW);
- regs_buff[6] = rd32(E1000_VET);
- regs_buff[7] = rd32(E1000_LEDCTL);
- regs_buff[8] = rd32(E1000_PBA);
- regs_buff[9] = rd32(E1000_PBS);
- regs_buff[10] = rd32(E1000_FRTIMER);
- regs_buff[11] = rd32(E1000_TCPTIMER);
+ regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL);
+ regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS);
+ regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ regs_buff[3] = E1000_READ_REG(hw, E1000_MDIC);
+ regs_buff[4] = E1000_READ_REG(hw, E1000_SCTL);
+ regs_buff[5] = E1000_READ_REG(hw, E1000_CONNSW);
+ regs_buff[6] = E1000_READ_REG(hw, E1000_VET);
+ regs_buff[7] = E1000_READ_REG(hw, E1000_LEDCTL);
+ regs_buff[8] = E1000_READ_REG(hw, E1000_PBA);
+ regs_buff[9] = E1000_READ_REG(hw, E1000_PBS);
+ regs_buff[10] = E1000_READ_REG(hw, E1000_FRTIMER);
+ regs_buff[11] = E1000_READ_REG(hw, E1000_TCPTIMER);
/* NVM Register */
- regs_buff[12] = rd32(E1000_EECD);
+ regs_buff[12] = E1000_READ_REG(hw, E1000_EECD);
/* Interrupt */
/* Reading EICS for EICR because they read the
- * same but EICS does not clear on read
- */
- regs_buff[13] = rd32(E1000_EICS);
- regs_buff[14] = rd32(E1000_EICS);
- regs_buff[15] = rd32(E1000_EIMS);
- regs_buff[16] = rd32(E1000_EIMC);
- regs_buff[17] = rd32(E1000_EIAC);
- regs_buff[18] = rd32(E1000_EIAM);
+ * same but EICS does not clear on read */
+ regs_buff[13] = E1000_READ_REG(hw, E1000_EICS);
+ regs_buff[14] = E1000_READ_REG(hw, E1000_EICS);
+ regs_buff[15] = E1000_READ_REG(hw, E1000_EIMS);
+ regs_buff[16] = E1000_READ_REG(hw, E1000_EIMC);
+ regs_buff[17] = E1000_READ_REG(hw, E1000_EIAC);
+ regs_buff[18] = E1000_READ_REG(hw, E1000_EIAM);
/* Reading ICS for ICR because they read the
- * same but ICS does not clear on read
- */
- regs_buff[19] = rd32(E1000_ICS);
- regs_buff[20] = rd32(E1000_ICS);
- regs_buff[21] = rd32(E1000_IMS);
- regs_buff[22] = rd32(E1000_IMC);
- regs_buff[23] = rd32(E1000_IAC);
- regs_buff[24] = rd32(E1000_IAM);
- regs_buff[25] = rd32(E1000_IMIRVP);
+ * same but ICS does not clear on read */
+ regs_buff[19] = E1000_READ_REG(hw, E1000_ICS);
+ regs_buff[20] = E1000_READ_REG(hw, E1000_ICS);
+ regs_buff[21] = E1000_READ_REG(hw, E1000_IMS);
+ regs_buff[22] = E1000_READ_REG(hw, E1000_IMC);
+ regs_buff[23] = E1000_READ_REG(hw, E1000_IAC);
+ regs_buff[24] = E1000_READ_REG(hw, E1000_IAM);
+ regs_buff[25] = E1000_READ_REG(hw, E1000_IMIRVP);
/* Flow Control */
- regs_buff[26] = rd32(E1000_FCAL);
- regs_buff[27] = rd32(E1000_FCAH);
- regs_buff[28] = rd32(E1000_FCTTV);
- regs_buff[29] = rd32(E1000_FCRTL);
- regs_buff[30] = rd32(E1000_FCRTH);
- regs_buff[31] = rd32(E1000_FCRTV);
+ regs_buff[26] = E1000_READ_REG(hw, E1000_FCAL);
+ regs_buff[27] = E1000_READ_REG(hw, E1000_FCAH);
+ regs_buff[28] = E1000_READ_REG(hw, E1000_FCTTV);
+ regs_buff[29] = E1000_READ_REG(hw, E1000_FCRTL);
+ regs_buff[30] = E1000_READ_REG(hw, E1000_FCRTH);
+ regs_buff[31] = E1000_READ_REG(hw, E1000_FCRTV);
/* Receive */
- regs_buff[32] = rd32(E1000_RCTL);
- regs_buff[33] = rd32(E1000_RXCSUM);
- regs_buff[34] = rd32(E1000_RLPML);
- regs_buff[35] = rd32(E1000_RFCTL);
- regs_buff[36] = rd32(E1000_MRQC);
- regs_buff[37] = rd32(E1000_VT_CTL);
+ regs_buff[32] = E1000_READ_REG(hw, E1000_RCTL);
+ regs_buff[33] = E1000_READ_REG(hw, E1000_RXCSUM);
+ regs_buff[34] = E1000_READ_REG(hw, E1000_RLPML);
+ regs_buff[35] = E1000_READ_REG(hw, E1000_RFCTL);
+ regs_buff[36] = E1000_READ_REG(hw, E1000_MRQC);
+ regs_buff[37] = E1000_READ_REG(hw, E1000_VT_CTL);
/* Transmit */
- regs_buff[38] = rd32(E1000_TCTL);
- regs_buff[39] = rd32(E1000_TCTL_EXT);
- regs_buff[40] = rd32(E1000_TIPG);
- regs_buff[41] = rd32(E1000_DTXCTL);
+ regs_buff[38] = E1000_READ_REG(hw, E1000_TCTL);
+ regs_buff[39] = E1000_READ_REG(hw, E1000_TCTL_EXT);
+ regs_buff[40] = E1000_READ_REG(hw, E1000_TIPG);
+ regs_buff[41] = E1000_READ_REG(hw, E1000_DTXCTL);
/* Wake Up */
- regs_buff[42] = rd32(E1000_WUC);
- regs_buff[43] = rd32(E1000_WUFC);
- regs_buff[44] = rd32(E1000_WUS);
- regs_buff[45] = rd32(E1000_IPAV);
- regs_buff[46] = rd32(E1000_WUPL);
+ regs_buff[42] = E1000_READ_REG(hw, E1000_WUC);
+ regs_buff[43] = E1000_READ_REG(hw, E1000_WUFC);
+ regs_buff[44] = E1000_READ_REG(hw, E1000_WUS);
+ regs_buff[45] = E1000_READ_REG(hw, E1000_IPAV);
+ regs_buff[46] = E1000_READ_REG(hw, E1000_WUPL);
/* MAC */
- regs_buff[47] = rd32(E1000_PCS_CFG0);
- regs_buff[48] = rd32(E1000_PCS_LCTL);
- regs_buff[49] = rd32(E1000_PCS_LSTAT);
- regs_buff[50] = rd32(E1000_PCS_ANADV);
- regs_buff[51] = rd32(E1000_PCS_LPAB);
- regs_buff[52] = rd32(E1000_PCS_NPTX);
- regs_buff[53] = rd32(E1000_PCS_LPABNP);
+ regs_buff[47] = E1000_READ_REG(hw, E1000_PCS_CFG0);
+ regs_buff[48] = E1000_READ_REG(hw, E1000_PCS_LCTL);
+ regs_buff[49] = E1000_READ_REG(hw, E1000_PCS_LSTAT);
+ regs_buff[50] = E1000_READ_REG(hw, E1000_PCS_ANADV);
+ regs_buff[51] = E1000_READ_REG(hw, E1000_PCS_LPAB);
+ regs_buff[52] = E1000_READ_REG(hw, E1000_PCS_NPTX);
+ regs_buff[53] = E1000_READ_REG(hw, E1000_PCS_LPABNP);
/* Statistics */
regs_buff[54] = adapter->stats.crcerrs;
@@ -595,112 +632,75 @@ static void igb_get_regs(struct net_device *netdev,
regs_buff[120] = adapter->stats.hrmpc;
for (i = 0; i < 4; i++)
- regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
+ regs_buff[121 + i] = E1000_READ_REG(hw, E1000_SRRCTL(i));
for (i = 0; i < 4; i++)
- regs_buff[125 + i] = rd32(E1000_PSRTYPE(i));
+ regs_buff[125 + i] = E1000_READ_REG(hw, E1000_PSRTYPE(i));
for (i = 0; i < 4; i++)
- regs_buff[129 + i] = rd32(E1000_RDBAL(i));
+ regs_buff[129 + i] = E1000_READ_REG(hw, E1000_RDBAL(i));
for (i = 0; i < 4; i++)
- regs_buff[133 + i] = rd32(E1000_RDBAH(i));
+ regs_buff[133 + i] = E1000_READ_REG(hw, E1000_RDBAH(i));
for (i = 0; i < 4; i++)
- regs_buff[137 + i] = rd32(E1000_RDLEN(i));
+ regs_buff[137 + i] = E1000_READ_REG(hw, E1000_RDLEN(i));
for (i = 0; i < 4; i++)
- regs_buff[141 + i] = rd32(E1000_RDH(i));
+ regs_buff[141 + i] = E1000_READ_REG(hw, E1000_RDH(i));
for (i = 0; i < 4; i++)
- regs_buff[145 + i] = rd32(E1000_RDT(i));
+ regs_buff[145 + i] = E1000_READ_REG(hw, E1000_RDT(i));
for (i = 0; i < 4; i++)
- regs_buff[149 + i] = rd32(E1000_RXDCTL(i));
+ regs_buff[149 + i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
for (i = 0; i < 10; i++)
- regs_buff[153 + i] = rd32(E1000_EITR(i));
+ regs_buff[153 + i] = E1000_READ_REG(hw, E1000_EITR(i));
for (i = 0; i < 8; i++)
- regs_buff[163 + i] = rd32(E1000_IMIR(i));
+ regs_buff[163 + i] = E1000_READ_REG(hw, E1000_IMIR(i));
for (i = 0; i < 8; i++)
- regs_buff[171 + i] = rd32(E1000_IMIREXT(i));
+ regs_buff[171 + i] = E1000_READ_REG(hw, E1000_IMIREXT(i));
for (i = 0; i < 16; i++)
- regs_buff[179 + i] = rd32(E1000_RAL(i));
+ regs_buff[179 + i] = E1000_READ_REG(hw, E1000_RAL(i));
for (i = 0; i < 16; i++)
- regs_buff[195 + i] = rd32(E1000_RAH(i));
+ regs_buff[195 + i] = E1000_READ_REG(hw, E1000_RAH(i));
for (i = 0; i < 4; i++)
- regs_buff[211 + i] = rd32(E1000_TDBAL(i));
+ regs_buff[211 + i] = E1000_READ_REG(hw, E1000_TDBAL(i));
for (i = 0; i < 4; i++)
- regs_buff[215 + i] = rd32(E1000_TDBAH(i));
+ regs_buff[215 + i] = E1000_READ_REG(hw, E1000_TDBAH(i));
for (i = 0; i < 4; i++)
- regs_buff[219 + i] = rd32(E1000_TDLEN(i));
+ regs_buff[219 + i] = E1000_READ_REG(hw, E1000_TDLEN(i));
for (i = 0; i < 4; i++)
- regs_buff[223 + i] = rd32(E1000_TDH(i));
+ regs_buff[223 + i] = E1000_READ_REG(hw, E1000_TDH(i));
for (i = 0; i < 4; i++)
- regs_buff[227 + i] = rd32(E1000_TDT(i));
+ regs_buff[227 + i] = E1000_READ_REG(hw, E1000_TDT(i));
for (i = 0; i < 4; i++)
- regs_buff[231 + i] = rd32(E1000_TXDCTL(i));
+ regs_buff[231 + i] = E1000_READ_REG(hw, E1000_TXDCTL(i));
for (i = 0; i < 4; i++)
- regs_buff[235 + i] = rd32(E1000_TDWBAL(i));
+ regs_buff[235 + i] = E1000_READ_REG(hw, E1000_TDWBAL(i));
for (i = 0; i < 4; i++)
- regs_buff[239 + i] = rd32(E1000_TDWBAH(i));
+ regs_buff[239 + i] = E1000_READ_REG(hw, E1000_TDWBAH(i));
for (i = 0; i < 4; i++)
- regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i));
+ regs_buff[243 + i] = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
for (i = 0; i < 4; i++)
- regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i));
+ regs_buff[247 + i] = E1000_READ_REG(hw, E1000_IP4AT_REG(i));
for (i = 0; i < 4; i++)
- regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i));
+ regs_buff[251 + i] = E1000_READ_REG(hw, E1000_IP6AT_REG(i));
for (i = 0; i < 32; i++)
- regs_buff[255 + i] = rd32(E1000_WUPM_REG(i));
+ regs_buff[255 + i] = E1000_READ_REG(hw, E1000_WUPM_REG(i));
for (i = 0; i < 128; i++)
- regs_buff[287 + i] = rd32(E1000_FFMT_REG(i));
+ regs_buff[287 + i] = E1000_READ_REG(hw, E1000_FFMT_REG(i));
for (i = 0; i < 128; i++)
- regs_buff[415 + i] = rd32(E1000_FFVT_REG(i));
+ regs_buff[415 + i] = E1000_READ_REG(hw, E1000_FFVT_REG(i));
for (i = 0; i < 4; i++)
- regs_buff[543 + i] = rd32(E1000_FFLT_REG(i));
-
- regs_buff[547] = rd32(E1000_TDFH);
- regs_buff[548] = rd32(E1000_TDFT);
- regs_buff[549] = rd32(E1000_TDFHS);
- regs_buff[550] = rd32(E1000_TDFPC);
+ regs_buff[543 + i] = E1000_READ_REG(hw, E1000_FFLT_REG(i));
+ regs_buff[547] = E1000_READ_REG(hw, E1000_TDFH);
+ regs_buff[548] = E1000_READ_REG(hw, E1000_TDFT);
+ regs_buff[549] = E1000_READ_REG(hw, E1000_TDFHS);
+ regs_buff[550] = E1000_READ_REG(hw, E1000_TDFPC);
if (hw->mac.type > e1000_82580) {
regs_buff[551] = adapter->stats.o2bgptc;
regs_buff[552] = adapter->stats.b2ospc;
regs_buff[553] = adapter->stats.o2bspc;
regs_buff[554] = adapter->stats.b2ogprc;
}
-
- if (hw->mac.type != e1000_82576)
- return;
- for (i = 0; i < 12; i++)
- regs_buff[555 + i] = rd32(E1000_SRRCTL(i + 4));
- for (i = 0; i < 4; i++)
- regs_buff[567 + i] = rd32(E1000_PSRTYPE(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[571 + i] = rd32(E1000_RDBAL(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[583 + i] = rd32(E1000_RDBAH(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[595 + i] = rd32(E1000_RDLEN(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[607 + i] = rd32(E1000_RDH(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[619 + i] = rd32(E1000_RDT(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[631 + i] = rd32(E1000_RXDCTL(i + 4));
-
- for (i = 0; i < 12; i++)
- regs_buff[643 + i] = rd32(E1000_TDBAL(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[655 + i] = rd32(E1000_TDBAH(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[667 + i] = rd32(E1000_TDLEN(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[679 + i] = rd32(E1000_TDH(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[691 + i] = rd32(E1000_TDT(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[703 + i] = rd32(E1000_TXDCTL(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[715 + i] = rd32(E1000_TDWBAL(i + 4));
- for (i = 0; i < 12; i++)
- regs_buff[727 + i] = rd32(E1000_TDWBAH(i + 4));
}
static int igb_get_eeprom_len(struct net_device *netdev)
@@ -733,13 +733,13 @@ static int igb_get_eeprom(struct net_device *netdev,
return -ENOMEM;
if (hw->nvm.type == e1000_nvm_eeprom_spi)
- ret_val = hw->nvm.ops.read(hw, first_word,
- last_word - first_word + 1,
- eeprom_buff);
+ ret_val = e1000_read_nvm(hw, first_word,
+ last_word - first_word + 1,
+ eeprom_buff);
else {
for (i = 0; i < last_word - first_word + 1; i++) {
- ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
- &eeprom_buff[i]);
+ ret_val = e1000_read_nvm(hw, first_word + i, 1,
+ &eeprom_buff[i]);
if (ret_val)
break;
}
@@ -747,7 +747,7 @@ static int igb_get_eeprom(struct net_device *netdev,
/* Device's eeprom is always little-endian, word addressable */
for (i = 0; i < last_word - first_word + 1; i++)
- le16_to_cpus(&eeprom_buff[i]);
+ eeprom_buff[i] = le16_to_cpu(eeprom_buff[i]);
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
eeprom->len);
@@ -769,9 +769,6 @@ static int igb_set_eeprom(struct net_device *netdev,
if (eeprom->len == 0)
return -EOPNOTSUPP;
- if (hw->mac.type == e1000_i211)
- return -EOPNOTSUPP;
-
if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
return -EFAULT;
@@ -786,19 +783,17 @@ static int igb_set_eeprom(struct net_device *netdev,
ptr = (void *)eeprom_buff;
if (eeprom->offset & 1) {
- /* need read/modify/write of first changed EEPROM word
- * only the second byte of the word is being modified
- */
- ret_val = hw->nvm.ops.read(hw, first_word, 1,
+ /* need read/modify/write of first changed EEPROM word */
+ /* only the second byte of the word is being modified */
+ ret_val = e1000_read_nvm(hw, first_word, 1,
&eeprom_buff[0]);
ptr++;
}
if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
- /* need read/modify/write of last changed EEPROM word
- * only the first byte of the word is being modified
- */
- ret_val = hw->nvm.ops.read(hw, last_word, 1,
- &eeprom_buff[last_word - first_word]);
+ /* need read/modify/write of last changed EEPROM word */
+ /* only the first byte of the word is being modified */
+ ret_val = e1000_read_nvm(hw, last_word, 1,
+ &eeprom_buff[last_word - first_word]);
}
/* Device's eeprom is always little-endian, word addressable */
@@ -808,18 +803,16 @@ static int igb_set_eeprom(struct net_device *netdev,
memcpy(ptr, bytes, eeprom->len);
for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+ cpu_to_le16s(&eeprom_buff[i]);
- ret_val = hw->nvm.ops.write(hw, first_word,
- last_word - first_word + 1, eeprom_buff);
+ ret_val = e1000_write_nvm(hw, first_word,
+ last_word - first_word + 1, eeprom_buff);
- /* Update the checksum over the first part of the EEPROM if needed
- * and flush shadow RAM for 82573 controllers
- */
- if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
- hw->nvm.ops.update(hw);
+ /* Update the checksum if the write succeeded
+ * and flush shadow RAM for 82573 controllers */
+ if (ret_val == 0)
+ e1000_update_nvm_checksum(hw);
- igb_set_fw_version(adapter);
kfree(eeprom_buff);
return ret_val;
}
@@ -829,16 +822,14 @@ static void igb_get_drvinfo(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
+ strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
+ strncpy(drvinfo->version, igb_driver_version,
+ sizeof(drvinfo->version) - 1);
- /* EEPROM image version # is reported as firmware version # for
- * 82575 controllers
- */
- strlcpy(drvinfo->fw_version, adapter->fw_version,
- sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info));
+ strncpy(drvinfo->fw_version, adapter->fw_version,
+ sizeof(drvinfo->fw_version) - 1);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info) - 1);
drvinfo->n_stats = IGB_STATS_LEN;
drvinfo->testinfo_len = IGB_TEST_LEN;
drvinfo->regdump_len = igb_get_regs_len(netdev);
@@ -852,8 +843,12 @@ static void igb_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = IGB_MAX_RXD;
ring->tx_max_pending = IGB_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
ring->rx_pending = adapter->rx_ring_count;
ring->tx_pending = adapter->tx_ring_count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
}
static int igb_set_ringparam(struct net_device *netdev,
@@ -867,12 +862,12 @@ static int igb_set_ringparam(struct net_device *netdev,
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
- new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
+ new_rx_count = min_t(u16, ring->rx_pending, (u32)IGB_MAX_RXD);
+ new_rx_count = max_t(u16, new_rx_count, (u16)IGB_MIN_RXD);
new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
- new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
- new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
+ new_tx_count = min_t(u16, ring->tx_pending, (u32)IGB_MAX_TXD);
+ new_tx_count = max_t(u16, new_tx_count, (u16)IGB_MIN_TXD);
new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
if ((new_tx_count == adapter->tx_ring_count) &&
@@ -882,7 +877,7 @@ static int igb_set_ringparam(struct net_device *netdev,
}
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -895,11 +890,11 @@ static int igb_set_ringparam(struct net_device *netdev,
}
if (adapter->num_tx_queues > adapter->num_rx_queues)
- temp_ring = vmalloc(adapter->num_tx_queues *
- sizeof(struct igb_ring));
+ temp_ring = vmalloc(adapter->num_tx_queues
+ * sizeof(struct igb_ring));
else
- temp_ring = vmalloc(adapter->num_rx_queues *
- sizeof(struct igb_ring));
+ temp_ring = vmalloc(adapter->num_rx_queues
+ * sizeof(struct igb_ring));
if (!temp_ring) {
err = -ENOMEM;
@@ -908,9 +903,10 @@ static int igb_set_ringparam(struct net_device *netdev,
igb_down(adapter);
- /* We can't just free everything and then setup again,
+ /*
+ * We can't just free everything and then setup again,
* because the ISRs in MSI-X mode get passed pointers
- * to the Tx and Rx ring structs.
+ * to the tx and rx ring structs.
*/
if (new_tx_count != adapter->tx_ring_count) {
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -972,241 +968,27 @@ clear_reset:
return err;
}
-/* ethtool register test data */
-struct igb_reg_test {
- u16 reg;
- u16 reg_offset;
- u16 array_len;
- u16 test_type;
- u32 mask;
- u32 write;
-};
-
-/* In the hardware, registers are laid out either singly, in arrays
- * spaced 0x100 bytes apart, or in contiguous tables. We assume
- * most tests take place on arrays or single registers (handled
- * as a single-element array) and special-case the tables.
- * Table tests are always pattern tests.
- *
- * We also make provision for some required setup steps by specifying
- * registers to be written without any read-back testing.
- */
-
-#define PATTERN_TEST 1
-#define SET_READ_TEST 2
-#define WRITE_NO_TEST 3
-#define TABLE32_TEST 4
-#define TABLE64_TEST_LO 5
-#define TABLE64_TEST_HI 6
-
-/* i210 reg test */
-static struct igb_reg_test reg_test_i210[] = {
- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- /* RDH is read-only for i210, only test RDT. */
- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RA, 0, 16, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_HI,
- 0x900FFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128, TABLE32_TEST,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0, 0 }
-};
-
-/* i350 reg test */
-static struct igb_reg_test reg_test_i350[] = {
- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 },
- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- /* RDH is read-only for i350, only test RDT. */
- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RA, 0, 16, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_HI,
- 0xC3FFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 16, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 16, TABLE64_TEST_HI,
- 0xC3FFFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128, TABLE32_TEST,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0 }
-};
-
-/* 82580 reg test */
-static struct igb_reg_test reg_test_82580[] = {
- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- /* RDH is read-only for 82580, only test RDT. */
- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RA, 0, 16, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_HI,
- 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 8, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 8, TABLE64_TEST_HI,
- 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128, TABLE32_TEST,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0 }
-};
-
-/* 82576 reg test */
-static struct igb_reg_test reg_test_82576[] = {
- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- /* Enable all RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
- /* RDH is read-only for 82576, only test RDT. */
- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0 }
-};
-
-/* 82575 register test */
-static struct igb_reg_test reg_test_82575[] = {
- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- /* Enable all four RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
- /* RDH is read-only for 82575, only test RDT. */
- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0 }
-};
-
static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
int reg, u32 mask, u32 write)
{
struct e1000_hw *hw = &adapter->hw;
u32 pat, val;
- static const u32 _test[] =
- {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ static const u32 _test[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
- wr32(reg, (_test[pat] & write));
- val = rd32(reg) & mask;
+ E1000_WRITE_REG(hw, reg, (_test[pat] & write));
+ val = E1000_READ_REG(hw, reg) & mask;
if (val != (_test[pat] & write & mask)) {
- dev_err(&adapter->pdev->dev,
+ dev_err(pci_dev_to_dev(adapter->pdev),
"pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
- reg, val, (_test[pat] & write & mask));
- *data = reg;
- return 1;
+ E1000_REGISTER(hw, reg), val, (_test[pat]
+ & write & mask));
+ *data = E1000_REGISTER(hw, reg);
+ return true;
}
}
- return 0;
+ return false;
}
static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
@@ -1214,17 +996,17 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
{
struct e1000_hw *hw = &adapter->hw;
u32 val;
- wr32(reg, write & mask);
- val = rd32(reg);
+ E1000_WRITE_REG(hw, reg, write & mask);
+ val = E1000_READ_REG(hw, reg);
if ((write & mask) != (val & mask)) {
- dev_err(&adapter->pdev->dev,
- "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
- (val & mask), (write & mask));
- *data = reg;
- return 1;
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "set/check reg %04X test failed:got 0x%08X expected 0x%08X\n",
+ reg, (val & mask), (write & mask));
+ *data = E1000_REGISTER(hw, reg);
+ return true;
}
- return 0;
+ return false;
}
#define REG_PATTERN_TEST(reg, mask, write) \
@@ -1276,19 +1058,19 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
* tests. Some bits are read-only, some toggle, and some
* are writable on newer MACs.
*/
- before = rd32(E1000_STATUS);
- value = (rd32(E1000_STATUS) & toggle);
- wr32(E1000_STATUS, toggle);
- after = rd32(E1000_STATUS) & toggle;
+ before = E1000_READ_REG(hw, E1000_STATUS);
+ value = (E1000_READ_REG(hw, E1000_STATUS) & toggle);
+ E1000_WRITE_REG(hw, E1000_STATUS, toggle);
+ after = E1000_READ_REG(hw, E1000_STATUS) & toggle;
if (value != after) {
- dev_err(&adapter->pdev->dev,
+ dev_err(pci_dev_to_dev(adapter->pdev),
"failed STATUS register test got: 0x%08X expected: 0x%08X\n",
after, value);
*data = 1;
return 1;
}
/* restore previous status */
- wr32(E1000_STATUS, before);
+ E1000_WRITE_REG(hw, E1000_STATUS, before);
/* Perform the remainder of the register test, looping through
* the test table until we either fail or reach the null entry.
@@ -1310,7 +1092,7 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
break;
case WRITE_NO_TEST:
writel(test->write,
- (adapter->hw.hw_addr + test->reg)
+ (adapter->hw.hw_addr + test->reg)
+ (i * test->reg_offset));
break;
case TABLE32_TEST:
@@ -1341,11 +1123,9 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
{
*data = 0;
- /* Validate eeprom on all parts but i211 */
- if (adapter->hw.mac.type != e1000_i211) {
- if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
- *data = 2;
- }
+ /* Validate NVM checksum */
+ if (e1000_validate_nvm_checksum(&adapter->hw) < 0)
+ *data = 2;
return *data;
}
@@ -1355,7 +1135,7 @@ static irqreturn_t igb_test_intr(int irq, void *data)
struct igb_adapter *adapter = (struct igb_adapter *) data;
struct e1000_hw *hw = &adapter->hw;
- adapter->test_icr |= rd32(E1000_ICR);
+ adapter->test_icr |= E1000_READ_REG(hw, E1000_ICR);
return IRQ_HANDLED;
}
@@ -1364,7 +1144,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
{
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- u32 mask, ics_mask, i = 0, shared_int = true;
+ u32 mask, ics_mask, i = 0, shared_int = TRUE;
u32 irq = adapter->pdev->irq;
*data = 0;
@@ -1372,32 +1152,32 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Hook up test interrupt handler just for this test */
if (adapter->msix_entries) {
if (request_irq(adapter->msix_entries[0].vector,
- igb_test_intr, 0, netdev->name, adapter)) {
+ &igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
return -1;
}
} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
- shared_int = false;
+ shared_int = FALSE;
if (request_irq(irq,
- igb_test_intr, 0, netdev->name, adapter)) {
+ igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
return -1;
}
} else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
netdev->name, adapter)) {
- shared_int = false;
- } else if (request_irq(irq, igb_test_intr, IRQF_SHARED,
+ shared_int = FALSE;
+ } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
netdev->name, adapter)) {
*data = 1;
return -1;
}
- dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
- (shared_int ? "shared" : "unshared"));
+ dev_info(pci_dev_to_dev(adapter->pdev), "testing %s interrupt\n",
+ (shared_int ? "shared" : "unshared"));
/* Disable all the interrupts */
- wr32(E1000_IMC, ~0);
- wrfl();
- msleep(10);
+ E1000_WRITE_REG(hw, E1000_IMC, ~0);
+ E1000_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
/* Define all writable bits for ICS */
switch (hw->mac.type) {
@@ -1412,9 +1192,11 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
break;
case e1000_i350:
case e1000_i354:
+ ics_mask = 0x77DCFED5;
+ break;
case e1000_i210:
case e1000_i211:
- ics_mask = 0x77DCFED5;
+ ics_mask = 0x774CFED5;
break;
default:
ics_mask = 0x7FFFFFFF;
@@ -1439,12 +1221,12 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
adapter->test_icr = 0;
/* Flush any pending interrupts */
- wr32(E1000_ICR, ~0);
+ E1000_WRITE_REG(hw, E1000_ICR, ~0);
- wr32(E1000_IMC, mask);
- wr32(E1000_ICS, mask);
- wrfl();
- msleep(10);
+ E1000_WRITE_REG(hw, E1000_IMC, mask);
+ E1000_WRITE_REG(hw, E1000_ICS, mask);
+ E1000_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
if (adapter->test_icr & mask) {
*data = 3;
@@ -1461,12 +1243,12 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
adapter->test_icr = 0;
/* Flush any pending interrupts */
- wr32(E1000_ICR, ~0);
+ E1000_WRITE_REG(hw, E1000_ICR, ~0);
- wr32(E1000_IMS, mask);
- wr32(E1000_ICS, mask);
- wrfl();
- msleep(10);
+ E1000_WRITE_REG(hw, E1000_IMS, mask);
+ E1000_WRITE_REG(hw, E1000_ICS, mask);
+ E1000_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
if (!(adapter->test_icr & mask)) {
*data = 4;
@@ -1483,12 +1265,12 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
adapter->test_icr = 0;
/* Flush any pending interrupts */
- wr32(E1000_ICR, ~0);
+ E1000_WRITE_REG(hw, E1000_ICR, ~0);
- wr32(E1000_IMC, ~mask);
- wr32(E1000_ICS, ~mask);
- wrfl();
- msleep(10);
+ E1000_WRITE_REG(hw, E1000_IMC, ~mask);
+ E1000_WRITE_REG(hw, E1000_ICS, ~mask);
+ E1000_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
if (adapter->test_icr & mask) {
*data = 5;
@@ -1498,9 +1280,9 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
}
/* Disable all the interrupts */
- wr32(E1000_IMC, ~0);
- wrfl();
- msleep(10);
+ E1000_WRITE_REG(hw, E1000_IMC, ~0);
+ E1000_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
/* Unhook test interrupt handler */
if (adapter->msix_entries)
@@ -1526,7 +1308,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
/* Setup Tx descriptor ring and Tx buffers */
tx_ring->count = IGB_DEFAULT_TXD;
- tx_ring->dev = &adapter->pdev->dev;
+ tx_ring->dev = pci_dev_to_dev(adapter->pdev);
tx_ring->netdev = adapter->netdev;
tx_ring->reg_idx = adapter->vfs_allocated_count;
@@ -1540,17 +1322,20 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
/* Setup Rx descriptor ring and Rx buffers */
rx_ring->count = IGB_DEFAULT_RXD;
- rx_ring->dev = &adapter->pdev->dev;
+ rx_ring->dev = pci_dev_to_dev(adapter->pdev);
rx_ring->netdev = adapter->netdev;
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ rx_ring->rx_buffer_len = IGB_RX_HDR_LEN;
+#endif
rx_ring->reg_idx = adapter->vfs_allocated_count;
if (igb_setup_rx_resources(rx_ring)) {
- ret_val = 3;
+ ret_val = 2;
goto err_nomem;
}
/* set the default queue to queue 0 of PF */
- wr32(E1000_MRQC, adapter->vfs_allocated_count << 3);
+ E1000_WRITE_REG(hw, E1000_MRQC, adapter->vfs_allocated_count << 3);
/* enable receive ring */
igb_setup_rctl(adapter);
@@ -1570,10 +1355,10 @@ static void igb_phy_disable_receiver(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
/* Write out to PHY registers 29 and 30 to disable the Receiver. */
- igb_write_phy_reg(hw, 29, 0x001F);
- igb_write_phy_reg(hw, 30, 0x8FFC);
- igb_write_phy_reg(hw, 29, 0x001A);
- igb_write_phy_reg(hw, 30, 0x8FF0);
+ e1000_write_phy_reg(hw, 29, 0x001F);
+ e1000_write_phy_reg(hw, 30, 0x8FFC);
+ e1000_write_phy_reg(hw, 29, 0x001A);
+ e1000_write_phy_reg(hw, 30, 0x8FF0);
}
static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
@@ -1581,31 +1366,32 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_reg = 0;
- hw->mac.autoneg = false;
+ hw->mac.autoneg = FALSE;
if (hw->phy.type == e1000_phy_m88) {
if (hw->phy.id != I210_I_PHY_ID) {
/* Auto-MDI/MDIX Off */
- igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
+ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
/* reset to update Auto-MDI/MDIX */
- igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
+ e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140);
/* autoneg off */
- igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
+ e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140);
} else {
/* force 1000, set loopback */
- igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
- igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
+ e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
+ e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
}
+ } else {
+ /* enable MII loopback */
+ if (hw->phy.type == e1000_phy_82580)
+ e1000_write_phy_reg(hw, I82577_PHY_LBK_CTRL, 0x8041);
}
- /* add small delay to avoid loopback test failure */
- msleep(50);
-
- /* force 1000, set loopback */
- igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
+ /* force 1000, set loopback */
+ e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
/* Now set up the MAC to the same speed/duplex as the PHY. */
- ctrl_reg = rd32(E1000_CTRL);
+ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
@@ -1616,7 +1402,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
if (hw->phy.type == e1000_phy_m88)
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
- wr32(E1000_CTRL, ctrl_reg);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
/* Disable the receiver on the PHY so when a cable is plugged in, the
* PHY does not begin to autoneg when a cable is reconnected to the NIC.
@@ -1638,63 +1424,64 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 reg;
- reg = rd32(E1000_CTRL_EXT);
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
/* use CTRL_EXT to identify link type as SGMII can appear as copper */
if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
-
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+ (hw->device_id == E1000_DEV_ID_I354_SGMII) ||
+ (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) {
/* Enable DH89xxCC MPHY for near end loopback */
- reg = rd32(E1000_MPHY_ADDR_CTL);
+ reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
- E1000_MPHY_PCS_CLK_REG_OFFSET;
- wr32(E1000_MPHY_ADDR_CTL, reg);
+ E1000_MPHY_PCS_CLK_REG_OFFSET;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);
- reg = rd32(E1000_MPHY_DATA);
+ reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
- wr32(E1000_MPHY_DATA, reg);
+ E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
}
- reg = rd32(E1000_RCTL);
+ reg = E1000_READ_REG(hw, E1000_RCTL);
reg |= E1000_RCTL_LBM_TCVR;
- wr32(E1000_RCTL, reg);
+ E1000_WRITE_REG(hw, E1000_RCTL, reg);
- wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);
+ E1000_WRITE_REG(hw, E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);
- reg = rd32(E1000_CTRL);
+ reg = E1000_READ_REG(hw, E1000_CTRL);
reg &= ~(E1000_CTRL_RFCE |
E1000_CTRL_TFCE |
E1000_CTRL_LRST);
reg |= E1000_CTRL_SLU |
E1000_CTRL_FD;
- wr32(E1000_CTRL, reg);
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
/* Unset switch control to serdes energy detect */
- reg = rd32(E1000_CONNSW);
+ reg = E1000_READ_REG(hw, E1000_CONNSW);
reg &= ~E1000_CONNSW_ENRGSRC;
- wr32(E1000_CONNSW, reg);
+ E1000_WRITE_REG(hw, E1000_CONNSW, reg);
/* Unset sigdetect for SERDES loopback on
- * 82580 and newer devices.
+ * 82580 and newer devices
*/
if (hw->mac.type >= e1000_82580) {
- reg = rd32(E1000_PCS_CFG0);
+ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
reg |= E1000_PCS_CFG_IGN_SD;
- wr32(E1000_PCS_CFG0, reg);
+ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
}
/* Set PCS register for forced speed */
- reg = rd32(E1000_PCS_LCTL);
+ reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/
reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */
E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
E1000_PCS_LCTL_FSD | /* Force Speed */
E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
- wr32(E1000_PCS_LCTL, reg);
+ E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
return 0;
}
@@ -1709,35 +1496,37 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
u16 phy_reg;
if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+ (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
u32 reg;
/* Disable near end loopback on DH89xxCC */
- reg = rd32(E1000_MPHY_ADDR_CTL);
+ reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
- E1000_MPHY_PCS_CLK_REG_OFFSET;
- wr32(E1000_MPHY_ADDR_CTL, reg);
+ E1000_MPHY_PCS_CLK_REG_OFFSET;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);
- reg = rd32(E1000_MPHY_DATA);
+ reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
- wr32(E1000_MPHY_DATA, reg);
+ E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
}
- rctl = rd32(E1000_RCTL);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
- wr32(E1000_RCTL, rctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
- hw->mac.autoneg = true;
- igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
+ hw->mac.autoneg = TRUE;
+ e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
if (phy_reg & MII_CR_LOOPBACK) {
phy_reg &= ~MII_CR_LOOPBACK;
- igb_write_phy_reg(hw, PHY_CONTROL, phy_reg);
- igb_phy_sw_reset(hw);
+ if (hw->phy.type == I210_I_PHY_ID)
+ e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
+ e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg);
+ e1000_phy_commit(hw);
}
}
-
static void igb_create_lbtest_frame(struct sk_buff *skb,
unsigned int frame_size)
{
@@ -1756,19 +1545,25 @@ static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
frame_size >>= 1;
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ data = rx_buffer->skb->data;
+#else
data = kmap(rx_buffer->page);
+#endif
if (data[3] != 0xFF ||
data[frame_size + 10] != 0xBE ||
data[frame_size + 12] != 0xAF)
match = false;
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
kunmap(rx_buffer->page);
+#endif
return match;
}
-static int igb_clean_test_rings(struct igb_ring *rx_ring,
+static u16 igb_clean_test_rings(struct igb_ring *rx_ring,
struct igb_ring *tx_ring,
unsigned int size)
{
@@ -1783,13 +1578,17 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
- /* check Rx buffer */
+ /* check rx buffer */
rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
/* sync Rx buffer for CPU read */
dma_sync_single_for_cpu(rx_ring->dev,
rx_buffer_info->dma,
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ IGB_RX_HDR_LEN,
+#else
IGB_RX_BUFSZ,
+#endif
DMA_FROM_DEVICE);
/* verify contents of skb */
@@ -1799,14 +1598,18 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
/* sync Rx buffer for device write */
dma_sync_single_for_device(rx_ring->dev,
rx_buffer_info->dma,
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ IGB_RX_HDR_LEN,
+#else
IGB_RX_BUFSZ,
+#endif
DMA_FROM_DEVICE);
- /* unmap buffer on Tx side */
+ /* unmap buffer on tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
- /* increment Rx/Tx next to clean counters */
+ /* increment rx/tx next to clean counters */
rx_ntc++;
if (rx_ntc == rx_ring->count)
rx_ntc = 0;
@@ -1818,8 +1621,6 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
}
- netdev_tx_reset_queue(txring_txq(tx_ring));
-
/* re-map buffers to ring, store next to clean values */
igb_alloc_rx_buffers(rx_ring, count);
rx_ring->next_to_clean = rx_ntc;
@@ -1847,7 +1648,8 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
igb_create_lbtest_frame(skb, size);
skb_put(skb, size);
- /* Calculate the loop count based on the largest descriptor ring
+ /*
+ * Calculate the loop count based on the largest descriptor ring
* The idea is to wrap the largest ring a number of times using 64
* send/receive pairs during each loop
*/
@@ -1874,7 +1676,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
break;
}
- /* allow 200 milliseconds for packets to go from Tx to Rx */
+ /* allow 200 milliseconds for packets to go from tx to rx */
msleep(200);
good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
@@ -1893,21 +1695,14 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
{
/* PHY loopback cannot be performed if SoL/IDER
- * sessions are active
- */
- if (igb_check_reset_block(&adapter->hw)) {
- dev_err(&adapter->pdev->dev,
+ * sessions are active */
+ if (e1000_check_reset_block(&adapter->hw)) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
"Cannot do PHY loopback test when SoL/IDER is active.\n");
*data = 0;
goto out;
}
- if (adapter->hw.mac.type == e1000_i354) {
- dev_info(&adapter->pdev->dev,
- "Loopback test not supported on i354.\n");
- *data = 0;
- goto out;
- }
*data = igb_setup_desc_rings(adapter);
if (*data)
goto out;
@@ -1915,6 +1710,7 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
if (*data)
goto err_loopback;
*data = igb_run_loopback_test(adapter);
+
igb_loopback_cleanup(adapter);
err_loopback:
@@ -1925,31 +1721,39 @@ out:
static int igb_link_test(struct igb_adapter *adapter, u64 *data)
{
- struct e1000_hw *hw = &adapter->hw;
+ u32 link;
+ int i, time;
+
*data = 0;
- if (hw->phy.media_type == e1000_media_type_internal_serdes) {
+ time = 0;
+ if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
int i = 0;
- hw->mac.serdes_has_link = false;
+ adapter->hw.mac.serdes_has_link = FALSE;
/* On some blade server designs, link establishment
- * could take as long as 2-3 minutes
- */
+ * could take as long as 2-3 minutes */
do {
- hw->mac.ops.check_for_link(&adapter->hw);
- if (hw->mac.serdes_has_link)
- return *data;
+ e1000_check_for_link(&adapter->hw);
+ if (adapter->hw.mac.serdes_has_link)
+ goto out;
msleep(20);
} while (i++ < 3750);
*data = 1;
} else {
- hw->mac.ops.check_for_link(&adapter->hw);
- if (hw->mac.autoneg)
- msleep(5000);
-
- if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
+ for (i = 0; i < IGB_MAX_LINK_TRIES; i++) {
+ link = igb_has_link(adapter);
+ if (link) {
+ goto out;
+ } else {
+ time++;
+ msleep(1000);
+ }
+ }
+ if (!link)
*data = 1;
}
+out:
return *data;
}
@@ -1970,20 +1774,19 @@ static void igb_diag_test(struct net_device *netdev,
forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
autoneg = adapter->hw.mac.autoneg;
- dev_info(&adapter->pdev->dev, "offline testing starting\n");
+ dev_info(pci_dev_to_dev(adapter->pdev), "offline testing starting\n");
/* power up link for link test */
igb_power_up_link(adapter);
/* Link test performed before hardware reset so autoneg doesn't
- * interfere with test result
- */
+ * interfere with test result */
if (igb_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ igb_close(netdev);
else
igb_reset(adapter);
@@ -1999,8 +1802,10 @@ static void igb_diag_test(struct net_device *netdev,
eth_test->flags |= ETH_TEST_FL_FAILED;
igb_reset(adapter);
+
/* power up link for loopback test */
igb_power_up_link(adapter);
+
if (igb_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -2010,15 +1815,15 @@ static void igb_diag_test(struct net_device *netdev,
adapter->hw.mac.autoneg = autoneg;
/* force this routine to wait until autoneg complete/timeout */
- adapter->hw.phy.autoneg_wait_to_complete = true;
+ adapter->hw.phy.autoneg_wait_to_complete = TRUE;
igb_reset(adapter);
- adapter->hw.phy.autoneg_wait_to_complete = false;
+ adapter->hw.phy.autoneg_wait_to_complete = FALSE;
clear_bit(__IGB_TESTING, &adapter->state);
if (if_running)
- dev_open(netdev);
+ igb_open(netdev);
} else {
- dev_info(&adapter->pdev->dev, "online testing starting\n");
+ dev_info(pci_dev_to_dev(adapter->pdev), "online testing starting\n");
/* PHY is powered down when interface is down */
if (if_running && igb_link_test(adapter, &data[4]))
@@ -2041,14 +1846,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC |
- WAKE_PHY;
wol->wolopts = 0;
if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
return;
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC |
+ WAKE_PHY;
+
/* apply any specific unsupported masks here */
switch (adapter->hw.device_id) {
default:
@@ -2096,8 +1902,7 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
}
/* bit defines for adapter->led_status */
-#define IGB_LED_ON 0
-
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
static int igb_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
@@ -2106,23 +1911,47 @@ static int igb_set_phys_id(struct net_device *netdev,
switch (state) {
case ETHTOOL_ID_ACTIVE:
- igb_blink_led(hw);
+ e1000_blink_led(hw);
return 2;
case ETHTOOL_ID_ON:
- igb_blink_led(hw);
+ e1000_led_on(hw);
break;
case ETHTOOL_ID_OFF:
- igb_led_off(hw);
+ e1000_led_off(hw);
break;
case ETHTOOL_ID_INACTIVE:
- igb_led_off(hw);
- clear_bit(IGB_LED_ON, &adapter->led_status);
- igb_cleanup_led(hw);
+ e1000_led_off(hw);
+ e1000_cleanup_led(hw);
break;
}
return 0;
}
+#else
+static int igb_phys_id(struct net_device *netdev, u32 data)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned long timeout;
+
+ timeout = data * 1000;
+
+ /*
+ * msleep_interruptible only accepts unsigned int so we are limited
+ * in how long a duration we can wait
+ */
+ if (!timeout || timeout > UINT_MAX)
+ timeout = UINT_MAX;
+
+ e1000_blink_led(hw);
+ msleep_interruptible(timeout);
+
+ e1000_led_off(hw);
+ e1000_cleanup_led(hw);
+
+ return 0;
+}
+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
static int igb_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
@@ -2130,11 +1959,36 @@ static int igb_set_coalesce(struct net_device *netdev,
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
+ if (ec->rx_max_coalesced_frames ||
+ ec->rx_coalesce_usecs_irq ||
+ ec->rx_max_coalesced_frames_irq ||
+ ec->tx_max_coalesced_frames ||
+ ec->tx_coalesce_usecs_irq ||
+ ec->stats_block_coalesce_usecs ||
+ ec->use_adaptive_rx_coalesce ||
+ ec->use_adaptive_tx_coalesce ||
+ ec->pkt_rate_low ||
+ ec->rx_coalesce_usecs_low ||
+ ec->rx_max_coalesced_frames_low ||
+ ec->tx_coalesce_usecs_low ||
+ ec->tx_max_coalesced_frames_low ||
+ ec->pkt_rate_high ||
+ ec->rx_coalesce_usecs_high ||
+ ec->rx_max_coalesced_frames_high ||
+ ec->tx_coalesce_usecs_high ||
+ ec->tx_max_coalesced_frames_high ||
+ ec->rate_sample_interval) {
+ netdev_err(netdev, "set_coalesce: invalid parameter");
+ return -ENOTSUPP;
+ }
+
if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 3) &&
(ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
- (ec->rx_coalesce_usecs == 2))
+ (ec->rx_coalesce_usecs == 2)) {
+ netdev_err(netdev, "set_coalesce: invalid setting");
return -EINVAL;
+ }
if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
((ec->tx_coalesce_usecs > 3) &&
@@ -2145,11 +1999,12 @@ static int igb_set_coalesce(struct net_device *netdev,
if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
return -EINVAL;
+ if (ec->tx_max_coalesced_frames_irq)
+ adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
+
/* If ITR is disabled, disable DMAC */
- if (ec->rx_coalesce_usecs == 0) {
- if (adapter->flags & IGB_FLAG_DMAC)
- adapter->flags &= ~IGB_FLAG_DMAC;
- }
+ if (ec->rx_coalesce_usecs == 0)
+ adapter->dmac = IGB_DMAC_DISABLE;
/* convert to rate of irq's per second */
if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
@@ -2190,6 +2045,8 @@ static int igb_get_coalesce(struct net_device *netdev,
else
ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+ ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
+
if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
if (adapter->tx_itr_setting <= 3)
ec->tx_coalesce_usecs = adapter->tx_itr_setting;
@@ -2208,6 +2065,7 @@ static int igb_nway_reset(struct net_device *netdev)
return 0;
}
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
static int igb_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
@@ -2219,19 +2077,32 @@ static int igb_get_sset_count(struct net_device *netdev, int sset)
return -ENOTSUPP;
}
}
+#else
+static int igb_get_stats_count(struct net_device *netdev)
+{
+ return IGB_STATS_LEN;
+}
+
+static int igb_diag_test_count(struct net_device *netdev)
+{
+ return IGB_TEST_LEN;
+}
+#endif
static void igb_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct rtnl_link_stats64 *net_stats = &adapter->stats64;
- unsigned int start;
- struct igb_ring *ring;
- int i, j;
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+ struct net_device_stats *net_stats = &netdev->stats;
+#else
+ struct net_device_stats *net_stats = &adapter->net_stats;
+#endif
+ u64 *queue_stat;
+ int i, j, k;
char *p;
- spin_lock(&adapter->stats64_lock);
- igb_update_stats(adapter, net_stats);
+ igb_update_stats(adapter);
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
@@ -2244,36 +2115,15 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < adapter->num_tx_queues; j++) {
- u64 restart2;
-
- ring = adapter->tx_ring[j];
- do {
- start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
- data[i] = ring->tx_stats.packets;
- data[i+1] = ring->tx_stats.bytes;
- data[i+2] = ring->tx_stats.restart_queue;
- } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
- do {
- start = u64_stats_fetch_begin_bh(&ring->tx_syncp2);
- restart2 = ring->tx_stats.restart_queue2;
- } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start));
- data[i+2] += restart2;
-
- i += IGB_TX_QUEUE_STATS_LEN;
+ queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats;
+ for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
+ data[i] = queue_stat[k];
}
for (j = 0; j < adapter->num_rx_queues; j++) {
- ring = adapter->rx_ring[j];
- do {
- start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
- data[i] = ring->rx_stats.packets;
- data[i+1] = ring->rx_stats.bytes;
- data[i+2] = ring->rx_stats.drops;
- data[i+3] = ring->rx_stats.csum_err;
- data[i+4] = ring->rx_stats.alloc_failed;
- } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
- i += IGB_RX_QUEUE_STATS_LEN;
+ queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats;
+ for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
+ data[i] = queue_stat[k];
}
- spin_unlock(&adapter->stats64_lock);
}
static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
@@ -2318,17 +2168,19 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
sprintf(p, "rx_queue_%u_alloc_failed", i);
p += ETH_GSTRING_LEN;
}
- /* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
+/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
break;
}
}
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
static int igb_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
struct igb_adapter *adapter = netdev_priv(dev);
switch (adapter->hw.mac.type) {
+#ifdef HAVE_PTP_1588_CLOCK
case e1000_82575:
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
@@ -2367,17 +2219,374 @@ static int igb_get_ts_info(struct net_device *dev,
info->rx_filters |=
(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
+#endif /* HAVE_PTP_1588_CLOCK */
default:
return -EOPNOTSUPP;
}
}
+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
+
+#ifdef CONFIG_PM_RUNTIME
+static int igb_ethtool_begin(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ pm_runtime_get_sync(&adapter->pdev->dev);
+
+ return 0;
+}
+
+static void igb_ethtool_complete(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ pm_runtime_put(&adapter->pdev->dev);
+}
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifndef HAVE_NDO_SET_FEATURES
+static u32 igb_get_rx_csum(struct net_device *netdev)
+{
+ return !!(netdev->features & NETIF_F_RXCSUM);
+}
+
+static int igb_set_rx_csum(struct net_device *netdev, u32 data)
+{
+ const u32 feature_list = NETIF_F_RXCSUM;
+
+ if (data)
+ netdev->features |= feature_list;
+ else
+ netdev->features &= ~feature_list;
+
+ return 0;
+}
+
+static int igb_set_tx_csum(struct net_device *netdev, u32 data)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+#ifdef NETIF_F_IPV6_CSUM
+ u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+#else
+ u32 feature_list = NETIF_F_IP_CSUM;
+#endif
+
+ if (adapter->hw.mac.type >= e1000_82576)
+ feature_list |= NETIF_F_SCTP_CSUM;
+
+ if (data)
+ netdev->features |= feature_list;
+ else
+ netdev->features &= ~feature_list;
+
+ return 0;
+}
+
+#ifdef NETIF_F_TSO
+static int igb_set_tso(struct net_device *netdev, u32 data)
+{
+#ifdef NETIF_F_TSO6
+ const u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6;
+#else
+ const u32 feature_list = NETIF_F_TSO;
+#endif
+
+ if (data)
+ netdev->features |= feature_list;
+ else
+ netdev->features &= ~feature_list;
+
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+ if (!data) {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct net_device *v_netdev;
+ int i;
+
+ /* disable TSO on all VLANs if they're present */
+ if (!adapter->vlgrp)
+ goto tso_out;
+
+ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+ v_netdev = vlan_group_get_device(adapter->vlgrp, i);
+ if (!v_netdev)
+ continue;
+
+ v_netdev->features &= ~feature_list;
+ vlan_group_set_device(adapter->vlgrp, i, v_netdev);
+ }
+ }
+
+tso_out:
+
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+ return 0;
+}
+
+#endif /* NETIF_F_TSO */
+#ifdef ETHTOOL_GFLAGS
+static int igb_set_flags(struct net_device *netdev, u32 data)
+{
+ u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
+ ETH_FLAG_RXHASH;
+#ifndef HAVE_VLAN_RX_REGISTER
+ u32 changed = netdev->features ^ data;
+#endif
+ int rc;
+#ifndef IGB_NO_LRO
+
+ supported_flags |= ETH_FLAG_LRO;
+#endif
+ /*
+ * Since there is no support for separate tx vlan accel
+ * enabled make sure tx flag is cleared if rx is.
+ */
+ if (!(data & ETH_FLAG_RXVLAN))
+ data &= ~ETH_FLAG_TXVLAN;
+
+ rc = ethtool_op_set_flags(netdev, data, supported_flags);
+ if (rc)
+ return rc;
+#ifndef HAVE_VLAN_RX_REGISTER
+
+ if (changed & ETH_FLAG_RXVLAN)
+ igb_vlan_mode(netdev, data);
+#endif
+
+ return 0;
+}
+
+#endif /* ETHTOOL_GFLAGS */
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef ETHTOOL_SADV_COAL
+static int igb_set_adv_coal(struct net_device *netdev,
+ struct ethtool_value *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ switch (edata->data) {
+ case IGB_DMAC_DISABLE:
+ adapter->dmac = edata->data;
+ break;
+ case IGB_DMAC_MIN:
+ adapter->dmac = edata->data;
+ break;
+ case IGB_DMAC_500:
+ adapter->dmac = edata->data;
+ break;
+ case IGB_DMAC_EN_DEFAULT:
+ adapter->dmac = edata->data;
+ break;
+ case IGB_DMAC_2000:
+ adapter->dmac = edata->data;
+ break;
+ case IGB_DMAC_3000:
+ adapter->dmac = edata->data;
+ break;
+ case IGB_DMAC_4000:
+ adapter->dmac = edata->data;
+ break;
+ case IGB_DMAC_5000:
+ adapter->dmac = edata->data;
+ break;
+ case IGB_DMAC_6000:
+ adapter->dmac = edata->data;
+ break;
+ case IGB_DMAC_7000:
+ adapter->dmac = edata->data;
+ break;
+ case IGB_DMAC_8000:
+ adapter->dmac = edata->data;
+ break;
+ case IGB_DMAC_9000:
+ adapter->dmac = edata->data;
+ break;
+ case IGB_DMAC_MAX:
+ adapter->dmac = edata->data;
+ break;
+ default:
+ adapter->dmac = IGB_DMAC_DISABLE;
+ netdev_info(netdev,
+ "set_dmac: invalid setting, setting DMAC to %d\n",
+ adapter->dmac);
+ }
+ netdev_info(netdev, "%s: setting DMAC to %d\n",
+ netdev->name, adapter->dmac);
+ return 0;
+}
+
+#endif /* ETHTOOL_SADV_COAL */
+#ifdef ETHTOOL_GADV_COAL
+static void igb_get_dmac(struct net_device *netdev,
+ struct ethtool_value *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ edata->data = adapter->dmac;
+
+ return;
+}
+#endif
+
+#ifdef ETHTOOL_GEEE
+static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ret_val;
+ u16 phy_data;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ edata->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full);
+
+ if (!hw->dev_spec._82575.eee_disable)
+ edata->advertised =
+ mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+
+ /* The IPCNFG and EEER registers are not supported on I354. */
+ if (hw->mac.type == e1000_i354) {
+ e1000_get_eee_status_i354(hw, (bool *)&edata->eee_active);
+ } else {
+ u32 eeer;
+
+ eeer = E1000_READ_REG(hw, E1000_EEER);
+
+ /* EEE status on negotiated link */
+ if (eeer & E1000_EEER_EEE_NEG)
+ edata->eee_active = true;
+
+ if (eeer & E1000_EEER_TX_LPI_EN)
+ edata->tx_lpi_enabled = true;
+ }
+
+ /* EEE Link Partner Advertised */
+ switch (hw->mac.type) {
+ case e1000_i350:
+ ret_val = e1000_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350,
+ &phy_data);
+ if (ret_val)
+ return -ENODATA;
+
+ edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+ break;
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
+ E1000_EEE_LP_ADV_DEV_I210,
+ &phy_data);
+ if (ret_val)
+ return -ENODATA;
+
+ edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+ break;
+ default:
+ break;
+ }
+
+ edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
+
+ if ((hw->mac.type == e1000_i354) &&
+ (edata->eee_enabled))
+ edata->tx_lpi_enabled = true;
+
+ /*
+ * report correct negotiated EEE status for devices that
+ * wrongly report EEE at half-duplex
+ */
+ if (adapter->link_duplex == HALF_DUPLEX) {
+ edata->eee_enabled = false;
+ edata->eee_active = false;
+ edata->tx_lpi_enabled = false;
+ edata->advertised &= ~edata->advertised;
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef ETHTOOL_SEEE
+static int igb_set_eee(struct net_device *netdev,
+ struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct ethtool_eee eee_curr;
+ bool adv1g_eee = true, adv100m_eee = true;
+ s32 ret_val;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ ret_val = igb_get_eee(netdev, &eee_curr);
+ if (ret_val)
+ return ret_val;
+
+ if (eee_curr.eee_enabled) {
+ if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "Setting EEE tx-lpi is not supported\n");
+ return -EINVAL;
+ }
+
+ /* Tx LPI time is not implemented currently */
+ if (edata->tx_lpi_timer) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ if (!edata->advertised || (edata->advertised &
+ ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "EEE Advertisement supports 100Base-Tx Full Duplex(0x08) 1000Base-T Full Duplex(0x20) or both(0x28)\n");
+ return -EINVAL;
+ }
+ adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL);
+ adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL);
+
+ } else if (!edata->eee_enabled) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "Setting EEE options is not supported with EEE disabled\n");
+ return -EINVAL;
+ }
+
+ adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+
+ if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
+ hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
+
+ /* reset link */
+ if (netif_running(netdev))
+ igb_reinit_locked(adapter);
+ else
+ igb_reset(adapter);
+ }
+
+ if (hw->mac.type == e1000_i354)
+ ret_val = e1000_set_eee_i354(hw, adv1g_eee, adv100m_eee);
+ else
+ ret_val = e1000_set_eee_i350(hw, adv1g_eee, adv100m_eee);
+
+ if (ret_val) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "Problem setting EEE advertisement options\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif /* ETHTOOL_SEEE */
+#ifdef ETHTOOL_GRXFH
+#ifdef ETHTOOL_GRXFHINDIR
static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
struct ethtool_rxnfc *cmd)
@@ -2388,9 +2597,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
switch (cmd->flow_type) {
case TCP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case UDP_V4_FLOW:
if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2400,9 +2611,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
break;
case TCP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case UDP_V6_FLOW:
if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -2417,8 +2630,13 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
return 0;
}
+#endif /* ETHTOOL_GRXFHINDIR */
static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+ void *rule_locs)
+#else
+ u32 *rule_locs)
+#endif
{
struct igb_adapter *adapter = netdev_priv(dev);
int ret = -EOPNOTSUPP;
@@ -2428,9 +2646,11 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
cmd->data = adapter->num_rx_queues;
ret = 0;
break;
- case ETHTOOL_GRXFH:
+#ifdef ETHTOOL_GRXFHINDIR
+ case ETHTOOL_GRXFHINDIR:
ret = igb_get_rss_hash_opts(adapter, cmd);
break;
+#endif /* ETHTOOL_GRXFHINDIR */
default:
break;
}
@@ -2445,7 +2665,8 @@ static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
{
u32 flags = adapter->flags;
- /* RSS does not support anything other than hashing
+ /*
+ * RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
*/
if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
@@ -2512,11 +2733,11 @@ static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
/* if we changed something we need to update flags */
if (flags != adapter->flags) {
struct e1000_hw *hw = &adapter->hw;
- u32 mrqc = rd32(E1000_MRQC);
+ u32 mrqc = E1000_READ_REG(hw, E1000_MRQC);
if ((flags & UDP_RSS_FLAGS) &&
!(adapter->flags & UDP_RSS_FLAGS))
- dev_err(&adapter->pdev->dev,
+ DPRINTK(DRV, WARNING,
"enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
adapter->flags = flags;
@@ -2536,7 +2757,7 @@ static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
- wr32(E1000_MRQC, mrqc);
+ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
}
return 0;
@@ -2558,261 +2779,436 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
-static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+#endif /* ETHTOOL_GRXFH */
+#ifdef ETHTOOL_GRXFHINDIR
+#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
+static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 ipcnfg, eeer, ret_val;
- u16 phy_data;
+ return IGB_RETA_SIZE;
+}
- if ((hw->mac.type < e1000_i350) ||
- (hw->phy.media_type != e1000_media_type_copper))
- return -EOPNOTSUPP;
+#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH))
+#ifdef HAVE_RXFH_HASHFUNC
+static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+#else
+static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+#endif /* HAVE_RXFH_HASHFUNC */
+#else
+static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+#endif /* HAVE_ETHTOOL_GSRSSH */
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int i;
- edata->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full);
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+ indir[i] = adapter->rss_indir_tbl[i];
- ipcnfg = rd32(E1000_IPCNFG);
- eeer = rd32(E1000_EEER);
+ return 0;
+}
- /* EEE status on negotiated link */
- if (ipcnfg & E1000_IPCNFG_EEE_1G_AN)
- edata->advertised = ADVERTISED_1000baseT_Full;
+#else
+static int igb_get_rxfh_indir(struct net_device *netdev,
+ struct ethtool_rxfh_indir *indir)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ size_t copy_size =
+ min_t(size_t, indir->size, ARRAY_SIZE(adapter->rss_indir_tbl));
- if (ipcnfg & E1000_IPCNFG_EEE_100M_AN)
- edata->advertised |= ADVERTISED_100baseT_Full;
+ indir->size = ARRAY_SIZE(adapter->rss_indir_tbl);
+ memcpy(indir->ring_index, adapter->rss_indir_tbl,
+ copy_size * sizeof(indir->ring_index[0]));
+ return 0;
+}
+#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
+#endif /* ETHTOOL_GRXFHINDIR */
+#ifdef ETHTOOL_SRXFHINDIR
+void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg = E1000_RETA(0);
+ u32 shift = 0;
+ int i = 0;
- /* EEE Link Partner Advertised */
switch (hw->mac.type) {
- case e1000_i350:
- ret_val = igb_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350,
- &phy_data);
- if (ret_val)
- return -ENODATA;
-
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
-
+ case e1000_82575:
+ shift = 6;
break;
- case e1000_i210:
- case e1000_i211:
- ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
- E1000_EEE_LP_ADV_DEV_I210,
- &phy_data);
- if (ret_val)
- return -ENODATA;
-
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
-
+ case e1000_82576:
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count)
+ shift = 3;
break;
default:
break;
}
- if (eeer & E1000_EEER_EEE_NEG)
- edata->eee_active = true;
-
- edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
+ while (i < IGB_RETA_SIZE) {
+ u32 val = 0;
+ int j;
- if (eeer & E1000_EEER_TX_LPI_EN)
- edata->tx_lpi_enabled = true;
+ for (j = 3; j >= 0; j--) {
+ val <<= 8;
+ val |= adapter->rss_indir_tbl[i + j];
+ }
- /* Report correct negotiated EEE status for devices that
- * wrongly report EEE at half-duplex
- */
- if (adapter->link_duplex == HALF_DUPLEX) {
- edata->eee_enabled = false;
- edata->eee_active = false;
- edata->tx_lpi_enabled = false;
- edata->advertised &= ~edata->advertised;
+ E1000_WRITE_REG(hw, reg, val << shift);
+ reg += 4;
+ i += 4;
}
-
- return 0;
}
-static int igb_set_eee(struct net_device *netdev,
- struct ethtool_eee *edata)
+#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH))
+#ifdef HAVE_RXFH_HASHFUNC
+static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+#else
+static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key)
+#endif /* HAVE_RXFH_HASHFUNC */
+#else
+static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+#endif /* HAVE_ETHTOOL_GSRSSH */
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct ethtool_eee eee_curr;
- s32 ret_val;
+ int i;
+ u32 num_queues;
- if ((hw->mac.type < e1000_i350) ||
- (hw->phy.media_type != e1000_media_type_copper))
- return -EOPNOTSUPP;
+ num_queues = adapter->rss_queues;
- ret_val = igb_get_eee(netdev, &eee_curr);
- if (ret_val)
- return ret_val;
+ switch (hw->mac.type) {
+ case e1000_82576:
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count)
+ num_queues = 2;
+ break;
+ default:
+ break;
+ }
- if (eee_curr.eee_enabled) {
- if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
- dev_err(&adapter->pdev->dev,
- "Setting EEE tx-lpi is not supported\n");
+ /* Verify user input. */
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+ if (indir[i] >= num_queues)
return -EINVAL;
- }
- /* Tx LPI timer is not implemented currently */
- if (edata->tx_lpi_timer) {
- dev_err(&adapter->pdev->dev,
- "Setting EEE Tx LPI timer is not supported\n");
- return -EINVAL;
- }
- if (eee_curr.advertised != edata->advertised) {
- dev_err(&adapter->pdev->dev,
- "Setting EEE Advertisement is not supported\n");
- return -EINVAL;
- }
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+ adapter->rss_indir_tbl[i] = indir[i];
- } else if (!edata->eee_enabled) {
- dev_err(&adapter->pdev->dev,
- "Setting EEE options are not supported with EEE disabled\n");
- return -EINVAL;
- }
+ igb_write_rss_indir_tbl(adapter);
- if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
- hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
- igb_set_eee_i350(hw);
+ return 0;
+}
+#else
+static int igb_set_rxfh_indir(struct net_device *netdev,
+ const struct ethtool_rxfh_indir *indir)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ size_t i;
- /* reset link */
- if (!netif_running(netdev))
- igb_reset(adapter);
- }
+ if (indir->size != ARRAY_SIZE(adapter->rss_indir_tbl))
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(adapter->rss_indir_tbl); i++)
+ if (indir->ring_index[i] >= adapter->rss_queues)
+ return -EINVAL;
+ memcpy(adapter->rss_indir_tbl, indir->ring_index,
+ sizeof(adapter->rss_indir_tbl));
+ igb_write_rss_indir_tbl(adapter);
return 0;
}
+#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
+#endif /* ETHTOOL_SRXFHINDIR */
+#ifdef ETHTOOL_GCHANNELS
-static int igb_get_module_info(struct net_device *netdev,
- struct ethtool_modinfo *modinfo)
+static unsigned int igb_max_rss_queues(struct igb_adapter *adapter)
{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 status = E1000_SUCCESS;
- u16 sff8472_rev, addr_mode;
- bool page_swap = false;
+ unsigned int max_rss_queues;
- if ((hw->phy.media_type == e1000_media_type_copper) ||
- (hw->phy.media_type == e1000_media_type_unknown))
- return -EOPNOTSUPP;
+ /* Determine the maximum number of RSS queues supported. */
+ switch (adapter->hw.mac.type) {
+ case e1000_i211:
+ max_rss_queues = IGB_MAX_RX_QUEUES_I211;
+ break;
+ case e1000_82575:
+ case e1000_i210:
+ max_rss_queues = IGB_MAX_RX_QUEUES_82575;
+ break;
+ case e1000_i350:
+ /* I350 cannot do RSS and SR-IOV at the same time */
+ if (adapter->vfs_allocated_count) {
+ max_rss_queues = 1;
+ break;
+ }
+ /* fall through */
+ case e1000_82576:
+ if (adapter->vfs_allocated_count) {
+ max_rss_queues = 2;
+ break;
+ }
+ /* fall through */
+ case e1000_82580:
+ default:
+ max_rss_queues = IGB_MAX_RX_QUEUES;
+ break;
+ }
- /* Check whether we support SFF-8472 or not */
- status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
- if (status != E1000_SUCCESS)
- return -EIO;
+ return max_rss_queues;
+}
+
+static void igb_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
- /* addressing mode is not supported */
- status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
- if (status != E1000_SUCCESS)
- return -EIO;
+ /* report maximum channels */
+ ch->max_combined = igb_max_rss_queues(adapter);
+ ch->max_rx = ch->max_combined;
+ if (adapter->vfs_allocated_count)
+ ch->max_tx = 1;
+ else
+ ch->max_tx = ch->max_combined;
- /* addressing mode is not supported */
- if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) {
- hw_dbg("Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
- page_swap = true;
+ /* report info for other vector */
+ if (adapter->msix_entries) {
+ ch->max_other = NON_Q_VECTORS;
+ ch->other_count = NON_Q_VECTORS;
}
- if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) {
- /* We have an SFP, but it does not support SFF-8472 */
- modinfo->type = ETH_MODULE_SFF_8079;
- modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ /* record RSS/TSS queues */
+ if (adapter->flags & IGB_FLAG_QUEUE_PAIRS) {
+ if (adapter->num_rx_queues > adapter->num_tx_queues) {
+ ch->combined_count = adapter->num_tx_queues;
+ ch->rx_count = adapter->num_rx_queues -
+ adapter->num_tx_queues;
+ } else if (adapter->num_rx_queues < adapter->num_tx_queues) {
+ ch->combined_count = adapter->num_rx_queues;
+ ch->tx_count = adapter->num_tx_queues -
+ adapter->num_rx_queues;
+ } else {
+ ch->combined_count = adapter->num_rx_queues;
+ }
} else {
- /* We have an SFP which supports a revision of SFF-8472 */
- modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ ch->rx_count = adapter->num_rx_queues;
+ ch->tx_count = adapter->num_tx_queues;
}
-
- return 0;
}
+#endif /* ETHTOOL_GCHANNELS */
+#ifdef ETHTOOL_SCHANNELS
-static int igb_get_module_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *ee, u8 *data)
+static int igb_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 status = E1000_SUCCESS;
- u16 *dataword;
- u16 first_word, last_word;
- int i = 0;
+ struct igb_adapter *adapter = netdev_priv(dev);
+ unsigned int max_rss_queues;
- if (ee->len == 0)
+ /* we cannot support combined, Rx, and Tx vectors simultaneously */
+ if (ch->combined_count && ch->rx_count && ch->tx_count)
return -EINVAL;
- first_word = ee->offset >> 1;
- last_word = (ee->offset + ee->len - 1) >> 1;
+ /* ignore other_count since it is not changeable */
- dataword = kmalloc(sizeof(u16) * (last_word - first_word + 1),
- GFP_KERNEL);
- if (!dataword)
- return -ENOMEM;
+ /* verify we have at least one channel in each direction */
+ if (!ch->combined_count && (!ch->rx_count || !ch->tx_count))
+ return -EINVAL;
- /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
- for (i = 0; i < last_word - first_word + 1; i++) {
- status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
- if (status != E1000_SUCCESS)
- /* Error occurred while reading module */
- return -EIO;
+ /* verify number of Tx queues does not exceed 1 if SR-IOV is enabled */
+ if (adapter->vfs_allocated_count &&
+ ((ch->combined_count + ch->tx_count) > 1))
+ return -EINVAL;
- be16_to_cpus(&dataword[i]);
- }
+ /* verify the number of channels does not exceed hardware limits */
+ max_rss_queues = igb_max_rss_queues(adapter);
+ if (((ch->combined_count + ch->rx_count) > max_rss_queues) ||
+ ((ch->combined_count + ch->tx_count) > max_rss_queues))
+ return -EINVAL;
- memcpy(data, (u8 *)dataword + (ee->offset & 1), ee->len);
- kfree(dataword);
+ /* Determine if we need to pair queues. */
+ switch (adapter->hw.mac.type) {
+ case e1000_82575:
+ case e1000_i211:
+ /* Device supports enough interrupts without queue pairing. */
+ break;
+ case e1000_i350:
+ /* The PF has 3 interrupts and 1 queue pair w/ SR-IOV */
+ if (adapter->vfs_allocated_count)
+ break;
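+		/* fall through */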
+ case e1000_82576:
+ /*
+ * The PF has access to 6 interrupt vectors if the number of
+ * VFs is less than 7. If that is the case we don't have
+ * to pair up the queues.
+ */
+ if ((adapter->vfs_allocated_count > 0) &&
+ (adapter->vfs_allocated_count < 7))
+ break;
+ /* fall through */
+ case e1000_82580:
+ case e1000_i210:
+ default:
+ /* verify we can support as many queues as requested */
+ if ((ch->combined_count +
+ ch->rx_count + ch->tx_count) > MAX_Q_VECTORS)
+ return -EINVAL;
+ break;
+ }
- return 0;
-}
+ /* update configuration values */
+ adapter->rss_queues = ch->combined_count + ch->rx_count;
+ if (ch->rx_count == ch->tx_count || adapter->vfs_allocated_count)
+ adapter->tss_queues = 0;
+ else
+ adapter->tss_queues = ch->combined_count + ch->tx_count;
-static int igb_ethtool_begin(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- pm_runtime_get_sync(&adapter->pdev->dev);
- return 0;
-}
+ if (ch->combined_count)
+ adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+ else
+ adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
-static void igb_ethtool_complete(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- pm_runtime_put(&adapter->pdev->dev);
+ /* update queue configuration for adapter */
+ return igb_setup_queues(adapter);
}
+#endif /* ETHTOOL_SCHANNELS */
static const struct ethtool_ops igb_ethtool_ops = {
- .get_settings = igb_get_settings,
- .set_settings = igb_set_settings,
- .get_drvinfo = igb_get_drvinfo,
- .get_regs_len = igb_get_regs_len,
- .get_regs = igb_get_regs,
- .get_wol = igb_get_wol,
- .set_wol = igb_set_wol,
- .get_msglevel = igb_get_msglevel,
- .set_msglevel = igb_set_msglevel,
- .nway_reset = igb_nway_reset,
- .get_link = igb_get_link,
- .get_eeprom_len = igb_get_eeprom_len,
- .get_eeprom = igb_get_eeprom,
- .set_eeprom = igb_set_eeprom,
- .get_ringparam = igb_get_ringparam,
- .set_ringparam = igb_set_ringparam,
- .get_pauseparam = igb_get_pauseparam,
- .set_pauseparam = igb_set_pauseparam,
- .self_test = igb_diag_test,
- .get_strings = igb_get_strings,
- .set_phys_id = igb_set_phys_id,
- .get_sset_count = igb_get_sset_count,
- .get_ethtool_stats = igb_get_ethtool_stats,
- .get_coalesce = igb_get_coalesce,
- .set_coalesce = igb_set_coalesce,
- .get_ts_info = igb_get_ts_info,
+ .get_settings = igb_get_settings,
+ .set_settings = igb_set_settings,
+ .get_drvinfo = igb_get_drvinfo,
+ .get_regs_len = igb_get_regs_len,
+ .get_regs = igb_get_regs,
+ .get_wol = igb_get_wol,
+ .set_wol = igb_set_wol,
+ .get_msglevel = igb_get_msglevel,
+ .set_msglevel = igb_set_msglevel,
+ .nway_reset = igb_nway_reset,
+ .get_link = igb_get_link,
+ .get_eeprom_len = igb_get_eeprom_len,
+ .get_eeprom = igb_get_eeprom,
+ .set_eeprom = igb_set_eeprom,
+ .get_ringparam = igb_get_ringparam,
+ .set_ringparam = igb_set_ringparam,
+ .get_pauseparam = igb_get_pauseparam,
+ .set_pauseparam = igb_set_pauseparam,
+ .self_test = igb_diag_test,
+ .get_strings = igb_get_strings,
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
+ .set_phys_id = igb_set_phys_id,
+#else
+ .phys_id = igb_phys_id,
+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
+ .get_sset_count = igb_get_sset_count,
+#else
+ .get_stats_count = igb_get_stats_count,
+ .self_test_count = igb_diag_test_count,
+#endif
+ .get_ethtool_stats = igb_get_ethtool_stats,
+#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
+ .get_perm_addr = ethtool_op_get_perm_addr,
+#endif
+ .get_coalesce = igb_get_coalesce,
+ .set_coalesce = igb_set_coalesce,
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
+ .get_ts_info = igb_get_ts_info,
+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#ifdef CONFIG_PM_RUNTIME
+ .begin = igb_ethtool_begin,
+ .complete = igb_ethtool_complete,
+#endif /* CONFIG_PM_RUNTIME */
+#ifndef HAVE_NDO_SET_FEATURES
+ .get_rx_csum = igb_get_rx_csum,
+ .set_rx_csum = igb_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = igb_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = igb_set_tso,
+#endif
+#ifdef ETHTOOL_GFLAGS
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = igb_set_flags,
+#endif /* ETHTOOL_GFLAGS */
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef ETHTOOL_GADV_COAL
+ .get_advcoal = igb_get_adv_coal,
+ .set_advcoal = igb_set_dmac_coal,
+#endif /* ETHTOOL_GADV_COAL */
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef ETHTOOL_GEEE
+ .get_eee = igb_get_eee,
+#endif
+#ifdef ETHTOOL_SEEE
+ .set_eee = igb_set_eee,
+#endif
+#ifdef ETHTOOL_GRXFHINDIR
+#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
+ .get_rxfh_indir_size = igb_get_rxfh_indir_size,
+#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
+#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH))
+ .get_rxfh = igb_get_rxfh,
+#else
+ .get_rxfh_indir = igb_get_rxfh_indir,
+#endif /* HAVE_ETHTOOL_GSRSSH */
+#endif /* ETHTOOL_GRXFHINDIR */
+#ifdef ETHTOOL_SRXFHINDIR
+#if (defined(ETHTOOL_GRSSH) && !defined(HAVE_ETHTOOL_GSRSSH))
+ .set_rxfh = igb_set_rxfh,
+#else
+ .set_rxfh_indir = igb_set_rxfh_indir,
+#endif /* HAVE_ETHTOOL_GSRSSH */
+#endif /* ETHTOOL_SRXFHINDIR */
+#ifdef ETHTOOL_GCHANNELS
+ .get_channels = igb_get_channels,
+#endif /* ETHTOOL_GCHANNELS */
+#ifdef ETHTOOL_SCHANNELS
+ .set_channels = igb_set_channels,
+#endif /* ETHTOOL_SCHANNELS */
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#ifdef ETHTOOL_GRXFH
.get_rxnfc = igb_get_rxnfc,
.set_rxnfc = igb_set_rxnfc,
+#endif
+};
+
+#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+static const struct ethtool_ops_ext igb_ethtool_ops_ext = {
+ .size = sizeof(struct ethtool_ops_ext),
+ .get_ts_info = igb_get_ts_info,
+ .set_phys_id = igb_set_phys_id,
.get_eee = igb_get_eee,
.set_eee = igb_set_eee,
- .get_module_info = igb_get_module_info,
- .get_module_eeprom = igb_get_module_eeprom,
- .begin = igb_ethtool_begin,
- .complete = igb_ethtool_complete,
+#ifdef HAVE_ETHTOOL_GRXFHINDIR_SIZE
+ .get_rxfh_indir_size = igb_get_rxfh_indir_size,
+#endif /* HAVE_ETHTOOL_GRXFHINDIR_SIZE */
+ .get_rxfh_indir = igb_get_rxfh_indir,
+ .set_rxfh_indir = igb_set_rxfh_indir,
+ .get_channels = igb_get_channels,
+ .set_channels = igb_set_channels,
};
void igb_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
+ set_ethtool_ops_ext(netdev, &igb_ethtool_ops_ext);
}
+#else
+void igb_set_ethtool_ops(struct net_device *netdev)
+{
+ /* have to "undeclare" const on this struct to remove warnings */
+#ifndef ETHTOOL_OPS_COMPAT
+ netdev->ethtool_ops = (struct ethtool_ops *)&igb_ethtool_ops;
+#else
+ SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igb_ethtool_ops);
+#endif /* SET_ETHTOOL_OPS */
+}
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#endif /* SIOCETHTOOL */
+
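Editor's aside on the RSS plumbing added above: igb_write_rss_indir_tbl() packs four 8-bit indirection-table entries into each 32-bit RETA register, least-significant byte first, then shifts the whole word left (by 6 on 82575, by 3 on 82576 with SR-IOV, otherwise 0). The standalone sketch below is illustrative only and not part of the patch; pack_reta_group() and tbl are made-up names, but the packing mirrors the j = 3..0 loop in the hunk.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch only -- not part of the patch.  Entry i lands in
 * bits 7:0 of the RETA word, entry i+3 in bits 31:24, and the packed
 * word is shifted left by 'shift' as igb_write_rss_indir_tbl() does.
 */
static uint32_t pack_reta_group(const uint8_t *tbl, unsigned int i,
				unsigned int shift)
{
	uint32_t val = 0;
	int j;

	for (j = 3; j >= 0; j--) {
		val <<= 8;
		val |= tbl[i + j];
	}
	return val << shift;
}

int main(void)
{
	/* hypothetical 8-entry table spreading flows across queues 0-3 */
	const uint8_t tbl[8] = { 0, 1, 2, 3, 0, 1, 2, 3 };
	unsigned int i;

	for (i = 0; i < 8; i += 4)
		printf("RETA word %u = 0x%08" PRIx32 "\n", i / 4,
		       pack_reta_group(tbl, i, 0));
	return 0;
}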
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 58f1ce967aeb..382057ac45dd 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,14 +12,11 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -28,7 +25,7 @@
#include "igb.h"
#include "e1000_82575.h"
#include "e1000_hw.h"
-
+#ifdef IGB_HWMON
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sysfs.h>
@@ -38,28 +35,29 @@
#include <linux/hwmon.h>
#include <linux/pci.h>
-#ifdef CONFIG_IGB_HWMON
+#ifdef HAVE_I2C_SUPPORT
static struct i2c_board_info i350_sensor_info = {
I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
};
+#endif /* HAVE_I2C_SUPPORT */
/* hwmon callback functions */
static ssize_t igb_hwmon_show_location(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
- dev_attr);
+ dev_attr);
return sprintf(buf, "loc%u\n",
igb_attr->sensor->location);
}
static ssize_t igb_hwmon_show_temp(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
- dev_attr);
+ dev_attr);
unsigned int value;
/* reset the temp field */
@@ -74,11 +72,11 @@ static ssize_t igb_hwmon_show_temp(struct device *dev,
}
static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
- dev_attr);
+ dev_attr);
unsigned int value = igb_attr->sensor->caution_thresh;
/* display millidegree */
@@ -88,11 +86,11 @@ static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
}
static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
- dev_attr);
+ dev_attr);
unsigned int value = igb_attr->sensor->max_op_thresh;
/* display millidegree */
@@ -111,8 +109,7 @@ static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
* the data structures we need to get the data to display.
*/
static int igb_add_hwmon_attr(struct igb_adapter *adapter,
- unsigned int offset, int type)
-{
+ unsigned int offset, int type) {
int rc;
unsigned int n_attr;
struct hwmon_attr *igb_attr;
@@ -193,7 +190,9 @@ int igb_sysfs_init(struct igb_adapter *adapter)
unsigned int i;
int n_attrs;
int rc = 0;
+#ifdef HAVE_I2C_SUPPORT
struct i2c_client *client = NULL;
+#endif /* HAVE_I2C_SUPPORT */
/* If this method isn't defined we don't support thermals */
if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
@@ -203,7 +202,7 @@ int igb_sysfs_init(struct igb_adapter *adapter)
rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
if (rc)
goto exit;
-
+#ifdef HAVE_I2C_SUPPORT
/* init i2c_client */
client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
if (client == NULL) {
@@ -212,13 +211,14 @@ int igb_sysfs_init(struct igb_adapter *adapter)
goto exit;
}
adapter->i2c_client = client;
+#endif /* HAVE_I2C_SUPPORT */
/* Allocation space for max attributes
* max num sensors * values (loc, temp, max, caution)
*/
n_attrs = E1000_MAX_SENSORS * 4;
igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!igb_hwmon->hwmon_list) {
rc = -ENOMEM;
goto err;
@@ -254,4 +254,4 @@ err:
exit:
return rc;
}
-#endif
+#endif /* IGB_HWMON */
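Editor's aside on the hwmon hunk above: each show callback recovers its per-sensor context from the generic device_attribute pointer via container_of(), then reports the value in millidegrees (the hwmon sysfs convention). The self-contained sketch below is illustrative only; fake_dev_attr, fake_hwmon_attr and show_temp are stand-ins for the driver's types, not the driver's API.

#include <stddef.h>
#include <stdio.h>

/*
 * Illustrative sketch only -- not part of the patch.  Demonstrates the
 * container_of() idiom the hwmon show callbacks rely on: from the
 * embedded dev_attr member handed back by sysfs, recover the enclosing
 * per-sensor structure.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_dev_attr {
	const char *name;
};

struct fake_hwmon_attr {
	unsigned int temp;		/* sensor reading, degrees C */
	struct fake_dev_attr dev_attr;	/* embedded, handed to sysfs */
};

static void show_temp(struct fake_dev_attr *attr)
{
	struct fake_hwmon_attr *h =
		container_of(attr, struct fake_hwmon_attr, dev_attr);

	/* hwmon reports millidegrees, hence the * 1000 in the driver */
	printf("%u\n", h->temp * 1000);
}

int main(void)
{
	struct fake_hwmon_attr a = {
		.temp = 45,
		.dev_attr = { .name = "temp1_input" },
	};

	show_temp(&a.dev_attr);
	return 0;
}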
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 64cbe0dfe043..4f9ffcab4484 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -12,111 +12,115 @@
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
+ Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
-#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
-#include <linux/ipv6.h>
-#include <linux/slab.h>
+#include <linux/tcp.h>
+#ifdef NETIF_F_TSO
#include <net/checksum.h>
+#ifdef NETIF_F_TSO6
+#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
-#include <linux/net_tstamp.h>
+#endif
+#endif
+#ifdef SIOCGMIIPHY
#include <linux/mii.h>
+#endif
+#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
-#include <linux/if.h>
+#endif
#include <linux/if_vlan.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/sctp.h>
-#include <linux/if_ether.h>
-#include <linux/aer.h>
-#include <linux/prefetch.h>
+#ifdef CONFIG_PM_RUNTIME
#include <linux/pm_runtime.h>
-#ifdef CONFIG_IGB_DCA
-#include <linux/dca.h>
-#endif
-#include <linux/i2c.h>
+#endif /* CONFIG_PM_RUNTIME */
+
+#include <linux/if_bridge.h>
+#include <linux/ctype.h>
#include "igb.h"
+#include "igb_vmdq.h"
+
+#if defined(DEBUG) || defined(DEBUG_DUMP) || defined(DEBUG_ICR) \
+ || defined(DEBUG_ITR)
+#define DRV_DEBUG "_debug"
+#else
+#define DRV_DEBUG
+#endif
+#define DRV_HW_PERF
+#define VERSION_SUFFIX
#define MAJ 5
-#define MIN 0
-#define BUILD 3
-#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
-__stringify(BUILD) "-k"
+#define MIN 3
+#define BUILD 5.4
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "."\
+ __stringify(BUILD) VERSION_SUFFIX DRV_DEBUG DRV_HW_PERF
+
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
- "Copyright (c) 2007-2013 Intel Corporation.";
+ "Copyright (c) 2007-2015 Intel Corporation.";
-static const struct e1000_info *igb_info_tbl[] = {
- [board_82575] = &e1000_82575_info,
-};
+static char g_mac_addr[ETH_ALEN];
+static int g_usr_mac = 0;
-static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+static const struct pci_device_id igb_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_TOOLS_ONLY) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_TOOLS_ONLY) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES) },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER) },
/* required last entry */
{0, }
};
MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
-void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
@@ -125,82 +129,114 @@ static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
-static int igb_open(struct net_device *);
-static int igb_close(struct net_device *);
static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
-static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
+static void igb_dma_err_task(struct work_struct *);
+static void igb_dma_err_timer(unsigned long data);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats);
+static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
+/* void igb_full_sync_mac_table(struct igb_adapter *adapter); */
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
+static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static irqreturn_t igb_msix_ring(int irq, void *);
-#ifdef CONFIG_IGB_DCA
+#ifdef IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
-#endif /* CONFIG_IGB_DCA */
+#endif /* IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
-static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
-static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
+#ifdef HAVE_VLAN_RX_REGISTER
+static void igb_vlan_mode(struct net_device *, struct vlan_group *);
+#endif
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+static int igb_vlan_rx_add_vid(struct net_device *,
+ __always_unused __be16 proto, u16);
+static int igb_vlan_rx_kill_vid(struct net_device *,
+ __always_unused __be16 proto, u16);
+#else
+static int igb_vlan_rx_add_vid(struct net_device *, u16);
+static int igb_vlan_rx_kill_vid(struct net_device *, u16);
+#endif
+#else
+static void igb_vlan_rx_add_vid(struct net_device *, u16);
+static void igb_vlan_rx_kill_vid(struct net_device *, u16);
+#endif
static void igb_restore_vlan(struct igb_adapter *);
-static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
+static void igb_process_mdd_event(struct igb_adapter *);
+#ifdef IFLA_VF_MAX
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
int vf, u16 vlan, u8 qos);
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
- bool setting);
+ bool setting);
+#endif
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
+ int min_tx_rate, int tx_rate);
+#else
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);
-
-#ifdef CONFIG_PCI_IOV
-static int igb_vf_configure(struct igb_adapter *adapter, int vf);
#endif
-
+static int igb_vf_configure(struct igb_adapter *adapter, int vf);
#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
-static int igb_suspend(struct device *);
-#endif
-static int igb_resume(struct device *);
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
+static int igb_suspend(struct device *dev);
+static int igb_resume(struct device *dev);
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
-#endif
+#endif /* CONFIG_PM_RUNTIME */
static const struct dev_pm_ops igb_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
+#ifdef CONFIG_PM_RUNTIME
SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
igb_runtime_idle)
+#endif /* CONFIG_PM_RUNTIME */
};
-#endif
+#else
+static int igb_suspend(struct pci_dev *pdev, pm_message_t state);
+static int igb_resume(struct pci_dev *pdev);
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
+#endif /* CONFIG_PM */
+#ifndef USE_REBOOT_NOTIFIER
static void igb_shutdown(struct pci_dev *);
-static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
-#ifdef CONFIG_IGB_DCA
+#else
+static int igb_notify_reboot(struct notifier_block *, unsigned long, void *);
+static struct notifier_block igb_notifier_reboot = {
+ .notifier_call = igb_notify_reboot,
+ .next = NULL,
+ .priority = 0
+};
+#endif
+#ifdef IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
.notifier_call = igb_notify_dca,
@@ -212,541 +248,267 @@ static struct notifier_block dca_notifier = {
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
-#ifdef CONFIG_PCI_IOV
-static unsigned int max_vfs = 0;
-module_param(max_vfs, uint, 0);
-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
- "per physical function");
-#endif /* CONFIG_PCI_IOV */
+#ifdef HAVE_PCI_ERS
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);
-static const struct pci_error_handlers igb_err_handler = {
+static struct pci_error_handlers igb_err_handler = {
.error_detected = igb_io_error_detected,
.slot_reset = igb_io_slot_reset,
.resume = igb_io_resume,
};
+#endif
+static void igb_init_fw(struct igb_adapter *adapter);
static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
static struct pci_driver igb_driver = {
.name = igb_driver_name,
.id_table = igb_pci_tbl,
.probe = igb_probe,
- .remove = igb_remove,
+ .remove = __devexit_p(igb_remove),
#ifdef CONFIG_PM
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
.driver.pm = &igb_pm_ops,
-#endif
+#else
+ .suspend = igb_suspend,
+ .resume = igb_resume,
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
+#endif /* CONFIG_PM */
+#ifndef USE_REBOOT_NOTIFIER
.shutdown = igb_shutdown,
- .sriov_configure = igb_pci_sriov_configure,
+#endif
+#ifdef HAVE_PCI_ERS
.err_handler = &igb_err_handler
+#endif
};
+/* u32 e1000_read_reg(struct e1000_hw *hw, u32 reg); */
+
MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
-static int debug = -1;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-
-struct igb_reg_info {
- u32 ofs;
- char *name;
-};
-
-static const struct igb_reg_info igb_reg_info_tbl[] = {
-
- /* General Registers */
- {E1000_CTRL, "CTRL"},
- {E1000_STATUS, "STATUS"},
- {E1000_CTRL_EXT, "CTRL_EXT"},
-
- /* Interrupt Registers */
- {E1000_ICR, "ICR"},
-
- /* RX Registers */
- {E1000_RCTL, "RCTL"},
- {E1000_RDLEN(0), "RDLEN"},
- {E1000_RDH(0), "RDH"},
- {E1000_RDT(0), "RDT"},
- {E1000_RXDCTL(0), "RXDCTL"},
- {E1000_RDBAL(0), "RDBAL"},
- {E1000_RDBAH(0), "RDBAH"},
-
- /* TX Registers */
- {E1000_TCTL, "TCTL"},
- {E1000_TDBAL(0), "TDBAL"},
- {E1000_TDBAH(0), "TDBAH"},
- {E1000_TDLEN(0), "TDLEN"},
- {E1000_TDH(0), "TDH"},
- {E1000_TDT(0), "TDT"},
- {E1000_TXDCTL(0), "TXDCTL"},
- {E1000_TDFH, "TDFH"},
- {E1000_TDFT, "TDFT"},
- {E1000_TDFHS, "TDFHS"},
- {E1000_TDFPC, "TDFPC"},
-
- /* List Terminator */
- {}
-};
-
-/* igb_regdump - register printout routine */
-static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
+/* Retrieve user set MAC address */
+static int __init setup_igb_mac(char *macstr)
{
- int n = 0;
- char rname[16];
- u32 regs[8];
-
- switch (reginfo->ofs) {
- case E1000_RDLEN(0):
- for (n = 0; n < 4; n++)
- regs[n] = rd32(E1000_RDLEN(n));
- break;
- case E1000_RDH(0):
- for (n = 0; n < 4; n++)
- regs[n] = rd32(E1000_RDH(n));
- break;
- case E1000_RDT(0):
- for (n = 0; n < 4; n++)
- regs[n] = rd32(E1000_RDT(n));
- break;
- case E1000_RXDCTL(0):
- for (n = 0; n < 4; n++)
- regs[n] = rd32(E1000_RXDCTL(n));
- break;
- case E1000_RDBAL(0):
- for (n = 0; n < 4; n++)
- regs[n] = rd32(E1000_RDBAL(n));
- break;
- case E1000_RDBAH(0):
- for (n = 0; n < 4; n++)
- regs[n] = rd32(E1000_RDBAH(n));
- break;
- case E1000_TDBAL(0):
- for (n = 0; n < 4; n++)
- regs[n] = rd32(E1000_RDBAL(n));
- break;
- case E1000_TDBAH(0):
- for (n = 0; n < 4; n++)
- regs[n] = rd32(E1000_TDBAH(n));
- break;
- case E1000_TDLEN(0):
- for (n = 0; n < 4; n++)
- regs[n] = rd32(E1000_TDLEN(n));
- break;
- case E1000_TDH(0):
- for (n = 0; n < 4; n++)
- regs[n] = rd32(E1000_TDH(n));
- break;
- case E1000_TDT(0):
- for (n = 0; n < 4; n++)
- regs[n] = rd32(E1000_TDT(n));
- break;
- case E1000_TXDCTL(0):
- for (n = 0; n < 4; n++)
- regs[n] = rd32(E1000_TXDCTL(n));
- break;
- default:
- pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
- return;
- }
-
- snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
- pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
- regs[2], regs[3]);
-}
-
-/* igb_dump - Print registers, Tx-rings and Rx-rings */
-static void igb_dump(struct igb_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct e1000_hw *hw = &adapter->hw;
- struct igb_reg_info *reginfo;
- struct igb_ring *tx_ring;
- union e1000_adv_tx_desc *tx_desc;
- struct my_u0 { u64 a; u64 b; } *u0;
- struct igb_ring *rx_ring;
- union e1000_adv_rx_desc *rx_desc;
- u32 staterr;
- u16 i, n;
-
- if (!netif_msg_hw(adapter))
- return;
-
- /* Print netdevice Info */
- if (netdev) {
- dev_info(&adapter->pdev->dev, "Net device Info\n");
- pr_info("Device Name state trans_start "
- "last_rx\n");
- pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
- netdev->state, netdev->trans_start, netdev->last_rx);
- }
-
- /* Print Registers */
- dev_info(&adapter->pdev->dev, "Register Dump\n");
- pr_info(" Register Name Value\n");
- for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
- reginfo->name; reginfo++) {
- igb_regdump(hw, reginfo);
- }
-
- /* Print TX Ring Summary */
- if (!netdev || !netif_running(netdev))
- goto exit;
-
- dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
- pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
- for (n = 0; n < adapter->num_tx_queues; n++) {
- struct igb_tx_buffer *buffer_info;
- tx_ring = adapter->tx_ring[n];
- buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
- pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
- n, tx_ring->next_to_use, tx_ring->next_to_clean,
- (u64)dma_unmap_addr(buffer_info, dma),
- dma_unmap_len(buffer_info, len),
- buffer_info->next_to_watch,
- (u64)buffer_info->time_stamp);
- }
-
- /* Print TX Rings */
- if (!netif_msg_tx_done(adapter))
- goto rx_ring_summary;
-
- dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
-
- /* Transmit Descriptor Formats
- *
- * Advanced Transmit Descriptor
- * +--------------------------------------------------------------+
- * 0 | Buffer Address [63:0] |
- * +--------------------------------------------------------------+
- * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN |
- * +--------------------------------------------------------------+
- * 63 46 45 40 39 38 36 35 32 31 24 15 0
- */
-
- for (n = 0; n < adapter->num_tx_queues; n++) {
- tx_ring = adapter->tx_ring[n];
- pr_info("------------------------------------\n");
- pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
- pr_info("------------------------------------\n");
- pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] "
- "[bi->dma ] leng ntw timestamp "
- "bi->skb\n");
-
- for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
- const char *next_desc;
- struct igb_tx_buffer *buffer_info;
- tx_desc = IGB_TX_DESC(tx_ring, i);
- buffer_info = &tx_ring->tx_buffer_info[i];
- u0 = (struct my_u0 *)tx_desc;
- if (i == tx_ring->next_to_use &&
- i == tx_ring->next_to_clean)
- next_desc = " NTC/U";
- else if (i == tx_ring->next_to_use)
- next_desc = " NTU";
- else if (i == tx_ring->next_to_clean)
- next_desc = " NTC";
- else
- next_desc = "";
-
- pr_info("T [0x%03X] %016llX %016llX %016llX"
- " %04X %p %016llX %p%s\n", i,
- le64_to_cpu(u0->a),
- le64_to_cpu(u0->b),
- (u64)dma_unmap_addr(buffer_info, dma),
- dma_unmap_len(buffer_info, len),
- buffer_info->next_to_watch,
- (u64)buffer_info->time_stamp,
- buffer_info->skb, next_desc);
-
- if (netif_msg_pktdata(adapter) && buffer_info->skb)
- print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS,
- 16, 1, buffer_info->skb->data,
- dma_unmap_len(buffer_info, len),
- true);
+ int i, j;
+ unsigned char result, value;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ result = 0;
+
+ if (i != 5 && *(macstr + 2) != ':')
+ return -1;
+
+ for (j = 0; j < 2; j++) {
+ if (isxdigit(*macstr)
+ && (value =
+ isdigit(*macstr) ? *macstr -
+ '0' : toupper(*macstr) - 'A' + 10) < 16) {
+ result = result * 16 + value;
+ macstr++;
+ } else
+ return -1;
}
- }
- /* Print RX Rings Summary */
-rx_ring_summary:
- dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
- pr_info("Queue [NTU] [NTC]\n");
- for (n = 0; n < adapter->num_rx_queues; n++) {
- rx_ring = adapter->rx_ring[n];
- pr_info(" %5d %5X %5X\n",
- n, rx_ring->next_to_use, rx_ring->next_to_clean);
+ macstr++;
+ g_mac_addr[i] = result;
}
- /* Print RX Rings */
- if (!netif_msg_rx_status(adapter))
- goto exit;
-
- dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+ g_usr_mac = 1;
- /* Advanced Receive Descriptor (Read) Format
- * 63 1 0
- * +-----------------------------------------------------+
- * 0 | Packet Buffer Address [63:1] |A0/NSE|
- * +----------------------------------------------+------+
- * 8 | Header Buffer Address [63:1] | DD |
- * +-----------------------------------------------------+
- *
- *
- * Advanced Receive Descriptor (Write-Back) Format
- *
- * 63 48 47 32 31 30 21 20 17 16 4 3 0
- * +------------------------------------------------------+
- * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
- * | Checksum Ident | | | | Type | Type |
- * +------------------------------------------------------+
- * 8 | VLAN Tag | Length | Extended Error | Extended Status |
- * +------------------------------------------------------+
- * 63 48 47 32 31 20 19 0
- */
-
- for (n = 0; n < adapter->num_rx_queues; n++) {
- rx_ring = adapter->rx_ring[n];
- pr_info("------------------------------------\n");
- pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
- pr_info("------------------------------------\n");
- pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] "
- "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
- pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----"
- "----------- [bi->skb] <-- Adv Rx Write-Back format\n");
-
- for (i = 0; i < rx_ring->count; i++) {
- const char *next_desc;
- struct igb_rx_buffer *buffer_info;
- buffer_info = &rx_ring->rx_buffer_info[i];
- rx_desc = IGB_RX_DESC(rx_ring, i);
- u0 = (struct my_u0 *)rx_desc;
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
-
- if (i == rx_ring->next_to_use)
- next_desc = " NTU";
- else if (i == rx_ring->next_to_clean)
- next_desc = " NTC";
- else
- next_desc = "";
-
- if (staterr & E1000_RXD_STAT_DD) {
- /* Descriptor Done */
- pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
- "RWB", i,
- le64_to_cpu(u0->a),
- le64_to_cpu(u0->b),
- next_desc);
- } else {
- pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
- "R ", i,
- le64_to_cpu(u0->a),
- le64_to_cpu(u0->b),
- (u64)buffer_info->dma,
- next_desc);
-
- if (netif_msg_pktdata(adapter) &&
- buffer_info->dma && buffer_info->page) {
- print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS,
- 16, 1,
- page_address(buffer_info->page) +
- buffer_info->page_offset,
- IGB_RX_BUFSZ, true);
- }
- }
- }
- }
-
-exit:
- return;
-}
-
-/**
- * igb_get_i2c_data - Reads the I2C SDA data bit
- * @hw: pointer to hardware structure
- * @i2cctl: Current value of I2CCTL register
- *
- * Returns the I2C data bit value
- **/
-static int igb_get_i2c_data(void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct e1000_hw *hw = &adapter->hw;
- s32 i2cctl = rd32(E1000_I2CPARAMS);
-
- return ((i2cctl & E1000_I2C_DATA_IN) != 0);
+ return 0;
}
-/**
- * igb_set_i2c_data - Sets the I2C data bit
- * @data: pointer to hardware structure
- * @state: I2C data value (0 or 1) to set
- *
- * Sets the I2C data bit
- **/
-static void igb_set_i2c_data(void *data, int state)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct e1000_hw *hw = &adapter->hw;
- s32 i2cctl = rd32(E1000_I2CPARAMS);
-
- if (state)
- i2cctl |= E1000_I2C_DATA_OUT;
- else
- i2cctl &= ~E1000_I2C_DATA_OUT;
+__setup("igb_mac=", setup_igb_mac);
- i2cctl &= ~E1000_I2C_DATA_OE_N;
- i2cctl |= E1000_I2C_CLK_OE_N;
- wr32(E1000_I2CPARAMS, i2cctl);
- wrfl();
-
-}
-
-/**
- * igb_set_i2c_clk - Sets the I2C SCL clock
- * @data: pointer to hardware structure
- * @state: state to set clock
- *
- * Sets the I2C clock line to state
- **/
-static void igb_set_i2c_clk(void *data, int state)
+static void igb_vfta_set(struct igb_adapter *adapter, u32 vid, bool add)
{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
struct e1000_hw *hw = &adapter->hw;
- s32 i2cctl = rd32(E1000_I2CPARAMS);
+ struct e1000_host_mng_dhcp_cookie *mng_cookie = &hw->mng_cookie;
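+	/* The VFTA is a bitmap of all 4096 VLAN IDs split into 32-bit
+	 * entries (the usual 128 x 32-bit layout): the upper VID bits pick
+	 * the entry, the low five bits pick the bit within it, e.g.
+	 * VID 100 -> entry 3, bit 4.
+	 */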
+ u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
+ u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ u32 vfta;
+
+	/*
+	 * If this is the management VLAN, the only option is to add it so
+	 * that management pass-through continues to work.
+	 */
+ if ((mng_cookie->status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+ (vid == mng_cookie->vlan_id))
+ add = TRUE;
- if (state) {
- i2cctl |= E1000_I2C_CLK_OUT;
- i2cctl &= ~E1000_I2C_CLK_OE_N;
- } else {
- i2cctl &= ~E1000_I2C_CLK_OUT;
- i2cctl &= ~E1000_I2C_CLK_OE_N;
- }
- wr32(E1000_I2CPARAMS, i2cctl);
- wrfl();
-}
+ vfta = adapter->shadow_vfta[index];
-/**
- * igb_get_i2c_clk - Gets the I2C SCL clock state
- * @data: pointer to hardware structure
- *
- * Gets the I2C clock state
- **/
-static int igb_get_i2c_clk(void *data)
-{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
- struct e1000_hw *hw = &adapter->hw;
- s32 i2cctl = rd32(E1000_I2CPARAMS);
+ if (add)
+ vfta |= mask;
+ else
+ vfta &= ~mask;
- return ((i2cctl & E1000_I2C_CLK_IN) != 0);
+ e1000_write_vfta(hw, index, vfta);
+ adapter->shadow_vfta[index] = vfta;
}
-static const struct i2c_algo_bit_data igb_i2c_algo = {
- .setsda = igb_set_i2c_data,
- .setscl = igb_set_i2c_clk,
- .getsda = igb_get_i2c_data,
- .getscl = igb_get_i2c_clk,
- .udelay = 5,
- .timeout = 20,
-};
-
-/**
- * igb_get_hw_dev - return device
- * @hw: pointer to hardware structure
- *
- * used by hardware layer to print debugging information
- **/
-struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
-{
- struct igb_adapter *adapter = hw->back;
- return adapter->netdev;
-}
+static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none, ..., 16=all)");
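+/* Example: loading the module with "modprobe igb debug=16" enables all
+ * netif message categories instead of the default DRV | PROBE set.
+ */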
/**
- * igb_init_module - Driver Registration Routine
+ * igb_init_module - Driver Registration Routine
*
- * igb_init_module is the first routine called when the driver is
- * loaded. All it does is register with the PCI subsystem.
+ * igb_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
**/
static int __init igb_init_module(void)
{
int ret;
+
pr_info("%s - version %s\n",
igb_driver_string, igb_driver_version);
pr_info("%s\n", igb_copyright);
+#ifdef IGB_HWMON
+/* only use IGB_PROCFS if IGB_HWMON is not defined */
+#else
+#ifdef IGB_PROCFS
+ if (igb_procfs_topdir_init())
+ pr_info("Procfs failed to initialize topdir\n");
+#endif /* IGB_PROCFS */
+#endif /* IGB_HWMON */
-#ifdef CONFIG_IGB_DCA
+#ifdef IGB_DCA
dca_register_notify(&dca_notifier);
#endif
ret = pci_register_driver(&igb_driver);
+#ifdef USE_REBOOT_NOTIFIER
+ if (ret >= 0)
+ register_reboot_notifier(&igb_notifier_reboot);
+#endif
return ret;
}
module_init(igb_init_module);
/**
- * igb_exit_module - Driver Exit Cleanup Routine
+ * igb_exit_module - Driver Exit Cleanup Routine
*
- * igb_exit_module is called just before the driver is removed
- * from memory.
+ * igb_exit_module is called just before the driver is removed
+ * from memory.
**/
static void __exit igb_exit_module(void)
{
-#ifdef CONFIG_IGB_DCA
+#ifdef IGB_DCA
dca_unregister_notify(&dca_notifier);
#endif
+#ifdef USE_REBOOT_NOTIFIER
+ unregister_reboot_notifier(&igb_notifier_reboot);
+#endif
pci_unregister_driver(&igb_driver);
+
+#ifdef IGB_HWMON
+/* only compile IGB_PROCFS if IGB_HWMON is not defined */
+#else
+#ifdef IGB_PROCFS
+ igb_procfs_topdir_exit();
+#endif /* IGB_PROCFS */
+#endif /* IGB_HWMON */
}
module_exit(igb_exit_module);
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
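+/* Maps a linear queue index onto the 82576 register layout used for
+ * virtualization: Q_IDX_82576(0) = 0, (1) = 8, (2) = 1, (3) = 9, ...
+ * so even indices land in queues 0-7 and odd indices in queues 8-15.
+ */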
/**
- * igb_cache_ring_register - Descriptor ring to register mapping
- * @adapter: board private structure to initialize
+ * igb_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
*
- * Once we know the feature-set enabled for the device, we'll cache
- * the register offset the descriptor ring is assigned to.
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
**/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
int i = 0, j = 0;
u32 rbase_offset = adapter->vfs_allocated_count;
- switch (adapter->hw.mac.type) {
- case e1000_82576:
+ if (adapter->hw.mac.type == e1000_82576) {
/* The queues are allocated for virtualization such that VF 0
* is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
* In order to avoid collision we start at the first free queue
* and continue consuming queues in the same sequence
*/
- if (adapter->vfs_allocated_count) {
+ if ((adapter->rss_queues > 1) && adapter->vmdq_pools) {
for (; i < adapter->rss_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset +
- Q_IDX_82576(i);
+ Q_IDX_82576(i);
}
- case e1000_82575:
- case e1000_82580:
- case e1000_i350:
- case e1000_i354:
- case e1000_i210:
- case e1000_i211:
- default:
- for (; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = rbase_offset + i;
- for (; j < adapter->num_tx_queues; j++)
- adapter->tx_ring[j]->reg_idx = rbase_offset + j;
- break;
}
+ for (; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = rbase_offset + i;
+ for (; j < adapter->num_tx_queues; j++)
+ adapter->tx_ring[j]->reg_idx = rbase_offset + j;
+}
+
+u32 e1000_read_reg(struct e1000_hw *hw, u32 reg)
+{
+ struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
+ u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 value = 0;
+
+ if (E1000_REMOVED(hw_addr))
+ return ~value;
+
+ value = readl(&hw_addr[reg]);
+
+	/* Reads should never return all F's; if one does, cross-check
+	 * against register offset 0 to tell a genuinely all-ones value
+	 * from a lost PCIe link (surprise removal).
+	 */
+ if (!(~value) && (!reg || !(~readl(hw_addr)))) {
+ struct net_device *netdev = igb->netdev;
+
+ hw->hw_addr = NULL;
+ netif_device_detach(netdev);
+ netdev_err(netdev, "PCIe link lost, device now detached\n");
+ }
+
+ return value;
+}
+
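+/**
+ * igb_configure_lli - configure Low Latency Interrupt filters
+ * @adapter: board private structure
+ *
+ * Program the IMIR/IMIREXT immediate-interrupt filters so that receive
+ * packets matching a destination port, the TCP PSH flag, or a
+ * packet-size threshold raise an interrupt right away instead of
+ * waiting for interrupt moderation.
+ */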
+static void igb_configure_lli(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 port;
+
+ /* LLI should only be enabled for MSI-X or MSI interrupts */
+ if (!adapter->msix_entries && !(adapter->flags & IGB_FLAG_HAS_MSI))
+ return;
+
+ if (adapter->lli_port) {
+ /* use filter 0 for port */
+ port = htons((u16)adapter->lli_port);
+ E1000_WRITE_REG(hw, E1000_IMIR(0),
+ (port | E1000_IMIR_PORT_IM_EN));
+ E1000_WRITE_REG(hw, E1000_IMIREXT(0),
+ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
+ }
+
+ if (adapter->flags & IGB_FLAG_LLI_PUSH) {
+ /* use filter 1 for push flag */
+ E1000_WRITE_REG(hw, E1000_IMIR(1),
+ (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN));
+ E1000_WRITE_REG(hw, E1000_IMIREXT(1),
+ (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_PSH));
+ }
+
+ if (adapter->lli_size) {
+ /* use filter 2 for size */
+ E1000_WRITE_REG(hw, E1000_IMIR(2),
+ (E1000_IMIR_PORT_BP | E1000_IMIR_PORT_IM_EN));
+ E1000_WRITE_REG(hw, E1000_IMIREXT(2),
+ (adapter->lli_size | E1000_IMIREXT_CTRL_BP));
+ }
}
/**
@@ -764,7 +526,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
int index, int offset)
{
- u32 ivar = array_rd32(E1000_IVAR0, index);
+ u32 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
/* clear any bits that are currently set */
ivar &= ~((u32)0xFF << offset);
@@ -772,7 +534,7 @@ static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
/* write vector and valid bit */
ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
- array_wr32(E1000_IVAR0, index, ivar);
+ E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
}
#define IGB_N0_QUEUE -1
@@ -802,11 +564,12 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
if (!adapter->msix_entries && msix_vector == 0)
msixbm |= E1000_EIMS_OTHER;
- array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
+ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), msix_vector, msixbm);
q_vector->eims_value = msixbm;
break;
case e1000_82576:
- /* 82576 uses a table that essentially consists of 2 columns
+ /*
+ * 82576 uses a table that essentially consists of 2 columns
* with 8 rows. The ordering is column-major so we use the
* lower 3 bits as the row index, and the 4th bit as the
* column offset.
@@ -826,7 +589,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
case e1000_i354:
case e1000_i210:
case e1000_i211:
- /* On 82580 and newer adapters the scheme is similar to 82576
+ /*
+ * On 82580 and newer adapters the scheme is similar to 82576
* however instead of ordering column-major we have things
* ordered row-major. So we traverse the table by using
* bit 0 as the column offset, and the remaining bits as the
@@ -855,11 +619,10 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
}
/**
- * igb_configure_msix - Configure MSI-X hardware
- * @adapter: board private structure to initialize
+ * igb_configure_msix - Configure MSI-X hardware
*
- * igb_configure_msix sets up the hardware to properly
- * generate MSI-X interrupts.
+ * igb_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
**/
static void igb_configure_msix(struct igb_adapter *adapter)
{
@@ -872,7 +635,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
/* set vector for other causes, i.e. link changes */
switch (hw->mac.type) {
case e1000_82575:
- tmp = rd32(E1000_CTRL_EXT);
+ tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
/* enable MSI-X PBA support*/
tmp |= E1000_CTRL_EXT_PBA_CLR;
@@ -880,10 +643,11 @@ static void igb_configure_msix(struct igb_adapter *adapter)
tmp |= E1000_CTRL_EXT_EIAME;
tmp |= E1000_CTRL_EXT_IRCA;
- wr32(E1000_CTRL_EXT, tmp);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
/* enable msix_other interrupt */
- array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
+ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), vector++,
+ E1000_EIMS_OTHER);
adapter->eims_other = E1000_EIMS_OTHER;
break;
@@ -897,15 +661,15 @@ static void igb_configure_msix(struct igb_adapter *adapter)
/* Turn on MSI-X capability first, or our settings
* won't stick. And it will take days to debug.
*/
- wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
- E1000_GPIE_PBA | E1000_GPIE_EIAME |
- E1000_GPIE_NSICR);
+ E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE |
+ E1000_GPIE_PBA | E1000_GPIE_EIAME |
+ E1000_GPIE_NSICR);
/* enable msix_other interrupt */
adapter->eims_other = 1 << vector;
tmp = (vector++ | E1000_IVAR_VALID) << 8;
- wr32(E1000_IVAR_MISC, tmp);
+ E1000_WRITE_REG(hw, E1000_IVAR_MISC, tmp);
break;
default:
/* do nothing, since nothing else supports MSI-X */
@@ -917,24 +681,22 @@ static void igb_configure_msix(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++)
igb_assign_vector(adapter->q_vector[i], vector++);
- wrfl();
+ E1000_WRITE_FLUSH(hw);
}
/**
- * igb_request_msix - Initialize MSI-X interrupts
- * @adapter: board private structure to initialize
+ * igb_request_msix - Initialize MSI-X interrupts
*
- * igb_request_msix allocates MSI-X vectors and requests interrupts from the
- * kernel.
+ * igb_request_msix allocates MSI-X vectors and requests interrupts from the
+ * kernel.
**/
static int igb_request_msix(struct igb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct e1000_hw *hw = &adapter->hw;
int i, err = 0, vector = 0, free_vector = 0;
err = request_irq(adapter->msix_entries[vector].vector,
- igb_msix_other, 0, netdev->name, adapter);
+ &igb_msix_other, 0, netdev->name, adapter);
if (err)
goto err_out;
@@ -943,7 +705,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
vector++;
- q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
+ q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);
if (q_vector->rx.ring && q_vector->tx.ring)
sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
@@ -980,52 +742,80 @@ err_out:
return err;
}
-static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
+/**
+ * igb_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector.
+ **/
+static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
- if (adapter->msix_entries) {
- pci_disable_msix(adapter->pdev);
- kfree(adapter->msix_entries);
- adapter->msix_entries = NULL;
- } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
- pci_disable_msi(adapter->pdev);
- }
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ adapter->q_vector[v_idx] = NULL;
+
+#ifndef IGB_NO_LRO
+	__skb_queue_purge(&q_vector->lrolist.active);
+
+#endif
+	/* igb_get_stats64() might access the rings on this vector,
+	 * so wait a grace period before freeing it; do not touch the
+	 * q_vector after it has been handed to kfree_rcu().
+	 */
+	kfree_rcu(q_vector, rcu);
}
/**
- * igb_free_q_vector - Free memory allocated for specific interrupt vector
+ * igb_reset_q_vector - Reset config for interrupt vector
* @adapter: board private structure to initialize
- * @v_idx: Index of vector to be freed
+ * @v_idx: Index of vector to be reset
*
- * This function frees the memory allocated to the q_vector. In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
+ * If NAPI is enabled it will delete any references to the
+ * NAPI struct. This is preparation for igb_free_q_vector.
**/
-static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
+static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
{
struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ /* if we're coming from igb_set_interrupt_capability, the vectors are
+ * not yet allocated
+ */
+ if (!q_vector)
+ return;
+
if (q_vector->tx.ring)
adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
if (q_vector->rx.ring)
- adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL;
+ adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
- adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
- /* ixgbe_get_stats64() might access the rings on this vector,
- * we must wait a grace period before freeing it.
- */
- kfree_rcu(q_vector, rcu);
+}
+
+static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
+{
+ int v_idx = adapter->num_q_vectors;
+
+ if (adapter->msix_entries) {
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
+ pci_disable_msi(adapter->pdev);
+ }
+
+ while (v_idx--)
+ igb_reset_q_vector(adapter, v_idx);
}
/**
- * igb_free_q_vectors - Free memory allocated for interrupt vectors
- * @adapter: board private structure to initialize
+ * igb_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
*
- * This function frees the memory allocated to the q_vectors. In addition if
- * NAPI is enabled it will delete any references to the NAPI struct prior
- * to freeing the q_vector.
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
**/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
@@ -1035,16 +825,17 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
adapter->num_rx_queues = 0;
adapter->num_q_vectors = 0;
- while (v_idx--)
+ while (v_idx--) {
+ igb_reset_q_vector(adapter, v_idx);
igb_free_q_vector(adapter, v_idx);
+ }
}
/**
- * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
- * @adapter: board private structure to initialize
+ * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
*
- * This function resets the device so that it has 0 Rx queues, Tx queues, and
- * MSI-X interrupts allocated.
+ * This function resets the device so that it has 0 rx queues, tx queues, and
+ * MSI-X interrupts allocated.
*/
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
@@ -1053,83 +844,278 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
}
/**
- * igb_set_interrupt_capability - set MSI or MSI-X if supported
- * @adapter: board private structure to initialize
- * @msix: boolean value of MSIX capability
+ * igb_process_mdd_event - handle a Malicious Driver Detection event
+ * @adapter: board private structure
*
- * Attempt to configure interrupts using the best available
- * capabilities of the hardware and kernel.
- **/
-static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
+ * Identify a malicious VF, disable the VF TX/RX queues and log a message.
+ */
+static void igb_process_mdd_event(struct igb_adapter *adapter)
{
- int err;
- int numvecs, i;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 lvmmc, vfte, vfre, mdfb;
+ u8 vf_queue;
- if (!msix)
- goto msi_only;
+ lvmmc = E1000_READ_REG(hw, E1000_LVMMC);
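+	/* the offending VF index is reported in the top bits of LVMMC */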
+ vf_queue = lvmmc >> 29;
- /* Number of supported queues. */
- adapter->num_rx_queues = adapter->rss_queues;
- if (adapter->vfs_allocated_count)
- adapter->num_tx_queues = 1;
- else
- adapter->num_tx_queues = adapter->rss_queues;
+	/* VF index must be smaller than the number of allocated VFs */
+ if (vf_queue >= adapter->vfs_allocated_count)
+ return;
+
+ netdev_info(adapter->netdev,
+ "VF %d misbehaved. VF queues are disabled. VM misbehavior code is 0x%x\n",
+ vf_queue, lvmmc);
+
+ /* Disable VFTE and VFRE related bits */
+ vfte = E1000_READ_REG(hw, E1000_VFTE);
+ vfte &= ~(1 << vf_queue);
+ E1000_WRITE_REG(hw, E1000_VFTE, vfte);
+
+ vfre = E1000_READ_REG(hw, E1000_VFRE);
+ vfre &= ~(1 << vf_queue);
+ E1000_WRITE_REG(hw, E1000_VFRE, vfre);
- /* start with one vector for every Rx queue */
- numvecs = adapter->num_rx_queues;
+	/* Clear the MDFB bit for this VF (the register is write-1-to-clear) */
+ mdfb = E1000_READ_REG(hw, E1000_MDFB);
+ mdfb |= (1 << vf_queue);
+ E1000_WRITE_REG(hw, E1000_MDFB, mdfb);
- /* if Tx handler is separate add 1 for every Tx queue */
- if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
- numvecs += adapter->num_tx_queues;
+ /* Reset the specific VF */
+ E1000_WRITE_REG(hw, E1000_VTCTRL(vf_queue), E1000_VTCTRL_RST);
+}
- /* store the number of vectors reserved for queues */
- adapter->num_q_vectors = numvecs;
+/**
+ * igb_disable_mdd - disable Malicious Driver Detection
+ * @adapter: board private structure
+ *
+ * Disable MDD behavior in the HW
+ **/
+static void igb_disable_mdd(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg;
- /* add 1 vector for link status interrupts */
- numvecs++;
- adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
- GFP_KERNEL);
+ if ((hw->mac.type != e1000_i350) &&
+ (hw->mac.type != e1000_i354))
+ return;
- if (!adapter->msix_entries)
- goto msi_only;
+ reg = E1000_READ_REG(hw, E1000_DTXCTL);
+ reg &= (~E1000_DTXCTL_MDP_EN);
+ E1000_WRITE_REG(hw, E1000_DTXCTL, reg);
+}
- for (i = 0; i < numvecs; i++)
- adapter->msix_entries[i].entry = i;
+/**
+ * igb_enable_mdd - enable Malicious Driver Detection
+ * @adapter: board private structure
+ *
+ * Enable the HW to detect a malicious driver and send an interrupt to
+ * the driver.
+ **/
+static void igb_enable_mdd(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg;
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries,
- numvecs);
- if (err == 0)
+	/* Only available on i350 devices */
+ if (hw->mac.type != e1000_i350)
return;
- igb_reset_interrupt_capability(adapter);
+ reg = E1000_READ_REG(hw, E1000_DTXCTL);
+ reg |= E1000_DTXCTL_MDP_EN;
+ E1000_WRITE_REG(hw, E1000_DTXCTL, reg);
+}
- /* If we can't do MSI-X, try MSI */
-msi_only:
-#ifdef CONFIG_PCI_IOV
- /* disable SR-IOV for non MSI-X configurations */
+/**
+ * igb_reset_sriov_capability - disable SR-IOV if enabled
+ *
+ * Attempt to disable single root IO virtualization capabilities present in the
+ * kernel.
+ **/
+static void igb_reset_sriov_capability(struct igb_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* reclaim resources allocated to VFs */
if (adapter->vf_data) {
- struct e1000_hw *hw = &adapter->hw;
- /* disable iov and allow time for transactions to clear */
- pci_disable_sriov(adapter->pdev);
- msleep(500);
+ if (!pci_vfs_assigned(pdev)) {
+ /*
+ * disable iov and allow time for transactions to
+ * clear
+ */
+ pci_disable_sriov(pdev);
+ msleep(500);
+ dev_info(pci_dev_to_dev(pdev), "IOV Disabled\n");
+ } else {
+ dev_info(pci_dev_to_dev(pdev),
+ "IOV Not Disabled\n VF(s) are assigned to guests!\n");
+ }
+ /* Disable Malicious Driver Detection */
+ igb_disable_mdd(adapter);
+
+ /* free vf data storage */
kfree(adapter->vf_data);
adapter->vf_data = NULL;
- wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
- wrfl();
+
+ /* switch rings back to PF ownership */
+ E1000_WRITE_REG(hw, E1000_IOVCTL,
+ E1000_IOVCTL_REUSE_VFQ);
+ E1000_WRITE_FLUSH(hw);
msleep(100);
- dev_info(&adapter->pdev->dev, "IOV Disabled\n");
}
-#endif
+
+ adapter->vfs_allocated_count = 0;
+}
+
+/**
+ * igb_set_sriov_capability - set up SR-IOV if supported
+ *
+ * Attempt to enable single root IO virtualization capabilities present in the
+ * kernel.
+ **/
+static void igb_set_sriov_capability(struct igb_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int old_vfs = 0;
+ int i;
+
+ old_vfs = pci_num_vf(pdev);
+ if (old_vfs) {
+ dev_info(pci_dev_to_dev(pdev),
+ "%d pre-allocated VFs found - override max_vfs setting of %d\n",
+ old_vfs, adapter->vfs_allocated_count);
+ adapter->vfs_allocated_count = old_vfs;
+ }
+ /* no VFs requested, do nothing */
+ if (!adapter->vfs_allocated_count)
+ return;
+
+ /* allocate vf data storage */
+ adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
+ sizeof(struct vf_data_storage),
+ GFP_KERNEL);
+
+ if (adapter->vf_data) {
+ if (!old_vfs) {
+ if (pci_enable_sriov(pdev,
+ adapter->vfs_allocated_count))
+ goto err_out;
+ dev_warn(pci_dev_to_dev(pdev),
+ "SR-IOV has been enabled: configure port VLANs to keep your VFs secure\n");
+ }
+ for (i = 0; i < adapter->vfs_allocated_count; i++)
+ igb_vf_configure(adapter, i);
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ /* Enable VM to VM loopback by default */
+ adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE;
+ break;
+ default:
+ /* Currently no other hardware supports loopback */
+ break;
+ }
+
+ /* DMA Coalescing is not supported in IOV mode. */
+ if (adapter->hw.mac.type >= e1000_i350)
+ adapter->dmac = IGB_DMAC_DISABLE;
+ if (adapter->hw.mac.type < e1000_i350)
+ adapter->flags |= IGB_FLAG_DETECT_BAD_DMA;
+ return;
+
+ }
+
+err_out:
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
adapter->vfs_allocated_count = 0;
- adapter->rss_queues = 1;
- adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
- adapter->num_q_vectors = 1;
- if (!pci_enable_msi(adapter->pdev))
- adapter->flags |= IGB_FLAG_HAS_MSI;
+ dev_warn(pci_dev_to_dev(pdev),
+ "Failed to initialize SR-IOV virtualization\n");
+}
+
+/**
+ * igb_set_interrupt_capability - set MSI or MSI-X if supported
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+ int numvecs, i;
+
+ if (!msix)
+ adapter->int_mode = IGB_INT_MODE_MSI;
+
+ /* Number of supported queues. */
+ adapter->num_rx_queues = adapter->rss_queues;
+
+ if (adapter->vmdq_pools > 1)
+ adapter->num_rx_queues += adapter->vmdq_pools - 1;
+
+#ifdef HAVE_TX_MQ
+ if (adapter->vmdq_pools)
+ adapter->num_tx_queues = adapter->vmdq_pools;
+ else
+ adapter->num_tx_queues = adapter->num_rx_queues;
+#else
+ adapter->num_tx_queues = max_t(u32, 1, adapter->vmdq_pools);
+#endif
+
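+	/* Work down the chain MSI-X -> MSI -> legacy INTx: each case falls
+	 * through to the next, shedding features (SR-IOV, VMDq, RSS) that
+	 * the simpler interrupt mode cannot support.
+	 */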
+ switch (adapter->int_mode) {
+ case IGB_INT_MODE_MSIX:
+		/* start with one vector for every Tx/Rx queue pair */
+ numvecs = max_t(int, adapter->num_tx_queues,
+ adapter->num_rx_queues);
+
+		/* if the Tx handler is separate, use one vector for every Tx and Rx queue */
+ if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
+ numvecs = adapter->num_tx_queues +
+ adapter->num_rx_queues;
+
+ /* store the number of vectors reserved for queues */
+ adapter->num_q_vectors = numvecs;
+
+ /* add 1 vector for link status interrupts */
+ numvecs++;
+ adapter->msix_entries = kcalloc(numvecs,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (adapter->msix_entries) {
+ for (i = 0; i < numvecs; i++)
+ adapter->msix_entries[i].entry = i;
+
+ err = pci_enable_msix(pdev,
+ adapter->msix_entries, numvecs);
+ if (err == 0)
+ break;
+ }
+ /* MSI-X failed, so fall through and try MSI */
+ dev_warn(pci_dev_to_dev(pdev),
+ "Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
+ igb_reset_interrupt_capability(adapter);
+ case IGB_INT_MODE_MSI:
+ if (!pci_enable_msi(pdev))
+ adapter->flags |= IGB_FLAG_HAS_MSI;
+ else
+ dev_warn(pci_dev_to_dev(pdev),
+ "Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
+ /* Fall through */
+ case IGB_INT_MODE_LEGACY:
+ /* disable advanced features and set number of queues to 1 */
+ igb_reset_sriov_capability(adapter);
+ adapter->vmdq_pools = 0;
+ adapter->rss_queues = 1;
+ adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_q_vectors = 1;
+ /* Don't do anything; this is system default */
+ break;
+ }
}
static void igb_add_ring(struct igb_ring *ring,
@@ -1140,21 +1126,21 @@ static void igb_add_ring(struct igb_ring *ring,
}
/**
- * igb_alloc_q_vector - Allocate memory for a single interrupt vector
- * @adapter: board private structure to initialize
- * @v_count: q_vectors allocated on adapter, used for ring interleaving
- * @v_idx: index of vector in adapter struct
- * @txr_count: total number of Tx rings to allocate
- * @txr_idx: index of first Tx ring to allocate
- * @rxr_count: total number of Rx rings to allocate
- * @rxr_idx: index of first Rx ring to allocate
+ * igb_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
*
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
static int igb_alloc_q_vector(struct igb_adapter *adapter,
- int v_count, int v_idx,
- int txr_count, int txr_idx,
- int rxr_count, int rxr_idx)
+ unsigned int v_count, unsigned int v_idx,
+ unsigned int txr_count, unsigned int txr_idx,
+ unsigned int rxr_count, unsigned int rxr_idx)
{
struct igb_q_vector *q_vector;
struct igb_ring *ring;
@@ -1169,10 +1155,19 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
(sizeof(struct igb_ring) * ring_count);
/* allocate q_vector and rings */
- q_vector = kzalloc(size, GFP_KERNEL);
+ q_vector = adapter->q_vector[v_idx];
+ if (!q_vector)
+ q_vector = kzalloc(size, GFP_KERNEL);
+ else
+ memset(q_vector, 0, size);
if (!q_vector)
return -ENOMEM;
+#ifndef IGB_NO_LRO
+ /* initialize LRO */
+ __skb_queue_head_init(&q_vector->lrolist.active);
+
+#endif
/* initialize NAPI */
netif_napi_add(adapter->netdev, &q_vector->napi,
igb_poll, 64);
@@ -1185,7 +1180,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
q_vector->tx.work_limit = adapter->tx_work_limit;
/* initialize ITR configuration */
- q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
+ q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
q_vector->itr_val = IGB_START_ITR;
/* initialize pointer to rings */
@@ -1239,15 +1234,17 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
/* update q_vector Rx values */
igb_add_ring(ring, &q_vector->rx);
+#if defined(HAVE_RHEL6_NET_DEVICE_OPS_EXT) || !defined(HAVE_NDO_SET_FEATURES)
+ /* enable rx checksum */
+ set_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags);
+
+#endif
/* set flag indicating ring supports SCTP checksum offload */
if (adapter->hw.mac.type >= e1000_82576)
set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
- /*
- * On i350, i354, i210, and i211, loopback VLAN packets
- * have the tag byte-swapped.
- */
- if (adapter->hw.mac.type >= e1000_i350)
+ if ((adapter->hw.mac.type == e1000_i350) ||
+ (adapter->hw.mac.type == e1000_i354))
set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
/* apply Rx specific ring traits */
@@ -1261,13 +1258,12 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
return 0;
}
-
/**
- * igb_alloc_q_vectors - Allocate memory for interrupt vectors
- * @adapter: board private structure to initialize
+ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
*
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
**/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
@@ -1294,6 +1290,7 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
for (; v_idx < q_vectors; v_idx++) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
tqpv, txr_idx, rqpv, rxr_idx);
@@ -1321,11 +1318,9 @@ err_out:
}
/**
- * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
- * @adapter: board private structure to initialize
- * @msix: boolean value of MSIX capability
+ * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
*
- * This function initializes the interrupts and allocates all of the queues.
+ * This function initializes the interrupts and allocates all of the queues.
**/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
{
@@ -1336,7 +1331,7 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
err = igb_alloc_q_vectors(adapter);
if (err) {
- dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
+ dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for vectors\n");
goto err_alloc_q_vectors;
}
@@ -1350,11 +1345,10 @@ err_alloc_q_vectors:
}
/**
- * igb_request_irq - initialize interrupts
- * @adapter: board private structure to initialize
+ * igb_request_irq - initialize interrupts
*
- * Attempts to configure interrupts using the best available
- * capabilities of the hardware and kernel.
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
**/
static int igb_request_irq(struct igb_adapter *adapter)
{
@@ -1371,10 +1365,10 @@ static int igb_request_irq(struct igb_adapter *adapter)
igb_free_all_rx_resources(adapter);
igb_clear_interrupt_scheme(adapter);
+ igb_reset_sriov_capability(adapter);
err = igb_init_interrupt_scheme(adapter, false);
if (err)
goto request_done;
-
igb_setup_all_tx_resources(adapter);
igb_setup_all_rx_resources(adapter);
igb_configure(adapter);
@@ -1383,7 +1377,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
igb_assign_vector(adapter->q_vector[0], 0);
if (adapter->flags & IGB_FLAG_HAS_MSI) {
- err = request_irq(pdev->irq, igb_intr_msi, 0,
+ err = request_irq(pdev->irq, &igb_intr_msi, 0,
netdev->name, adapter);
if (!err)
goto request_done;
@@ -1393,11 +1387,11 @@ static int igb_request_irq(struct igb_adapter *adapter)
adapter->flags &= ~IGB_FLAG_HAS_MSI;
}
- err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
+ err = request_irq(pdev->irq, &igb_intr, IRQF_SHARED,
netdev->name, adapter);
if (err)
- dev_err(&pdev->dev, "Error %d getting interrupt\n",
+ dev_err(pci_dev_to_dev(pdev), "Error %d getting interrupt\n",
err);
request_done:
@@ -1420,40 +1414,48 @@ static void igb_free_irq(struct igb_adapter *adapter)
}
/**
- * igb_irq_disable - Mask off interrupt generation on the NIC
- * @adapter: board private structure
+ * igb_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
**/
static void igb_irq_disable(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- /* we need to be careful when disabling interrupts. The VFs are also
+ /*
+ * we need to be careful when disabling interrupts. The VFs are also
* mapped into these registers and so clearing the bits can cause
* issues on the VF drivers so we only need to clear what we set
*/
if (adapter->msix_entries) {
- u32 regval = rd32(E1000_EIAM);
- wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
- wr32(E1000_EIMC, adapter->eims_enable_mask);
- regval = rd32(E1000_EIAC);
- wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
+ u32 regval = E1000_READ_REG(hw, E1000_EIAM);
+
+ E1000_WRITE_REG(hw, E1000_EIAM, regval
+ & ~adapter->eims_enable_mask);
+ E1000_WRITE_REG(hw, E1000_EIMC, adapter->eims_enable_mask);
+ regval = E1000_READ_REG(hw, E1000_EIAC);
+ E1000_WRITE_REG(hw, E1000_EIAC, regval
+ & ~adapter->eims_enable_mask);
}
- wr32(E1000_IAM, 0);
- wr32(E1000_IMC, ~0);
- wrfl();
+ E1000_WRITE_REG(hw, E1000_IAM, 0);
+ E1000_WRITE_REG(hw, E1000_IMC, ~0);
+ E1000_WRITE_FLUSH(hw);
+
if (adapter->msix_entries) {
- int i;
+ int vector = 0, i;
+
+ synchronize_irq(adapter->msix_entries[vector++].vector);
+
for (i = 0; i < adapter->num_q_vectors; i++)
- synchronize_irq(adapter->msix_entries[i].vector);
+ synchronize_irq(adapter->msix_entries[vector++].vector);
} else {
synchronize_irq(adapter->pdev->irq);
}
}
/**
- * igb_irq_enable - Enable default interrupt generation settings
- * @adapter: board private structure
+ * igb_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
**/
static void igb_irq_enable(struct igb_adapter *adapter)
{
@@ -1461,20 +1463,27 @@ static void igb_irq_enable(struct igb_adapter *adapter)
if (adapter->msix_entries) {
u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
- u32 regval = rd32(E1000_EIAC);
- wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
- regval = rd32(E1000_EIAM);
- wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
- wr32(E1000_EIMS, adapter->eims_enable_mask);
+ u32 regval = E1000_READ_REG(hw, E1000_EIAC);
+
+ E1000_WRITE_REG(hw, E1000_EIAC, regval
+ | adapter->eims_enable_mask);
+ regval = E1000_READ_REG(hw, E1000_EIAM);
+ E1000_WRITE_REG(hw, E1000_EIAM, regval
+ | adapter->eims_enable_mask);
+ E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_enable_mask);
if (adapter->vfs_allocated_count) {
- wr32(E1000_MBVFIMR, 0xFF);
+ E1000_WRITE_REG(hw, E1000_MBVFIMR, 0xFF);
ims |= E1000_IMS_VMMB;
+ if (adapter->mdd)
+ if ((adapter->hw.mac.type == e1000_i350) ||
+ (adapter->hw.mac.type == e1000_i354))
+ ims |= E1000_IMS_MDDET;
}
- wr32(E1000_IMS, ims);
+ E1000_WRITE_REG(hw, E1000_IMS, ims);
} else {
- wr32(E1000_IMS, IMS_ENABLE_MASK |
+ E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK |
E1000_IMS_DRSTA);
- wr32(E1000_IAM, IMS_ENABLE_MASK |
+ E1000_WRITE_REG(hw, E1000_IAM, IMS_ENABLE_MASK |
E1000_IMS_DRSTA);
}
}
@@ -1487,7 +1496,7 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter)
if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
/* add VID to filter table */
- igb_vfta_set(hw, vid, true);
+ igb_vfta_set(adapter, vid, TRUE);
adapter->mng_vlan_id = vid;
} else {
adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
@@ -1495,19 +1504,24 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter)
if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
(vid != old_vid) &&
+#ifdef HAVE_VLAN_RX_REGISTER
+ !vlan_group_get_device(adapter->vlgrp, old_vid)) {
+#else
!test_bit(old_vid, adapter->active_vlans)) {
+#endif
/* remove VID from filter table */
- igb_vfta_set(hw, old_vid, false);
+ igb_vfta_set(adapter, old_vid, FALSE);
}
}
/**
- * igb_release_hw_control - release control of the h/w to f/w
- * @adapter: address of board private structure
+ * igb_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded.
*
- * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that the
- * driver is no longer loaded.
**/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
@@ -1515,18 +1529,19 @@ static void igb_release_hw_control(struct igb_adapter *adapter)
u32 ctrl_ext;
/* Let firmware take over control of h/w */
- ctrl_ext = rd32(E1000_CTRL_EXT);
- wr32(E1000_CTRL_EXT,
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}
/**
- * igb_get_hw_control - get control of the h/w from f/w
- * @adapter: address of board private structure
+ * igb_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded.
*
- * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that
- * the driver is loaded.
**/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
@@ -1534,14 +1549,14 @@ static void igb_get_hw_control(struct igb_adapter *adapter)
u32 ctrl_ext;
/* Let firmware know the driver has taken over */
- ctrl_ext = rd32(E1000_CTRL_EXT);
- wr32(E1000_CTRL_EXT,
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
/**
- * igb_configure - configure the hardware for RX and TX
- * @adapter: private board structure
+ * igb_configure - configure the hardware for RX and TX
+ * @adapter: private board structure
**/
static void igb_configure(struct igb_adapter *adapter)
{
@@ -1560,7 +1575,13 @@ static void igb_configure(struct igb_adapter *adapter)
igb_configure_tx(adapter);
igb_configure_rx(adapter);
- igb_rx_fifo_flush_82575(&adapter->hw);
+ e1000_rx_fifo_flush_82575(&adapter->hw);
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+ if (adapter->num_tx_queues > 1)
+ netdev->features |= NETIF_F_MULTI_QUEUE;
+ else
+ netdev->features &= ~NETIF_F_MULTI_QUEUE;
+#endif
/* call igb_desc_unused which always leaves
* at least 1 descriptor unused to make sure
@@ -1573,34 +1594,217 @@ static void igb_configure(struct igb_adapter *adapter)
}
/**
- * igb_power_up_link - Power up the phy/serdes link
- * @adapter: address of board private structure
+ * igb_power_up_link - Power up the phy/serdes link
+ * @adapter: address of board private structure
**/
void igb_power_up_link(struct igb_adapter *adapter)
{
- igb_reset_phy(&adapter->hw);
+ e1000_phy_hw_reset(&adapter->hw);
if (adapter->hw.phy.media_type == e1000_media_type_copper)
- igb_power_up_phy_copper(&adapter->hw);
+ e1000_power_up_phy(&adapter->hw);
else
- igb_power_up_serdes_link_82575(&adapter->hw);
+ e1000_power_up_fiber_serdes_link(&adapter->hw);
}
/**
- * igb_power_down_link - Power down the phy/serdes link
- * @adapter: address of board private structure
+ * igb_power_down_link - Power down the phy/serdes link
+ * @adapter: address of board private structure
*/
static void igb_power_down_link(struct igb_adapter *adapter)
{
if (adapter->hw.phy.media_type == e1000_media_type_copper)
- igb_power_down_phy_copper_82575(&adapter->hw);
+ e1000_power_down_phy(&adapter->hw);
else
- igb_shutdown_serdes_link_82575(&adapter->hw);
+ e1000_shutdown_fiber_serdes_link(&adapter->hw);
+}
+
+/* Media Auto Sense: detect whether the active medium needs to change and
+ * switch the link mode between copper and fiber/SerDes accordingly.
+ */
+static void igb_check_swap_media(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl_ext, connsw;
+ bool swap_now = false;
+ bool link;
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ connsw = E1000_READ_REG(hw, E1000_CONNSW);
+ link = igb_has_link(adapter);
+
+ /* need to live swap if current media is copper and we have fiber/serdes
+ * to go to.
+ */
+
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
+ swap_now = true;
+ } else if (!(connsw & E1000_CONNSW_SERDESD)) {
+ /* copper signal takes time to appear */
+ if (adapter->copper_tries < 3) {
+ adapter->copper_tries++;
+ connsw |= E1000_CONNSW_AUTOSENSE_CONF;
+ E1000_WRITE_REG(hw, E1000_CONNSW, connsw);
+ return;
+ } else {
+ adapter->copper_tries = 0;
+ if ((connsw & E1000_CONNSW_PHYSD) &&
+ (!(connsw & E1000_CONNSW_PHY_PDN))) {
+ swap_now = true;
+ connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
+ E1000_WRITE_REG(hw, E1000_CONNSW, connsw);
+ }
+ }
+ }
+
+ if (swap_now) {
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ dev_info(pci_dev_to_dev(adapter->pdev),
+ "%s:MAS: changing media to fiber/serdes\n",
+ adapter->netdev->name);
+ ctrl_ext |=
+ E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ adapter->copper_tries = 0;
+ break;
+ case e1000_media_type_internal_serdes:
+ case e1000_media_type_fiber:
+ dev_info(pci_dev_to_dev(adapter->pdev),
+ "%s:MAS: changing media to copper\n",
+ adapter->netdev->name);
+ ctrl_ext &=
+ ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ break;
+ default:
+ /* shouldn't get here during regular operation */
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "%s:AMS: Invalid media type found, returning\n",
+ adapter->netdev->name);
+ break;
+ }
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ }
+}
+
+#ifdef HAVE_I2C_SUPPORT
+/* igb_get_i2c_data - Reads the I2C SDA data bit
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Returns the I2C data bit value
+ */
+static int igb_get_i2c_data(void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ return !!(i2cctl & E1000_I2C_DATA_IN);
}
+/* igb_set_i2c_data - Sets the I2C data bit
+ * @data: pointer to hardware structure
+ * @state: I2C data value (0 or 1) to set
+ *
+ * Sets the I2C data bit
+ */
+static void igb_set_i2c_data(void *data, int state)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ if (state)
+ i2cctl |= E1000_I2C_DATA_OUT;
+ else
+ i2cctl &= ~E1000_I2C_DATA_OUT;
+
+ i2cctl &= ~E1000_I2C_DATA_OE_N;
+ i2cctl |= E1000_I2C_CLK_OE_N;
+
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
+ E1000_WRITE_FLUSH(hw);
+
+}
+
+/* igb_set_i2c_clk - Sets the I2C SCL clock
+ * @data: pointer to hardware structure
+ * @state: state to set clock
+ *
+ * Sets the I2C clock line to state
+ */
+static void igb_set_i2c_clk(void *data, int state)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ if (state) {
+ i2cctl |= E1000_I2C_CLK_OUT;
+ i2cctl &= ~E1000_I2C_CLK_OE_N;
+ } else {
+ i2cctl &= ~E1000_I2C_CLK_OUT;
+ i2cctl &= ~E1000_I2C_CLK_OE_N;
+ }
+ E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/* igb_get_i2c_clk - Gets the I2C SCL clock state
+ * @data: pointer to hardware structure
+ *
+ * Gets the I2C clock state
+ */
+static int igb_get_i2c_clk(void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+ return !!(i2cctl & E1000_I2C_CLK_IN);
+}
+
+static const struct i2c_algo_bit_data igb_i2c_algo = {
+ .setsda = igb_set_i2c_data,
+ .setscl = igb_set_i2c_clk,
+ .getsda = igb_get_i2c_data,
+ .getscl = igb_get_i2c_clk,
+ .udelay = 5,
+ .timeout = 20,
+};
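+/* With .udelay = 5 the bit-banged SCL runs at roughly 100 kHz; .timeout
+ * (in jiffies) bounds how long the algorithm waits for a slave that is
+ * clock-stretching.
+ */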
+
+/* igb_init_i2c - Init I2C interface
+ * @adapter: pointer to adapter structure
+ *
+ */
+static s32 igb_init_i2c(struct igb_adapter *adapter)
+{
+ s32 status = E1000_SUCCESS;
+
+	/* The I2C interface is only supported on i350 devices */
+ if (adapter->hw.mac.type != e1000_i350)
+ return E1000_SUCCESS;
+
+	/* Initialize the i2c bus that is controlled through device registers.
+	 * The bus uses the i2c_algo_bit structure, which implements the
+	 * protocol by toggling the four I2C control bits in the register.
+	 */
+ adapter->i2c_adap.owner = THIS_MODULE;
+ adapter->i2c_algo = igb_i2c_algo;
+ adapter->i2c_algo.data = adapter;
+ adapter->i2c_adap.algo_data = &adapter->i2c_algo;
+ adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
+ strlcpy(adapter->i2c_adap.name, "igb BB",
+ sizeof(adapter->i2c_adap.name));
+ status = i2c_bit_add_bus(&adapter->i2c_adap);
+ return status;
+}
+
+#endif /* HAVE_I2C_SUPPORT */
/**
- * igb_up - Open the interface and prepare it to handle traffic
- * @adapter: board private structure
+ * igb_up - Open the interface and prepare it to handle traffic
+ * @adapter: board private structure
**/
int igb_up(struct igb_adapter *adapter)
{
@@ -1620,23 +1824,32 @@ int igb_up(struct igb_adapter *adapter)
else
igb_assign_vector(adapter->q_vector[0], 0);
+ igb_configure_lli(adapter);
+
/* Clear any pending interrupts. */
- rd32(E1000_ICR);
+ E1000_READ_REG(hw, E1000_ICR);
igb_irq_enable(adapter);
/* notify VFs that reset has been completed */
if (adapter->vfs_allocated_count) {
- u32 reg_data = rd32(E1000_CTRL_EXT);
+ u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
reg_data |= E1000_CTRL_EXT_PFRSTD;
- wr32(E1000_CTRL_EXT, reg_data);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data);
}
netif_tx_start_all_queues(adapter->netdev);
+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+ schedule_work(&adapter->dma_err_task);
/* start the watchdog. */
hw->mac.get_link_status = 1;
schedule_work(&adapter->watchdog_task);
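+	/* advertise Energy Efficient Ethernet for 100BASE-TX and 1000BASE-T */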
+ if ((adapter->flags & IGB_FLAG_EEE) &&
+ (!hw->dev_spec._82575.eee_disable))
+ adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
+
return 0;
}
@@ -1653,44 +1866,48 @@ void igb_down(struct igb_adapter *adapter)
set_bit(__IGB_DOWN, &adapter->state);
/* disable receives in the hardware */
- rctl = rd32(E1000_RCTL);
- wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
/* flush and sleep below */
+ netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
/* disable transmits in the hardware */
- tctl = rd32(E1000_TCTL);
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
tctl &= ~E1000_TCTL_EN;
- wr32(E1000_TCTL, tctl);
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
/* flush both disables and wait for them to finish */
- wrfl();
- msleep(10);
+ E1000_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
for (i = 0; i < adapter->num_q_vectors; i++)
napi_disable(&(adapter->q_vector[i]->napi));
igb_irq_disable(adapter);
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+
del_timer_sync(&adapter->watchdog_timer);
+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+ del_timer_sync(&adapter->dma_err_timer);
del_timer_sync(&adapter->phy_info_timer);
- netif_carrier_off(netdev);
-
/* record the stats before reset*/
- spin_lock(&adapter->stats64_lock);
- igb_update_stats(adapter, &adapter->stats64);
- spin_unlock(&adapter->stats64_lock);
+ igb_update_stats(adapter);
adapter->link_speed = 0;
adapter->link_duplex = 0;
+#ifdef HAVE_PCI_ERS
if (!pci_channel_offline(adapter->pdev))
igb_reset(adapter);
+#else
+ igb_reset(adapter);
+#endif
igb_clean_all_tx_rings(adapter);
igb_clean_all_rx_rings(adapter);
-#ifdef CONFIG_IGB_DCA
-
+#ifdef IGB_DCA
/* since we reset the hardware DCA settings were cleared */
igb_setup_dca(adapter);
#endif
@@ -1700,12 +1917,34 @@ void igb_reinit_locked(struct igb_adapter *adapter)
{
WARN_ON(in_interrupt());
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
igb_down(adapter);
igb_up(adapter);
clear_bit(__IGB_RESETTING, &adapter->state);
}
+/**
+ * igb_enable_mas - Media Autosense re-enable after swap
+ * @adapter: adapter struct
+ **/
+void igb_enable_mas(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 connsw;
+
+ connsw = E1000_READ_REG(hw, E1000_CONNSW);
+
+ /* configure for SerDes media detect */
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ (!(connsw & E1000_CONNSW_SERDESD))) {
+ connsw |= E1000_CONNSW_ENRGSRC;
+ connsw |= E1000_CONNSW_AUTOSENSE_EN;
+ E1000_WRITE_REG(hw, E1000_CONNSW, connsw);
+ E1000_WRITE_FLUSH(hw);
+ }
+}
+
void igb_reset(struct igb_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
@@ -1719,13 +1958,13 @@ void igb_reset(struct igb_adapter *adapter)
*/
switch (mac->type) {
case e1000_i350:
- case e1000_i354:
case e1000_82580:
- pba = rd32(E1000_RXPBS);
- pba = igb_rxpbs_adjust_82580(pba);
+ case e1000_i354:
+ pba = E1000_READ_REG(hw, E1000_RXPBS);
+ pba = e1000_rxpbs_adjust_82580(pba);
break;
case e1000_82576:
- pba = rd32(E1000_RXPBS);
+ pba = E1000_READ_REG(hw, E1000_RXPBS);
pba &= E1000_RXPBS_SIZE_MASK_82576;
break;
case e1000_82575:
@@ -1739,7 +1978,7 @@ void igb_reset(struct igb_adapter *adapter)
if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
(mac->type < e1000_82576)) {
/* adjust PBA for jumbo frames */
- wr32(E1000_PBA, pba);
+ E1000_WRITE_REG(hw, E1000_PBA, pba);
/* To maintain wire speed transmits, the Tx FIFO should be
* large enough to accommodate two full transmit packets,
@@ -1748,12 +1987,12 @@ void igb_reset(struct igb_adapter *adapter)
* one full receive packet and is similarly rounded up and
* expressed in KB.
*/
- pba = rd32(E1000_PBA);
+ pba = E1000_READ_REG(hw, E1000_PBA);
/* upper 16 bits has Tx packet buffer allocation size in KB */
tx_space = pba >> 16;
/* lower 16 bits has Rx packet buffer allocation size in KB */
pba &= 0xffff;
- /* the Tx fifo also stores 16 bytes of information about the Tx
+ /* the tx fifo also stores 16 bytes of information about the tx
* but don't include ethernet FCS because hardware appends it
*/
min_tx_space = (adapter->max_frame_size +
@@ -1774,13 +2013,13 @@ void igb_reset(struct igb_adapter *adapter)
((min_tx_space - tx_space) < pba)) {
pba = pba - (min_tx_space - tx_space);
- /* if short on Rx space, Rx wins and must trump Tx
+ /* if short on rx space, rx wins and must trump tx
* adjustment
*/
if (pba < min_rx_space)
pba = min_rx_space;
}
- wr32(E1000_PBA, pba);
+ E1000_WRITE_REG(hw, E1000_PBA, pba);
}
/* flow control settings */
@@ -1802,6 +2041,11 @@ void igb_reset(struct igb_adapter *adapter)
/* disable receive for all VFs and wait one second */
if (adapter->vfs_allocated_count) {
int i;
+
+ /*
+ * Clear all flags except indication that the PF has set
+ * the VF MAC addresses administratively
+ */
for (i = 0 ; i < adapter->vfs_allocated_count; i++)
adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
@@ -1809,91 +2053,334 @@ void igb_reset(struct igb_adapter *adapter)
igb_ping_all_vfs(adapter);
/* disable transmits and receives */
- wr32(E1000_VFRE, 0);
- wr32(E1000_VFTE, 0);
+ E1000_WRITE_REG(hw, E1000_VFRE, 0);
+ E1000_WRITE_REG(hw, E1000_VFTE, 0);
}
/* Allow time for pending master requests to run */
- hw->mac.ops.reset_hw(hw);
- wr32(E1000_WUC, 0);
+ e1000_reset_hw(hw);
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
- if (hw->mac.ops.init_hw(hw))
- dev_err(&pdev->dev, "Hardware Error\n");
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ e1000_setup_init_funcs(hw, TRUE);
+ igb_check_options(adapter);
+ e1000_get_bus_info(hw);
+ adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
+ }
+ if ((mac->type == e1000_82575) &&
+ (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+ igb_enable_mas(adapter);
+ }
+ if (e1000_init_hw(hw))
+ dev_err(pci_dev_to_dev(pdev), "Hardware Error\n");
- /* Flow control settings reset on hardware reset, so guarantee flow
+ /*
+ * Flow control settings reset on hardware reset, so guarantee flow
* control is off when forcing speed.
*/
if (!hw->mac.autoneg)
- igb_force_mac_fc(hw);
+ e1000_force_mac_fc(hw);
igb_init_dmac(adapter, pba);
-#ifdef CONFIG_IGB_HWMON
/* Re-initialize the thermal sensor on i350 devices. */
- if (!test_bit(__IGB_DOWN, &adapter->state)) {
- if (mac->type == e1000_i350 && hw->bus.func == 0) {
- /* If present, re-initialize the external thermal sensor
- * interface.
- */
- if (adapter->ets)
- mac->ops.init_thermal_sensor_thresh(hw);
+ if (mac->type == e1000_i350 && hw->bus.func == 0) {
+ /*
+ * If present, re-initialize the external thermal sensor
+ * interface.
+ */
+ if (adapter->ets)
+ e1000_set_i2c_bb(hw);
+ e1000_init_thermal_sensor_thresh(hw);
+ }
+
+	/* Re-establish EEE setting */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ switch (mac->type) {
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ e1000_set_eee_i350(hw, true, true);
+ break;
+ case e1000_i354:
+ e1000_set_eee_i354(hw, true, true);
+ break;
+ default:
+ break;
}
}
-#endif
+
if (!netif_running(adapter->netdev))
igb_power_down_link(adapter);
igb_update_mng_vlan(adapter);
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
- wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
+ E1000_WRITE_REG(hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
+#ifdef HAVE_PTP_1588_CLOCK
/* Re-enable PTP, where applicable. */
igb_ptp_reset(adapter);
+#endif /* HAVE_PTP_1588_CLOCK */
+
+ e1000_get_phy_info(hw);
- igb_get_phy_info(hw);
+ adapter->devrc++;
}
+#ifdef HAVE_NDO_SET_FEATURES
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+static u32 igb_fix_features(struct net_device *netdev,
+ u32 features)
+#else
static netdev_features_t igb_fix_features(struct net_device *netdev,
- netdev_features_t features)
+ netdev_features_t features)
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
{
- /* Since there is no support for separate Rx/Tx vlan accel
- * enable/disable make sure Tx flag is always in same state as Rx.
+ /*
+	 * Since there is no support for separate tx vlan accel
+	 * enable/disable, make sure the tx flag is cleared if rx is.
*/
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- features |= NETIF_F_HW_VLAN_CTAG_TX;
- else
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+#else
+ if (!(features & NETIF_F_HW_VLAN_RX))
+ features &= ~NETIF_F_HW_VLAN_TX;
+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
+#ifndef IGB_NO_LRO
+ /* If Rx checksum is disabled, then LRO should also be disabled */
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+#endif
return features;
}
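
As a rough illustration of the dependency rules enforced by igb_fix_features() above (tx VLAN acceleration cannot stay on without rx, and LRO needs rx checksumming), here is a minimal sketch using hypothetical flag names in place of the NETIF_F_* bits:

#include <stdint.h>

/* hypothetical feature bits standing in for the NETIF_F_* flags */
#define F_VLAN_RX  (1u << 0)
#define F_VLAN_TX  (1u << 1)
#define F_RXCSUM   (1u << 2)
#define F_LRO      (1u << 3)

/* apply the same dependency rules as igb_fix_features() */
static uint32_t fix_features(uint32_t features)
{
	/* tx vlan accel cannot be enabled independently of rx */
	if (!(features & F_VLAN_RX))
		features &= ~F_VLAN_TX;

	/* LRO builds on rx checksum offload */
	if (!(features & F_RXCSUM))
		features &= ~F_LRO;

	return features;
}
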
static int igb_set_features(struct net_device *netdev,
- netdev_features_t features)
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+ u32 features)
+#else
+ netdev_features_t features)
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
{
netdev_features_t changed = netdev->features ^ features;
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
struct igb_adapter *adapter = netdev_priv(netdev);
+#endif
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+#else
+ if (changed & NETIF_F_HW_VLAN_RX)
+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
+ netdev->features = features;
+#ifdef HAVE_VLAN_RX_REGISTER
+ igb_vlan_mode(netdev, adapter->vlgrp);
+#else
igb_vlan_mode(netdev, features);
+#endif
- if (!(changed & NETIF_F_RXALL))
+ if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
return 0;
netdev->features = features;
- if (netif_running(netdev))
- igb_reinit_locked(adapter);
- else
- igb_reset(adapter);
+ return 0;
+}
+#endif /* HAVE_NDO_SET_FEATURES */
+
+#ifdef HAVE_FDB_OPS
+#ifdef USE_CONST_DEV_UC_CHAR
+static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+#ifdef HAVE_NDO_FDB_ADD_VID
+ u16 vid,
+#endif /* HAVE_NDO_FDB_ADD_VID */
+ u16 flags)
+#else /* USE_CONST_DEV_UC_CHAR */
+static int igb_ndo_fdb_add(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr,
+ u16 flags)
+#endif /* USE_CONST_DEV_UC_CHAR */
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ int err;
+
+ if (!(adapter->vfs_allocated_count))
+ return -EOPNOTSUPP;
+
+ /* Hardware does not support aging addresses so if a
+ * ndm_state is given only allow permanent addresses
+ */
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ pr_info("%s: FDB only supports static addresses\n",
+ igb_driver_name);
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
+ u32 rar_uc_entries = hw->mac.rar_entry_count -
+ (adapter->vfs_allocated_count + 1);
+
+ if (netdev_uc_count(dev) < rar_uc_entries)
+ err = dev_uc_add_excl(dev, addr);
+ else
+ err = -ENOMEM;
+ } else if (is_multicast_ether_addr(addr)) {
+ err = dev_mc_add_excl(dev, addr);
+ } else {
+ err = -EINVAL;
+ }
+
+ /* Only return duplicate errors if NLM_F_EXCL is set */
+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
+ err = 0;
+
+ return err;
+}
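
The only non-obvious piece of igb_ndo_fdb_add() is how many receive-address (RAR) slots remain for extra unicast filters: one slot per VF plus one default entry are reserved first. A minimal sketch of that accounting, with a hypothetical helper name:

#include <stdbool.h>

/*
 * Hypothetical helper mirroring the capacity check in igb_ndo_fdb_add():
 * the hardware RAR table is shared between the PF default address, one
 * entry per VF, and any additional unicast filters added via FDB.
 */
static bool fdb_uc_slot_available(unsigned int rar_entry_count,
				  unsigned int vfs_allocated_count,
				  unsigned int uc_addrs_in_use)
{
	unsigned int reserved = vfs_allocated_count + 1; /* VFs + default MAC */

	if (rar_entry_count <= reserved)
		return false;

	return uc_addrs_in_use < (rar_entry_count - reserved);
}
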
+
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+#ifdef USE_CONST_DEV_UC_CHAR
+static int igb_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ const unsigned char *addr)
+#else
+static int igb_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr)
+#endif /* USE_CONST_DEV_UC_CHAR */
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int err = -EOPNOTSUPP;
+
+ if (ndm->ndm_state & NUD_PERMANENT) {
+ pr_info("%s: FDB only supports static addresses\n",
+ igb_driver_name);
+ return -EINVAL;
+ }
+
+ if (adapter->vfs_allocated_count) {
+ if (is_unicast_ether_addr(addr))
+ err = dev_uc_del(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(dev, addr);
+ else
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int igb_ndo_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ int idx)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+ if (adapter->vfs_allocated_count)
+ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+ return idx;
+}
+#endif /* USE_DEFAULT_FDB_DEL_DUMP */
+#ifdef HAVE_BRIDGE_ATTRIBS
+#ifdef HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
+static int igb_ndo_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh,
+ u16 flags)
+#else
+static int igb_ndo_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh)
+#endif /* HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS */
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct nlattr *attr, *br_spec;
+ int rem;
+
+ if (!(adapter->vfs_allocated_count))
+ return -EOPNOTSUPP;
+
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ case e1000_i354:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ __u16 mode;
+
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ mode = nla_get_u16(attr);
+ if (mode == BRIDGE_MODE_VEPA) {
+ e1000_vmdq_set_loopback_pf(hw, 0);
+ adapter->flags &= ~IGB_FLAG_LOOPBACK_ENABLE;
+ } else if (mode == BRIDGE_MODE_VEB) {
+ e1000_vmdq_set_loopback_pf(hw, 1);
+ adapter->flags |= IGB_FLAG_LOOPBACK_ENABLE;
+ } else
+ return -EINVAL;
+
+ netdev_info(adapter->netdev, "enabling bridge mode: %s\n",
+ mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+ }
return 0;
}
+#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask,
+ int nlflags)
+#elif defined(HAVE_BRIDGE_FILTER)
+static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask)
+#else
+static int igb_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev)
+#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ u16 mode;
+
+ if (!(adapter->vfs_allocated_count))
+ return -EOPNOTSUPP;
+
+ if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE)
+ mode = BRIDGE_MODE_VEB;
+ else
+ mode = BRIDGE_MODE_VEPA;
+#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags,
+ filter_mask, NULL);
+#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS)
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags);
+#elif defined(NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS)
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0);
+#else
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
+#endif /* NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS */
+}
+#endif /* HAVE_BRIDGE_ATTRIBS */
+#endif /* HAVE_FDB_OPS */
+#ifdef HAVE_NET_DEVICE_OPS
static const struct net_device_ops igb_netdev_ops = {
.ndo_open = igb_open,
.ndo_stop = igb_close,
.ndo_start_xmit = igb_xmit_frame,
- .ndo_get_stats64 = igb_get_stats64,
+ .ndo_get_stats = igb_get_stats,
.ndo_set_rx_mode = igb_set_rx_mode,
.ndo_set_mac_address = igb_set_mac,
.ndo_change_mtu = igb_change_mtu,
@@ -1902,141 +2389,329 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
+#ifdef IFLA_VF_MAX
.ndo_set_vf_mac = igb_ndo_set_vf_mac,
.ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+ .ndo_set_vf_rate = igb_ndo_set_vf_bw,
+#else
.ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
- .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
+#endif /*HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
.ndo_get_vf_config = igb_ndo_get_vf_config,
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+ .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
+#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */
+#endif /* IFLA_VF_MAX */
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = igb_netpoll,
#endif
+#ifdef HAVE_VLAN_RX_REGISTER
+ .ndo_vlan_rx_register = igb_vlan_mode,
+#endif
+#ifdef HAVE_FDB_OPS
+ .ndo_fdb_add = igb_ndo_fdb_add,
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+ .ndo_fdb_del = igb_ndo_fdb_del,
+ .ndo_fdb_dump = igb_ndo_fdb_dump,
+#endif
+#ifdef HAVE_BRIDGE_ATTRIBS
+ .ndo_bridge_setlink = igb_ndo_bridge_setlink,
+ .ndo_bridge_getlink = igb_ndo_bridge_getlink,
+#endif /* HAVE_BRIDGE_ATTRIBS */
+#endif
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+};
+
+/* RHEL6 keeps these operations in a separate structure */
+static const struct net_device_ops_ext igb_netdev_ops_ext = {
+ .size = sizeof(struct net_device_ops_ext),
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+#ifdef HAVE_NDO_SET_FEATURES
.ndo_fix_features = igb_fix_features,
.ndo_set_features = igb_set_features,
+#endif /* HAVE_NDO_SET_FEATURES */
};
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+static const struct net_device_ops igb_vmdq_ops = {
+ .ndo_open = &igb_vmdq_open,
+ .ndo_stop = &igb_vmdq_close,
+ .ndo_start_xmit = &igb_vmdq_xmit_frame,
+ .ndo_get_stats = &igb_vmdq_get_stats,
+ .ndo_set_rx_mode = &igb_vmdq_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = &igb_vmdq_set_mac,
+ .ndo_change_mtu = &igb_vmdq_change_mtu,
+ .ndo_tx_timeout = &igb_vmdq_tx_timeout,
+ .ndo_vlan_rx_register = &igb_vmdq_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid,
+};
+
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+#endif /* HAVE_NET_DEVICE_OPS */
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+void igb_assign_vmdq_netdev_ops(struct net_device *vnetdev)
+{
+#ifdef HAVE_NET_DEVICE_OPS
+ vnetdev->netdev_ops = &igb_vmdq_ops;
+#else
+ dev->open = &igb_vmdq_open;
+ dev->stop = &igb_vmdq_close;
+ dev->hard_start_xmit = &igb_vmdq_xmit_frame;
+ dev->get_stats = &igb_vmdq_get_stats;
+#ifdef HAVE_SET_RX_MODE
+ dev->set_rx_mode = &igb_vmdq_set_rx_mode;
+#endif
+ dev->set_multicast_list = &igb_vmdq_set_rx_mode;
+ dev->set_mac_address = &igb_vmdq_set_mac;
+ dev->change_mtu = &igb_vmdq_change_mtu;
+#ifdef HAVE_TX_TIMEOUT
+ dev->tx_timeout = &igb_vmdq_tx_timeout;
+#endif
+#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX)
+ dev->vlan_rx_register = &igb_vmdq_vlan_rx_register;
+ dev->vlan_rx_add_vid = &igb_vmdq_vlan_rx_add_vid;
+ dev->vlan_rx_kill_vid = &igb_vmdq_vlan_rx_kill_vid;
+#endif
+#endif /* HAVE_NET_DEVICE_OPS */
+ igb_vmdq_set_ethtool_ops(vnetdev);
+ vnetdev->watchdog_timeo = 5 * HZ;
+
+}
+
+int igb_init_vmdq_netdevs(struct igb_adapter *adapter)
+{
+ int pool, err = 0, base_queue;
+ struct net_device *vnetdev;
+ struct igb_vmdq_adapter *vmdq_adapter;
+
+ for (pool = 1; pool < adapter->vmdq_pools; pool++) {
+ int qpp = (!adapter->rss_queues ? 1 : adapter->rss_queues);
+
+ base_queue = pool * qpp;
+ vnetdev = alloc_etherdev(sizeof(struct igb_vmdq_adapter));
+ if (!vnetdev) {
+ err = -ENOMEM;
+ break;
+ }
+
+ vmdq_adapter = netdev_priv(vnetdev);
+ vmdq_adapter->vnetdev = vnetdev;
+ vmdq_adapter->real_adapter = adapter;
+ vmdq_adapter->rx_ring = adapter->rx_ring[base_queue];
+ vmdq_adapter->tx_ring = adapter->tx_ring[base_queue];
+ igb_assign_vmdq_netdev_ops(vnetdev);
+ snprintf(vnetdev->name, IFNAMSIZ, "%sv%d",
+ adapter->netdev->name, pool);
+ vnetdev->features = adapter->netdev->features;
+#ifdef HAVE_NETDEV_VLAN_FEATURES
+ vnetdev->vlan_features = adapter->netdev->vlan_features;
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+ adapter->vmdq_netdev[pool-1] = vnetdev;
+ err = register_netdev(vnetdev);
+ if (err)
+ break;
+ }
+ return err;
+}
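
Each VMDq child netdev simply borrows the first rx/tx ring of its pool; the pool-to-queue mapping used above is the one-liner below (sketch with hypothetical names):

/*
 * Sketch of the pool-to-ring mapping used in igb_init_vmdq_netdevs():
 * pool 0 belongs to the default netdev, every further pool starts at
 * pool * queues_per_pool, where queues_per_pool falls back to 1 when
 * RSS is disabled.
 */
static unsigned int vmdq_base_queue(unsigned int pool,
				    unsigned int rss_queues)
{
	unsigned int queues_per_pool = rss_queues ? rss_queues : 1;

	return pool * queues_per_pool;
}
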
+
+int igb_remove_vmdq_netdevs(struct igb_adapter *adapter)
+{
+ int pool, err = 0;
+
+ for (pool = 1; pool < adapter->vmdq_pools; pool++) {
+ unregister_netdev(adapter->vmdq_netdev[pool-1]);
+ free_netdev(adapter->vmdq_netdev[pool-1]);
+ adapter->vmdq_netdev[pool-1] = NULL;
+ }
+ return err;
+}
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+
/**
* igb_set_fw_version - Configure version string for ethtool
* @adapter: adapter struct
+ *
**/
-void igb_set_fw_version(struct igb_adapter *adapter)
+static void igb_set_fw_version(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_fw_version fw;
- igb_get_fw_version(hw, &fw);
+ e1000_get_fw_version(hw, &fw);
switch (hw->mac.type) {
+ case e1000_i210:
case e1000_i211:
- snprintf(adapter->fw_version, sizeof(adapter->fw_version),
- "%2d.%2d-%d",
- fw.invm_major, fw.invm_minor, fw.invm_img_type);
- break;
-
+ if (!(e1000_get_flash_presence_i210(hw))) {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor, fw.invm_img_type);
+ break;
+ }
+ /* fall through */
default:
- /* if option is rom valid, display its version too */
+		/* if option ROM is valid, display its version too */
if (fw.or_valid) {
snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%d.%d, 0x%08x, %d.%d.%d",
- fw.eep_major, fw.eep_minor, fw.etrack_id,
- fw.or_major, fw.or_build, fw.or_patch);
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
/* no option rom */
} else {
+			if (fw.etrack_id != 0x0000) {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
+ } else {
snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%d.%d, 0x%08x",
- fw.eep_major, fw.eep_minor, fw.etrack_id);
+ sizeof(adapter->fw_version),
+ "%d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.eep_build);
+ }
}
break;
}
- return;
}
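
The version-string selection in igb_set_fw_version() reduces to a small decision tree over which firmware components are present; a condensed sketch of that precedence, using a hypothetical trimmed-down struct but the same format strings:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical, trimmed-down view of struct e1000_fw_version */
struct fw_ver {
	bool     flashless;	/* flashless i210/i211: version comes from iNVM */
	bool     or_valid;	/* option ROM version fields are valid */
	uint16_t invm_major, invm_minor, invm_img_type;
	uint16_t eep_major, eep_minor, eep_build;
	uint32_t etrack_id;
	uint16_t or_major, or_build, or_patch;
};

/* same precedence as igb_set_fw_version(): iNVM image, then option ROM,
 * then eTrack id, then bare EEPROM build number */
static void format_fw_version(const struct fw_ver *fw, char *buf, size_t len)
{
	if (fw->flashless)
		snprintf(buf, len, "%2d.%2d-%d",
			 fw->invm_major, fw->invm_minor, fw->invm_img_type);
	else if (fw->or_valid)
		snprintf(buf, len, "%d.%d, 0x%08x, %d.%d.%d",
			 fw->eep_major, fw->eep_minor,
			 (unsigned int)fw->etrack_id,
			 fw->or_major, fw->or_build, fw->or_patch);
	else if (fw->etrack_id != 0x0000)
		snprintf(buf, len, "%d.%d, 0x%08x",
			 fw->eep_major, fw->eep_minor,
			 (unsigned int)fw->etrack_id);
	else
		snprintf(buf, len, "%d.%d.%d",
			 fw->eep_major, fw->eep_minor, fw->eep_build);
}
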
/**
- * igb_init_i2c - Init I2C interface
- * @adapter: pointer to adapter structure
+ * igb_init_mas - init Media Autosense feature if enabled in the NVM
+ *
+ * @adapter: adapter struct
**/
-static s32 igb_init_i2c(struct igb_adapter *adapter)
+static void igb_init_mas(struct igb_adapter *adapter)
{
- s32 status = E1000_SUCCESS;
+ struct e1000_hw *hw = &adapter->hw;
+ u16 eeprom_data;
- /* I2C interface supported on i350 devices */
- if (adapter->hw.mac.type != e1000_i350)
- return E1000_SUCCESS;
+ e1000_read_nvm(hw, NVM_COMPAT, 1, &eeprom_data);
+ switch (hw->bus.func) {
+ case E1000_FUNC_0:
+ if (eeprom_data & IGB_MAS_ENABLE_0)
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ break;
+ case E1000_FUNC_1:
+ if (eeprom_data & IGB_MAS_ENABLE_1)
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ break;
+ case E1000_FUNC_2:
+ if (eeprom_data & IGB_MAS_ENABLE_2)
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ break;
+ case E1000_FUNC_3:
+ if (eeprom_data & IGB_MAS_ENABLE_3)
+ adapter->flags |= IGB_FLAG_MAS_ENABLE;
+ break;
+ default:
+ /* Shouldn't get here */
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "%s:AMS: Invalid port configuration, returning\n",
+ adapter->netdev->name);
+ break;
+ }
+}
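
igb_init_mas() boils down to testing one enable bit per PCI function in the NVM compatibility word; a compact sketch of the same lookup, assuming the four IGB_MAS_ENABLE_n flags occupy consecutive bit positions (a simplification made only for this illustration):

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical condensed form of igb_init_mas(): each PCI function of the
 * 82580 has its own Media Autosense enable bit in the NVM compatibility
 * word. The enable bits are assumed to start at mas_bit0 and be
 * consecutive, which may not match the real register layout exactly.
 */
static bool mas_enabled_for_func(uint16_t nvm_compat_word,
				 unsigned int pci_func,
				 unsigned int mas_bit0)
{
	if (pci_func > 3)
		return false;	/* invalid port configuration */

	return nvm_compat_word & (1u << (mas_bit0 + pci_func));
}
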
- /* Initialize the i2c bus which is controlled by the registers.
- * This bus will use the i2c_algo_bit structue that implements
- * the protocol through toggling of the 4 bits in the register.
+void igb_rar_set(struct igb_adapter *adapter, u32 index)
+{
+ u32 rar_low, rar_high;
+ struct e1000_hw *hw = &adapter->hw;
+ u8 *addr = adapter->mac_table[index].addr;
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
*/
- adapter->i2c_adap.owner = THIS_MODULE;
- adapter->i2c_algo = igb_i2c_algo;
- adapter->i2c_algo.data = adapter;
- adapter->i2c_adap.algo_data = &adapter->i2c_algo;
- adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
- strlcpy(adapter->i2c_adap.name, "igb BB",
- sizeof(adapter->i2c_adap.name));
- status = i2c_bit_add_bus(&adapter->i2c_adap);
- return status;
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* Indicate to hardware the Address is Valid. */
+ if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE)
+ rar_high |= E1000_RAH_AV;
+
+ if (hw->mac.type == e1000_82575)
+ rar_high |= E1000_RAH_POOL_1 * adapter->mac_table[index].queue;
+ else
+ rar_high |= E1000_RAH_POOL_1 << adapter->mac_table[index].queue;
+
+ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
+ E1000_WRITE_FLUSH(hw);
}
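
The byte-order handling in igb_rar_set() is just packing the six MAC bytes into two little-endian register words; a standalone sketch of the packing (RAL takes bytes 0-3, RAH bytes 4-5, with the valid and pool bits OR'ed in by the caller as shown above):

#include <stdint.h>

/*
 * Standalone sketch of the RAL/RAH packing used above: the MAC address is
 * stored in network (big-endian) byte order in memory, while the hardware
 * wants the low/high receive-address registers in little-endian order.
 */
static void pack_rar(const uint8_t addr[6], uint32_t *rar_low,
		     uint32_t *rar_high)
{
	*rar_low  = (uint32_t)addr[0] |
		    ((uint32_t)addr[1] << 8) |
		    ((uint32_t)addr[2] << 16) |
		    ((uint32_t)addr[3] << 24);
	*rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
}
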
/**
- * igb_probe - Device Initialization Routine
- * @pdev: PCI device information struct
- * @ent: entry in igb_pci_tbl
+ * igb_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in igb_pci_tbl
*
- * Returns 0 on success, negative on failure
+ * Returns 0 on success, negative on failure
*
- * igb_probe initializes an adapter identified by a pci_dev structure.
- * The OS initialization, configuring of the adapter private structure,
- * and a hardware reset occur.
+ * igb_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
**/
-static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int igb_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct net_device *netdev;
struct igb_adapter *adapter;
struct e1000_hw *hw;
u16 eeprom_data = 0;
+ u8 pba_str[E1000_PBANUM_LENGTH];
s32 ret_val;
static int global_quad_port_a; /* global quad port a indication */
- const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
- unsigned long mmio_start, mmio_len;
int err, pci_using_dac;
- u8 part_str[E1000_PBANUM_LENGTH];
-
- /* Catch broken hardware that put the wrong VF device ID in
- * the PCIe SR-IOV capability.
- */
- if (pdev->is_virtfn) {
- WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
- pci_name(pdev), pdev->vendor, pdev->device);
- return -EINVAL;
- }
+ static int cards_found;
+#ifdef HAVE_NDO_SET_FEATURES
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+ u32 hw_features;
+#else
+ netdev_features_t hw_features;
+#endif
+#endif
err = pci_enable_device_mem(pdev);
if (err)
return err;
pci_using_dac = 0;
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64));
if (!err) {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(pci_dev_to_dev(pdev),
+ DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(pci_dev_to_dev(pdev),
+ DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
+ IGB_ERR(
+ "No usable DMA configuration, aborting\n");
goto err_dma;
}
}
}
- err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM),
+#ifndef HAVE_ASPM_QUIRKS
+ /* 82575 requires that the pci-e link partner disable the L0s state */
+ switch (pdev->device) {
+ case E1000_DEV_ID_82575EB_COPPER:
+ case E1000_DEV_ID_82575EB_FIBER_SERDES:
+ case E1000_DEV_ID_82575GB_QUAD_COPPER:
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
+ default:
+ break;
+ }
+
+#endif /* HAVE_ASPM_QUIRKS */
+ err = pci_request_selected_regions(pdev,
+ pci_select_bars(pdev,
+ IORESOURCE_MEM),
igb_driver_name);
if (err)
goto err_pci_reg;
@@ -2044,14 +2719,18 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
- pci_save_state(pdev);
err = -ENOMEM;
+#ifdef HAVE_TX_MQ
netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
IGB_MAX_TX_QUEUES);
+#else
+ netdev = alloc_etherdev(sizeof(struct igb_adapter));
+#endif /* HAVE_TX_MQ */
if (!netdev)
goto err_alloc_etherdev;
+ SET_MODULE_OWNER(netdev);
SET_NETDEV_DEV(netdev, &pdev->dev);
pci_set_drvdata(pdev, netdev);
@@ -2060,142 +2739,259 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->pdev = pdev;
hw = &adapter->hw;
hw->back = adapter;
- adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
-
- mmio_start = pci_resource_start(pdev, 0);
- mmio_len = pci_resource_len(pdev, 0);
+ adapter->port_num = hw->bus.func;
+ adapter->msg_enable = (1 << debug) - 1;
+#ifdef HAVE_PCI_ERS
+ err = pci_save_state(pdev);
+ if (err)
+ goto err_ioremap;
+#endif
err = -EIO;
- hw->hw_addr = ioremap(mmio_start, mmio_len);
- if (!hw->hw_addr)
+ adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!adapter->io_addr)
goto err_ioremap;
+ /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
+ hw->hw_addr = adapter->io_addr;
+#ifdef HAVE_NET_DEVICE_OPS
netdev->netdev_ops = &igb_netdev_ops;
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+ set_netdev_ops_ext(netdev, &igb_netdev_ops_ext);
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+#else /* HAVE_NET_DEVICE_OPS */
+ netdev->open = &igb_open;
+ netdev->stop = &igb_close;
+ netdev->get_stats = &igb_get_stats;
+#ifdef HAVE_SET_RX_MODE
+ netdev->set_rx_mode = &igb_set_rx_mode;
+#endif
+ netdev->set_multicast_list = &igb_set_rx_mode;
+ netdev->set_mac_address = &igb_set_mac;
+ netdev->change_mtu = &igb_change_mtu;
+ netdev->do_ioctl = &igb_ioctl;
+#ifdef HAVE_TX_TIMEOUT
+ netdev->tx_timeout = &igb_tx_timeout;
+#endif
+ netdev->vlan_rx_register = igb_vlan_mode;
+ netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
+ netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ netdev->poll_controller = igb_netpoll;
+#endif
+ netdev->hard_start_xmit = &igb_xmit_frame;
+#endif /* HAVE_NET_DEVICE_OPS */
igb_set_ethtool_ops(netdev);
+#ifdef HAVE_TX_TIMEOUT
netdev->watchdog_timeo = 5 * HZ;
+#endif
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
- netdev->mem_start = mmio_start;
- netdev->mem_end = mmio_start + mmio_len;
-
- /* PCI config space info */
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- hw->revision_id = pdev->revision;
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_device_id = pdev->subsystem_device;
-
- /* Copy the default MAC, PHY and NVM function pointers */
- memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
- memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
- memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
- /* Initialize skew-specific constants */
- err = ei->get_invariants(hw);
- if (err)
- goto err_sw_init;
+ adapter->bd_number = cards_found;
/* setup the private structure */
err = igb_sw_init(adapter);
if (err)
goto err_sw_init;
- igb_get_bus_info_pcie(hw);
+ e1000_get_bus_info(hw);
- hw->phy.autoneg_wait_to_complete = false;
+ hw->phy.autoneg_wait_to_complete = FALSE;
+ hw->mac.adaptive_ifs = FALSE;
/* Copper options */
if (hw->phy.media_type == e1000_media_type_copper) {
hw->phy.mdix = AUTO_ALL_MODES;
- hw->phy.disable_polarity_correction = false;
+ hw->phy.disable_polarity_correction = FALSE;
hw->phy.ms_type = e1000_ms_hw_default;
}
- if (igb_check_reset_block(hw))
- dev_info(&pdev->dev,
+ if (e1000_check_reset_block(hw))
+ dev_info(pci_dev_to_dev(pdev),
"PHY reset is blocked due to SOL/IDER session.\n");
- /* features is initialized to 0 in allocation, it might have bits
+ /*
+ * features is initialized to 0 in allocation, it might have bits
* set by igb_sw_init so we should use an or instead of an
* assignment.
*/
netdev->features |= NETIF_F_SG |
NETIF_F_IP_CSUM |
+#ifdef NETIF_F_IPV6_CSUM
NETIF_F_IPV6_CSUM |
+#endif
+#ifdef NETIF_F_TSO
NETIF_F_TSO |
+#ifdef NETIF_F_TSO6
NETIF_F_TSO6 |
+#endif
+#endif /* NETIF_F_TSO */
+#ifdef NETIF_F_RXHASH
NETIF_F_RXHASH |
+#endif
NETIF_F_RXCSUM |
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_TX;
+#else
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_TX;
+#endif
+
+ if (hw->mac.type >= e1000_82576)
+ netdev->features |= NETIF_F_SCTP_CSUM;
+#ifdef HAVE_NDO_SET_FEATURES
/* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
- netdev->hw_features |= NETIF_F_RXALL;
+#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+ hw_features = netdev->hw_features;
+
+ /* give us the option of enabling LRO later */
+ hw_features |= NETIF_F_LRO;
+
+#else
+ hw_features = get_netdev_hw_features(netdev);
+
+#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */
+ hw_features |= netdev->features;
+
+#else
+#ifdef NETIF_F_GRO
+
+ /* this is only needed on kernels prior to 2.6.39 */
+ netdev->features |= NETIF_F_GRO;
+#endif /* NETIF_F_GRO */
+#endif /* HAVE_NDO_SET_FEATURES */
/* set this bit last since it cannot be part of hw_features */
+#ifdef NETIF_F_HW_VLAN_CTAG_FILTER
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#endif /* NETIF_F_HW_FLAN_CTAG_FILTER */
+#ifdef NETIF_F_HW_VLAN_TX
+ netdev->features |= NETIF_F_HW_VLAN_FILTER;
+#endif /* NETIF_F_HW_VLAN_TX */
+
+#ifdef HAVE_NDO_SET_FEATURES
+#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT
+ set_netdev_hw_features(netdev, hw_features);
+#else
+ netdev->hw_features = hw_features;
+#endif
+#endif
+#ifdef HAVE_NETDEV_VLAN_FEATURES
netdev->vlan_features |= NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
NETIF_F_SG;
- netdev->priv_flags |= IFF_SUPP_NOFCS;
-
- if (pci_using_dac) {
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+ if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
- if (hw->mac.type >= e1000_82576) {
- netdev->hw_features |= NETIF_F_SCTP_CSUM;
- netdev->features |= NETIF_F_SCTP_CSUM;
- }
-
- netdev->priv_flags |= IFF_UNICAST_FLT;
-
- adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
+ adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
+#ifdef DEBUG
+ if (adapter->dmac != IGB_DMAC_DISABLE)
+		netdev_info(netdev, "%s: DMA Coalescing is enabled.\n",
+ netdev->name);
+#endif
/* before reading the NVM, reset the controller to put the device in a
* known good starting state
*/
- hw->mac.ops.reset_hw(hw);
-
- /* make sure the NVM is good , i211 parts have special NVM that
- * doesn't contain a checksum
- */
- if (hw->mac.type != e1000_i211) {
- if (hw->nvm.ops.validate(hw) < 0) {
- dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
- err = -EIO;
- goto err_eeprom;
- }
+ e1000_reset_hw(hw);
+
+ /* make sure the NVM is good */
+ if (e1000_validate_nvm_checksum(hw) < 0) {
+#if defined (CONFIG_MACH_APALIS_T30) || defined (CONFIG_MACH_APALIS_TK1)
+ /* only warn on NVM validation failures */
+ dev_warn(pci_dev_to_dev(pdev),
+ "The NVM Checksum Is Not Valid\n");
+#else /* CONFIG_MACH_APALIS_T30 || CONFIG_MACH_APALIS_TK1 */
+ dev_err(pci_dev_to_dev(pdev),
+ "The NVM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+#endif /* CONFIG_MACH_APALIS_T30 || CONFIG_MACH_APALIS_TK1 */
}
/* copy the MAC address out of the NVM */
- if (hw->mac.ops.read_mac_addr(hw))
- dev_err(&pdev->dev, "NVM Read Error\n");
+ if (e1000_read_mac_addr(hw))
+ dev_err(pci_dev_to_dev(pdev), "NVM Read Error\n");
+
+#if defined (CONFIG_MACH_APALIS_T30) || defined (CONFIG_MACH_APALIS_TK1)
+ if (g_usr_mac && (g_usr_mac < 3)) {
+ /* Get user set MAC address */
+ if (g_usr_mac == 2) {
+ /* 0x100000 offset for 2nd Ethernet MAC */
+ g_mac_addr[3] += 0x10;
+ if (g_mac_addr[3] < 0x10)
+ dev_warn(pci_dev_to_dev(pdev), "MAC address "
+ "byte 3 (0x%02x) wrap around",
+ g_mac_addr[3]);
+ }
+ memcpy(hw->mac.addr, g_mac_addr, ETH_ALEN);
+ g_usr_mac++;
+ }
+#endif /* CONFIG_MACH_APALIS_T30 || CONFIG_MACH_APALIS_TK1 */
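
The Apalis-specific block above derives the second port's MAC address by adding 0x10 to byte 3 of the user-supplied address, i.e. an offset of 0x100000 within the NIC-specific half of the address; a minimal sketch of that derivation, with a hypothetical helper and the wrap case reduced to a return value (the driver itself only warns):

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical helper modelling the board-specific MAC derivation above:
 * the second igb port uses the user-provided address plus 0x100000 on the
 * NIC-specific bytes, which is what the += 0x10 on byte 3 implements.
 * Returns false if byte 3 wrapped around.
 */
static bool derive_second_port_mac(uint8_t mac[6])
{
	uint8_t before = mac[3];

	mac[3] += 0x10;		/* 0x100000 offset for the 2nd Ethernet MAC */

	return mac[3] > before;	/* false means byte 3 wrapped around */
}
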
memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+#ifdef ETHTOOL_GPERMADDR
+ memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
+#else
if (!is_valid_ether_addr(netdev->dev_addr)) {
- dev_err(&pdev->dev, "Invalid MAC Address\n");
+#endif
+#if defined (CONFIG_MACH_APALIS_T30) || defined (CONFIG_MACH_APALIS_TK1)
+ /* Use Toradex OUI as default */
+ char default_mac_addr[ETH_ALEN] = {0x0, 0x14, 0x2d, 0x0, 0x0, 0x0};
+ dev_warn(&pdev->dev, "using Toradex OUI as default igb MAC");
+ memcpy(hw->mac.addr, default_mac_addr, ETH_ALEN);
+ memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+#ifdef ETHTOOL_GPERMADDR
+ memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
+#endif
+#else /* CONFIG_MACH_APALIS_T30 || CONFIG_MACH_APALIS_TK1 */
+ dev_err(pci_dev_to_dev(pdev), "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
+#endif /* CONFIG_MACH_APALIS_T30 || CONFIG_MACH_APALIS_TK1 */
}
+ memcpy(&adapter->mac_table[0].addr, hw->mac.addr, netdev->addr_len);
+ adapter->mac_table[0].queue = adapter->vfs_allocated_count;
+ adapter->mac_table[0].state = (IGB_MAC_STATE_DEFAULT
+ | IGB_MAC_STATE_IN_USE);
+ igb_rar_set(adapter, 0);
+
/* get firmware version for ethtool -i */
igb_set_fw_version(adapter);
- setup_timer(&adapter->watchdog_timer, igb_watchdog,
+ /* configure RXPBSIZE and TXPBSIZE */
+ if (hw->mac.type == e1000_i210) {
+ E1000_WRITE_REG(hw, E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
+ E1000_WRITE_REG(hw, E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
+ }
+
+ /* Check if Media Autosense is enabled */
+ if (hw->mac.type == e1000_82580)
+ igb_init_mas(adapter);
+ setup_timer(&adapter->watchdog_timer, &igb_watchdog,
(unsigned long) adapter);
- setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+ setup_timer(&adapter->dma_err_timer, &igb_dma_err_timer,
+ (unsigned long) adapter);
+ setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
(unsigned long) adapter);
INIT_WORK(&adapter->reset_task, igb_reset_task);
INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+ INIT_WORK(&adapter->dma_err_task, igb_dma_err_task);
/* Initialize link properties that are user-changeable */
adapter->fc_autoneg = true;
@@ -2205,19 +3001,19 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->fc.requested_mode = e1000_fc_default;
hw->fc.current_mode = e1000_fc_default;
- igb_validate_mdi_setting(hw);
+ e1000_validate_mdi_setting(hw);
/* By default, support wake on port A */
if (hw->bus.func == 0)
adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
- /* Check the NVM for wake support on non-port A ports */
+ /* Check the NVM for wake support for non-port A ports */
if (hw->mac.type >= e1000_82580)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
&eeprom_data);
else if (hw->bus.func == 1)
- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+ e1000_read_nvm(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
if (eeprom_data & IGB_EEPROM_APME)
adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
@@ -2236,7 +3032,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Wake events only supported on port A for dual fiber
* regardless of eeprom setting
*/
- if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
+ if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1)
adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
break;
case E1000_DEV_ID_82576_QUAD_COPPER:
@@ -2251,9 +3047,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
global_quad_port_a = 0;
break;
default:
- /* If the device can't wake, don't set software support */
- if (!device_can_wakeup(&adapter->pdev->dev))
- adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+ break;
}
/* initialize the wol settings based on the eeprom settings */
@@ -2267,119 +3061,187 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->wol = 0;
}
- device_set_wakeup_enable(&adapter->pdev->dev,
+	/* Some vendors want the ability to use the EEPROM setting as
+ * enable/disable only, and not for capability
+ */
+ if (((hw->mac.type == e1000_i350) ||
+ (hw->mac.type == e1000_i354)) &&
+ (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+ adapter->wol = 0;
+ }
+ if (hw->mac.type == e1000_i350) {
+ if (((pdev->subsystem_device == 0x5001) ||
+ (pdev->subsystem_device == 0x5002)) &&
+ (hw->bus.func == 0)) {
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+ adapter->wol = 0;
+ }
+ if (pdev->subsystem_device == 0x1F52)
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+ }
+
+ device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev),
adapter->flags & IGB_FLAG_WOL_SUPPORTED);
/* reset the hardware with the new settings */
igb_reset(adapter);
+ adapter->devrc = 0;
+#ifdef HAVE_I2C_SUPPORT
/* Init the I2C interface */
err = igb_init_i2c(adapter);
if (err) {
dev_err(&pdev->dev, "failed to init i2c interface\n");
goto err_eeprom;
}
+#endif /* HAVE_I2C_SUPPORT */
/* let the f/w know that the h/w is now under the control of the
- * driver. */
+ * driver.
+ */
igb_get_hw_control(adapter);
- strcpy(netdev->name, "eth%d");
+ strncpy(netdev->name, "eth%d", IFNAMSIZ);
err = register_netdev(netdev);
if (err)
goto err_register;
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ err = igb_init_vmdq_netdevs(adapter);
+ if (err)
+ goto err_register;
+#endif
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
-#ifdef CONFIG_IGB_DCA
- if (dca_add_requester(&pdev->dev) == 0) {
+#ifdef IGB_DCA
+ if (dca_add_requester(&pdev->dev) == E1000_SUCCESS) {
adapter->flags |= IGB_FLAG_DCA_ENABLED;
- dev_info(&pdev->dev, "DCA enabled\n");
+ dev_info(pci_dev_to_dev(pdev), "DCA enabled\n");
igb_setup_dca(adapter);
}
#endif
-#ifdef CONFIG_IGB_HWMON
+#ifdef HAVE_PTP_1588_CLOCK
+ /* do hw tstamp init after resetting */
+ igb_ptp_init(adapter);
+#endif /* HAVE_PTP_1588_CLOCK */
+
+ dev_info(pci_dev_to_dev(pdev), "Intel(R) Gigabit Ethernet Network Connection\n");
+ /* print bus type/speed/width info */
+ dev_info(pci_dev_to_dev(pdev), "%s: (PCIe:%s:%s) ",
+ netdev->name,
+ ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5GT/s" :
+ (hw->bus.speed == e1000_bus_speed_5000) ? "5.0GT/s" :
+ (hw->mac.type == e1000_i354) ? "integrated" : "unknown"),
+ ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+ (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
+ (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
+ (hw->mac.type == e1000_i354) ? "integrated" : "unknown"));
+ netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
+
+ ret_val = e1000_read_pba_string(hw, pba_str, E1000_PBANUM_LENGTH);
+ if (ret_val)
+ strcpy(pba_str, "Unknown");
+ dev_info(pci_dev_to_dev(pdev), "%s: PBA No: %s\n", netdev->name,
+ pba_str);
+
/* Initialize the thermal sensor on i350 devices. */
- if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
- u16 ets_word;
+ if (hw->mac.type == e1000_i350) {
+ if (hw->bus.func == 0) {
+ u16 ets_word;
- /* Read the NVM to determine if this i350 device supports an
- * external thermal sensor.
- */
- hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
- if (ets_word != 0x0000 && ets_word != 0xFFFF)
- adapter->ets = true;
- else
- adapter->ets = false;
- if (igb_sysfs_init(adapter))
- dev_err(&pdev->dev,
- "failed to allocate sysfs resources\n");
+ /*
+ * Read the NVM to determine if this i350 device
+ * supports an external thermal sensor.
+ */
+ e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_word);
+ if (ets_word != 0x0000 && ets_word != 0xFFFF)
+ adapter->ets = true;
+ else
+ adapter->ets = false;
+ }
+#ifdef IGB_HWMON
+
+ igb_sysfs_init(adapter);
+#else
+#ifdef IGB_PROCFS
+
+ igb_procfs_init(adapter);
+#endif /* IGB_PROCFS */
+#endif /* IGB_HWMON */
} else {
adapter->ets = false;
}
-#endif
- /* do hw tstamp init after resetting */
- igb_ptp_init(adapter);
- dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
- /* print bus type/speed/width info, not applicable to i354 */
- if (hw->mac.type != e1000_i354) {
- dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
- netdev->name,
- ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
- (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
- "unknown"),
- ((hw->bus.width == e1000_bus_width_pcie_x4) ?
- "Width x4" :
- (hw->bus.width == e1000_bus_width_pcie_x2) ?
- "Width x2" :
- (hw->bus.width == e1000_bus_width_pcie_x1) ?
- "Width x1" : "unknown"), netdev->dev_addr);
- }
-
- ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
- if (ret_val)
- strcpy(part_str, "Unknown");
- dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
- dev_info(&pdev->dev,
- "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
- adapter->msix_entries ? "MSI-X" :
- (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
- adapter->num_rx_queues, adapter->num_tx_queues);
- switch (hw->mac.type) {
- case e1000_i350:
- case e1000_i210:
- case e1000_i211:
- igb_set_eee_i350(hw);
- break;
- case e1000_i354:
- if (hw->phy.media_type == e1000_media_type_copper) {
- if ((rd32(E1000_CTRL_EXT) &
- E1000_CTRL_EXT_LINK_MODE_SGMII))
- igb_set_eee_i354(hw);
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ switch (hw->mac.type) {
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ /* Enable EEE for internal copper PHY devices */
+ err = e1000_set_eee_i350(hw, true, true);
+ if ((!err) &&
+ (adapter->flags & IGB_FLAG_EEE))
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+ break;
+ case e1000_i354:
+ if ((E1000_READ_REG(hw, E1000_CTRL_EXT)) &
+ (E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+ err = e1000_set_eee_i354(hw, true, true);
+ if ((!err) &&
+ (adapter->flags & IGB_FLAG_EEE))
+ adapter->eee_advert =
+ MDIO_EEE_100TX | MDIO_EEE_1000T;
+ }
+ break;
+ default:
+ break;
}
- break;
- default:
- break;
}
+ /* send driver version info to firmware */
+ if ((hw->mac.type >= e1000_i350) &&
+ (e1000_get_flash_presence_i210(hw)))
+ igb_init_fw(adapter);
+
+#ifndef IGB_NO_LRO
+ if (netdev->features & NETIF_F_LRO)
+ dev_info(pci_dev_to_dev(pdev), "Internal LRO is enabled\n");
+ else
+ dev_info(pci_dev_to_dev(pdev), "LRO is disabled\n");
+#endif
+ dev_info(pci_dev_to_dev(pdev),
+ "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
+ adapter->msix_entries ? "MSI-X" :
+ (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
+ adapter->num_rx_queues, adapter->num_tx_queues);
+
+ cards_found++;
+
pm_runtime_put_noidle(&pdev->dev);
return 0;
err_register:
igb_release_hw_control(adapter);
+#ifdef HAVE_I2C_SUPPORT
memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
+#endif /* HAVE_I2C_SUPPORT */
+#if !defined (CONFIG_MACH_APALIS_T30) && !defined (CONFIG_MACH_APALIS_TK1)
err_eeprom:
- if (!igb_check_reset_block(hw))
- igb_reset_phy(hw);
+#endif
+ if (!e1000_check_reset_block(hw))
+ e1000_phy_hw_reset(hw);
if (hw->flash_address)
iounmap(hw->flash_address);
err_sw_init:
+ kfree(adapter->shadow_vfta);
igb_clear_interrupt_scheme(adapter);
- iounmap(hw->hw_addr);
+ igb_reset_sriov_capability(adapter);
+ iounmap(adapter->io_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
@@ -2390,118 +3252,28 @@ err_dma:
pci_disable_device(pdev);
return err;
}
-
-#ifdef CONFIG_PCI_IOV
-static int igb_disable_sriov(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
-
- /* reclaim resources allocated to VFs */
- if (adapter->vf_data) {
- /* disable iov and allow time for transactions to clear */
- if (pci_vfs_assigned(pdev)) {
- dev_warn(&pdev->dev,
- "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
- return -EPERM;
- } else {
- pci_disable_sriov(pdev);
- msleep(500);
- }
-
- kfree(adapter->vf_data);
- adapter->vf_data = NULL;
- adapter->vfs_allocated_count = 0;
- wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
- wrfl();
- msleep(100);
- dev_info(&pdev->dev, "IOV Disabled\n");
-
- /* Re-enable DMA Coalescing flag since IOV is turned off */
- adapter->flags |= IGB_FLAG_DMAC;
- }
-
- return 0;
-}
-
-static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct igb_adapter *adapter = netdev_priv(netdev);
- int old_vfs = pci_num_vf(pdev);
- int err = 0;
- int i;
-
- if (!num_vfs)
- goto out;
- else if (old_vfs && old_vfs == num_vfs)
- goto out;
- else if (old_vfs && old_vfs != num_vfs)
- err = igb_disable_sriov(pdev);
-
- if (err)
- goto out;
-
- if (num_vfs > 7) {
- err = -EPERM;
- goto out;
- }
-
- adapter->vfs_allocated_count = num_vfs;
-
- adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
- sizeof(struct vf_data_storage), GFP_KERNEL);
-
- /* if allocation failed then we do not support SR-IOV */
- if (!adapter->vf_data) {
- adapter->vfs_allocated_count = 0;
- dev_err(&pdev->dev,
- "Unable to allocate memory for VF Data Storage\n");
- err = -ENOMEM;
- goto out;
- }
-
- err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
- if (err)
- goto err_out;
-
- dev_info(&pdev->dev, "%d VFs allocated\n",
- adapter->vfs_allocated_count);
- for (i = 0; i < adapter->vfs_allocated_count; i++)
- igb_vf_configure(adapter, i);
-
- /* DMA Coalescing is not supported in IOV mode. */
- adapter->flags &= ~IGB_FLAG_DMAC;
- goto out;
-
-err_out:
- kfree(adapter->vf_data);
- adapter->vf_data = NULL;
- adapter->vfs_allocated_count = 0;
-out:
- return err;
-}
-
-#endif
-/**
+#ifdef HAVE_I2C_SUPPORT
+/*
* igb_remove_i2c - Cleanup I2C interface
* @adapter: pointer to adapter structure
- **/
+ *
+ */
static void igb_remove_i2c(struct igb_adapter *adapter)
{
+
/* free the adapter bus structure */
i2c_del_adapter(&adapter->i2c_adap);
}
+#endif /* HAVE_I2C_SUPPORT */
/**
- * igb_remove - Device Removal Routine
- * @pdev: PCI device information struct
+ * igb_remove - Device Removal Routine
+ * @pdev: PCI device information struct
*
- * igb_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device. The could be caused by a
- * Hot-Plug event, or because the driver is going to be removed from
- * memory.
+ * igb_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
**/
static void igb_remove(struct pci_dev *pdev)
{
@@ -2510,30 +3282,39 @@ static void igb_remove(struct pci_dev *pdev)
struct e1000_hw *hw = &adapter->hw;
pm_runtime_get_noresume(&pdev->dev);
-#ifdef CONFIG_IGB_HWMON
- igb_sysfs_exit(adapter);
-#endif
+#ifdef HAVE_I2C_SUPPORT
igb_remove_i2c(adapter);
+#endif /* HAVE_I2C_SUPPORT */
+#ifdef HAVE_PTP_1588_CLOCK
igb_ptp_stop(adapter);
- /* The watchdog timer may be rescheduled, so explicitly
- * disable watchdog from being rescheduled.
+#endif /* HAVE_PTP_1588_CLOCK */
+
+ /* flush_scheduled work may reschedule our watchdog task, so
+ * explicitly disable watchdog tasks from being rescheduled
*/
set_bit(__IGB_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+ del_timer_sync(&adapter->dma_err_timer);
del_timer_sync(&adapter->phy_info_timer);
- cancel_work_sync(&adapter->reset_task);
- cancel_work_sync(&adapter->watchdog_task);
+ flush_scheduled_work();
-#ifdef CONFIG_IGB_DCA
+#ifdef IGB_DCA
if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
- dev_info(&pdev->dev, "DCA disabled\n");
+ dev_info(pci_dev_to_dev(pdev), "DCA disabled\n");
dca_remove_requester(&pdev->dev);
adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
- wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
+ E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_DISABLE);
}
#endif
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ igb_remove_vmdq_netdevs(adapter);
+#endif
+
+ igb_reset_sriov_capability(adapter);
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
@@ -2543,16 +3324,21 @@ static void igb_remove(struct pci_dev *pdev)
igb_clear_interrupt_scheme(adapter);
-#ifdef CONFIG_PCI_IOV
- igb_disable_sriov(pdev);
-#endif
-
- iounmap(hw->hw_addr);
+ if (adapter->io_addr)
+ iounmap(adapter->io_addr);
if (hw->flash_address)
iounmap(hw->flash_address);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
+#ifdef IGB_HWMON
+ igb_sysfs_exit(adapter);
+#else
+#ifdef IGB_PROCFS
+ igb_procfs_exit(adapter);
+#endif /* IGB_PROCFS */
+#endif /* IGB_HWMON */
+ kfree(adapter->mac_table);
kfree(adapter->shadow_vfta);
free_netdev(netdev);
@@ -2562,102 +3348,12 @@ static void igb_remove(struct pci_dev *pdev)
}
/**
- * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
- * @adapter: board private structure to initialize
- *
- * This function initializes the vf specific data storage and then attempts to
- * allocate the VFs. The reason for ordering it this way is because it is much
- * mor expensive time wise to disable SR-IOV than it is to allocate and free
- * the memory for the VFs.
- **/
-static void igb_probe_vfs(struct igb_adapter *adapter)
-{
-#ifdef CONFIG_PCI_IOV
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_hw *hw = &adapter->hw;
-
- /* Virtualization features not supported on i210 family. */
- if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
- return;
-
- pci_sriov_set_totalvfs(pdev, 7);
- igb_enable_sriov(pdev, max_vfs);
-
-#endif /* CONFIG_PCI_IOV */
-}
-
-static void igb_init_queue_configuration(struct igb_adapter *adapter)
-{
- struct e1000_hw *hw = &adapter->hw;
- u32 max_rss_queues;
-
- /* Determine the maximum number of RSS queues supported. */
- switch (hw->mac.type) {
- case e1000_i211:
- max_rss_queues = IGB_MAX_RX_QUEUES_I211;
- break;
- case e1000_82575:
- case e1000_i210:
- max_rss_queues = IGB_MAX_RX_QUEUES_82575;
- break;
- case e1000_i350:
- /* I350 cannot do RSS and SR-IOV at the same time */
- if (!!adapter->vfs_allocated_count) {
- max_rss_queues = 1;
- break;
- }
- /* fall through */
- case e1000_82576:
- if (!!adapter->vfs_allocated_count) {
- max_rss_queues = 2;
- break;
- }
- /* fall through */
- case e1000_82580:
- case e1000_i354:
- default:
- max_rss_queues = IGB_MAX_RX_QUEUES;
- break;
- }
-
- adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
-
- /* Determine if we need to pair queues. */
- switch (hw->mac.type) {
- case e1000_82575:
- case e1000_i211:
- /* Device supports enough interrupts without queue pairing. */
- break;
- case e1000_82576:
- /* If VFs are going to be allocated with RSS queues then we
- * should pair the queues in order to conserve interrupts due
- * to limited supply.
- */
- if ((adapter->rss_queues > 1) &&
- (adapter->vfs_allocated_count > 6))
- adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
- /* fall through */
- case e1000_82580:
- case e1000_i350:
- case e1000_i354:
- case e1000_i210:
- default:
- /* If rss_queues > half of max_rss_queues, pair the queues in
- * order to conserve interrupts due to limited supply.
- */
- if (adapter->rss_queues > (max_rss_queues / 2))
- adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
- break;
- }
-}
-
-/**
- * igb_sw_init - Initialize general software structures (struct igb_adapter)
- * @adapter: board private structure to initialize
+ * igb_sw_init - Initialize general software structures (struct igb_adapter)
+ * @adapter: board private structure to initialize
*
- * igb_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
+ * igb_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
**/
static int igb_sw_init(struct igb_adapter *adapter)
{
@@ -2665,84 +3361,78 @@ static int igb_sw_init(struct igb_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+
pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
/* set default ring sizes */
adapter->tx_ring_count = IGB_DEFAULT_TXD;
adapter->rx_ring_count = IGB_DEFAULT_RXD;
- /* set default ITR values */
- adapter->rx_itr_setting = IGB_DEFAULT_ITR;
- adapter->tx_itr_setting = IGB_DEFAULT_ITR;
-
/* set default work limits */
adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN;
- adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ VLAN_HLEN;
- spin_lock_init(&adapter->stats64_lock);
-#ifdef CONFIG_PCI_IOV
- switch (hw->mac.type) {
- case e1000_82576:
- case e1000_i350:
- if (max_vfs > 7) {
- dev_warn(&pdev->dev,
- "Maximum of 7 VFs per PF, using max\n");
- max_vfs = adapter->vfs_allocated_count = 7;
- } else
- adapter->vfs_allocated_count = max_vfs;
- if (adapter->vfs_allocated_count)
- dev_warn(&pdev->dev,
- "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
- break;
- default:
- break;
+ /* Initialize the hardware-specific values */
+ if (e1000_setup_init_funcs(hw, TRUE)) {
+ dev_err(pci_dev_to_dev(pdev), "Hardware Initialization Failure\n");
+ return -EIO;
}
-#endif /* CONFIG_PCI_IOV */
- igb_init_queue_configuration(adapter);
+ igb_check_options(adapter);
+
+ adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) *
+ hw->mac.rar_entry_count,
+ GFP_ATOMIC);
/* Setup and initialize a copy of the hw vlan table array */
- adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
- GFP_ATOMIC);
+ adapter->shadow_vfta = kzalloc(sizeof(u32) * E1000_VFTA_ENTRIES,
+ GFP_ATOMIC);
+
+ /* These calls may decrease the number of queues */
+ if (hw->mac.type < e1000_i210)
+ igb_set_sriov_capability(adapter);
- /* This call may decrease the number of queues */
if (igb_init_interrupt_scheme(adapter, true)) {
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ dev_err(pci_dev_to_dev(pdev), "Unable to allocate memory for queues\n");
return -ENOMEM;
}
- igb_probe_vfs(adapter);
-
/* Explicitly disable IRQ since the NIC can be in any state. */
igb_irq_disable(adapter);
- if (hw->mac.type >= e1000_i350)
- adapter->flags &= ~IGB_FLAG_DMAC;
-
set_bit(__IGB_DOWN, &adapter->state);
return 0;
}
/**
- * igb_open - Called when a network interface is made active
- * @netdev: network interface device structure
+ * igb_open - Called when a network interface is made active
+ * @netdev: network interface device structure
*
- * Returns 0 on success, negative value on failure
+ * Returns 0 on success, negative value on failure
*
- * The open entry point is called when a network interface is made
- * active by the system (IFF_UP). At this point all resources needed
- * for transmit and receive operations are allocated, the interrupt
- * handler is registered with the OS, the watchdog timer is started,
- * and the stack is notified that the interface is ready.
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
**/
static int __igb_open(struct net_device *netdev, bool resuming)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+#ifdef CONFIG_PM_RUNTIME
struct pci_dev *pdev = adapter->pdev;
+#endif /* CONFIG_PM_RUNTIME */
int err;
int i;
@@ -2752,8 +3442,10 @@ static int __igb_open(struct net_device *netdev, bool resuming)
return -EBUSY;
}
+#ifdef CONFIG_PM_RUNTIME
if (!resuming)
pm_runtime_get_sync(&pdev->dev);
+#endif /* CONFIG_PM_RUNTIME */
netif_carrier_off(netdev);
@@ -2781,12 +3473,12 @@ static int __igb_open(struct net_device *netdev, bool resuming)
goto err_req_irq;
/* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(adapter->netdev,
- adapter->num_tx_queues);
- if (err)
- goto err_set_queues;
+ netif_set_real_num_tx_queues(netdev,
+ adapter->vmdq_pools ? 1 :
+ adapter->num_tx_queues);
- err = netif_set_real_num_rx_queues(adapter->netdev,
+ err = netif_set_real_num_rx_queues(netdev,
+ adapter->vmdq_pools ? 1 :
adapter->num_rx_queues);
if (err)
goto err_set_queues;
@@ -2796,29 +3488,31 @@ static int __igb_open(struct net_device *netdev, bool resuming)
for (i = 0; i < adapter->num_q_vectors; i++)
napi_enable(&(adapter->q_vector[i]->napi));
+ igb_configure_lli(adapter);
/* Clear any pending interrupts. */
- rd32(E1000_ICR);
+ E1000_READ_REG(hw, E1000_ICR);
igb_irq_enable(adapter);
/* notify VFs that reset has been completed */
if (adapter->vfs_allocated_count) {
- u32 reg_data = rd32(E1000_CTRL_EXT);
+ u32 reg_data = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
reg_data |= E1000_CTRL_EXT_PFRSTD;
- wr32(E1000_CTRL_EXT, reg_data);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg_data);
}
netif_tx_start_all_queues(netdev);
- if (!resuming)
- pm_runtime_put(&pdev->dev);
+ if (adapter->flags & IGB_FLAG_DETECT_BAD_DMA)
+ schedule_work(&adapter->dma_err_task);
/* start the watchdog. */
hw->mac.get_link_status = 1;
schedule_work(&adapter->watchdog_task);
- return 0;
+ return E1000_SUCCESS;
err_set_queues:
igb_free_irq(adapter);
@@ -2830,59 +3524,72 @@ err_setup_rx:
igb_free_all_tx_resources(adapter);
err_setup_tx:
igb_reset(adapter);
+
+#ifdef CONFIG_PM_RUNTIME
if (!resuming)
pm_runtime_put(&pdev->dev);
+#endif /* CONFIG_PM_RUNTIME */
return err;
}
-static int igb_open(struct net_device *netdev)
+int igb_open(struct net_device *netdev)
{
return __igb_open(netdev, false);
}
/**
- * igb_close - Disables a network interface
- * @netdev: network interface device structure
+ * igb_close - Disables a network interface
+ * @netdev: network interface device structure
*
- * Returns 0, this is not allowed to fail
+ * Returns 0, this is not allowed to fail
*
- * The close entry point is called when an interface is de-activated
- * by the OS. The hardware is still under the driver's control, but
- * needs to be disabled. A global MAC reset is issued to stop the
- * hardware, and all transmit and receive resources are freed.
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
**/
static int __igb_close(struct net_device *netdev, bool suspending)
{
struct igb_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_PM_RUNTIME
struct pci_dev *pdev = adapter->pdev;
+#endif /* CONFIG_PM_RUNTIME */
WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
+#ifdef CONFIG_PM_RUNTIME
if (!suspending)
pm_runtime_get_sync(&pdev->dev);
+#endif /* CONFIG_PM_RUNTIME */
igb_down(adapter);
+
+ igb_release_hw_control(adapter);
+
igb_free_irq(adapter);
igb_free_all_tx_resources(adapter);
igb_free_all_rx_resources(adapter);
+#ifdef CONFIG_PM_RUNTIME
if (!suspending)
pm_runtime_put_sync(&pdev->dev);
+#endif /* CONFIG_PM_RUNTIME */
+
return 0;
}
-static int igb_close(struct net_device *netdev)
+int igb_close(struct net_device *netdev)
{
return __igb_close(netdev, false);
}
/**
- * igb_setup_tx_resources - allocate Tx resources (Descriptors)
- * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ * igb_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
*
- * Return 0 on success, negative on failure
+ * Return 0 on success, negative on failure
**/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
@@ -2890,7 +3597,6 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
int size;
size = sizeof(struct igb_tx_buffer) * tx_ring->count;
-
tx_ring->tx_buffer_info = vzalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
@@ -2901,6 +3607,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
+
if (!tx_ring->desc)
goto err;
@@ -2911,17 +3618,17 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
err:
vfree(tx_ring->tx_buffer_info);
- tx_ring->tx_buffer_info = NULL;
- dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
+ dev_err(dev,
+ "Unable to allocate memory for the transmit descriptor ring\n");
return -ENOMEM;
}
/**
- * igb_setup_all_tx_resources - wrapper to allocate Tx resources
- * (Descriptors) for all queues
- * @adapter: board private structure
+ * igb_setup_all_tx_resources - wrapper to allocate Tx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
*
- * Return 0 on success, negative on failure
+ * Return 0 on success, negative on failure
**/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
@@ -2931,7 +3638,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
err = igb_setup_tx_resources(adapter->tx_ring[i]);
if (err) {
- dev_err(&pdev->dev,
+ dev_err(pci_dev_to_dev(pdev),
"Allocation for Tx Queue %u failed\n", i);
for (i--; i >= 0; i--)
igb_free_tx_resources(adapter->tx_ring[i]);
@@ -2943,8 +3650,8 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
}
/**
- * igb_setup_tctl - configure the transmit control registers
- * @adapter: Board private structure
+ * igb_setup_tctl - configure the transmit control registers
+ * @adapter: Board private structure
**/
void igb_setup_tctl(struct igb_adapter *adapter)
{
@@ -2952,31 +3659,48 @@ void igb_setup_tctl(struct igb_adapter *adapter)
u32 tctl;
/* disable queue 0 which is enabled by default on 82575 and 82576 */
- wr32(E1000_TXDCTL(0), 0);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(0), 0);
/* Program the Transmit Control Register */
- tctl = rd32(E1000_TCTL);
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
tctl &= ~E1000_TCTL_CT;
tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
- igb_config_collision_dist(hw);
+ e1000_config_collision_dist(hw);
/* Enable transmits */
tctl |= E1000_TCTL_EN;
- wr32(E1000_TCTL, tctl);
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+}
+
+static u32 igb_tx_wthresh(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ switch (hw->mac.type) {
+ case e1000_i354:
+ return 4;
+ case e1000_82576:
+ if (adapter->msix_entries)
+ return 1;
+ default:
+ break;
+ }
+
+ return 16;
}
/**
- * igb_configure_tx_ring - Configure transmit ring after Reset
- * @adapter: board private structure
- * @ring: tx ring to configure
+ * igb_configure_tx_ring - Configure transmit ring after Reset
+ * @adapter: board private structure
+ * @ring: tx ring to configure
*
- * Configure a transmit ring after a reset.
+ * Configure a transmit ring after a reset.
**/
void igb_configure_tx_ring(struct igb_adapter *adapter,
- struct igb_ring *ring)
+ struct igb_ring *ring)
{
struct e1000_hw *hw = &adapter->hw;
u32 txdctl = 0;
@@ -2984,33 +3708,33 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
int reg_idx = ring->reg_idx;
/* disable the queue */
- wr32(E1000_TXDCTL(reg_idx), 0);
- wrfl();
+ E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), 0);
+ E1000_WRITE_FLUSH(hw);
mdelay(10);
- wr32(E1000_TDLEN(reg_idx),
- ring->count * sizeof(union e1000_adv_tx_desc));
- wr32(E1000_TDBAL(reg_idx),
- tdba & 0x00000000ffffffffULL);
- wr32(E1000_TDBAH(reg_idx), tdba >> 32);
+ E1000_WRITE_REG(hw, E1000_TDLEN(reg_idx),
+ ring->count * sizeof(union e1000_adv_tx_desc));
+ E1000_WRITE_REG(hw, E1000_TDBAL(reg_idx),
+ tdba & 0x00000000ffffffffULL);
+ E1000_WRITE_REG(hw, E1000_TDBAH(reg_idx), tdba >> 32);
- ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
- wr32(E1000_TDH(reg_idx), 0);
+ ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
+ E1000_WRITE_REG(hw, E1000_TDH(reg_idx), 0);
writel(0, ring->tail);
txdctl |= IGB_TX_PTHRESH;
txdctl |= IGB_TX_HTHRESH << 8;
- txdctl |= IGB_TX_WTHRESH << 16;
+ txdctl |= igb_tx_wthresh(adapter) << 16;
txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
- wr32(E1000_TXDCTL(reg_idx), txdctl);
+ E1000_WRITE_REG(hw, E1000_TXDCTL(reg_idx), txdctl);
}
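
The TXDCTL value written at the end of igb_configure_tx_ring() is a bit-field: prefetch threshold in the low bits, host threshold shifted by 8, the now mac-type-dependent writeback threshold shifted by 16, plus a queue-enable flag. A minimal user-space sketch of that packing, using placeholder threshold values and a placeholder enable-bit position rather than the driver's E1000_* definitions:

/* Illustrative only: shows how the per-queue TXDCTL word is assembled. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t pthresh = 8, hthresh = 1, wthresh = 16;   /* example values */
        uint32_t queue_enable = 1u << 25;                  /* placeholder bit */
        uint32_t txdctl = pthresh | (hthresh << 8) |
                          (wthresh << 16) | queue_enable;

        printf("TXDCTL = 0x%08x\n", txdctl);
        return 0;
}
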
/**
- * igb_configure_tx - Configure transmit Unit after Reset
- * @adapter: board private structure
+ * igb_configure_tx - Configure transmit Unit after Reset
+ * @adapter: board private structure
*
- * Configure the Tx unit of the MAC after a reset.
+ * Configure the Tx unit of the MAC after a reset.
**/
static void igb_configure_tx(struct igb_adapter *adapter)
{
@@ -3021,28 +3745,30 @@ static void igb_configure_tx(struct igb_adapter *adapter)
}
/**
- * igb_setup_rx_resources - allocate Rx resources (Descriptors)
- * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ * igb_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
*
- * Returns 0 on success, negative on failure
+ * Returns 0 on success, negative on failure
**/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- int size;
+ int size, desc_len;
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
-
rx_ring->rx_buffer_info = vzalloc(size);
if (!rx_ring->rx_buffer_info)
goto err;
+ desc_len = sizeof(union e1000_adv_rx_desc);
+
/* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
+ rx_ring->size = rx_ring->count * desc_len;
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
+
if (!rx_ring->desc)
goto err;
@@ -3055,16 +3781,17 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
err:
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
+ dev_err(dev,
+ "Unable to allocate memory for the receive descriptor ring\n");
return -ENOMEM;
}
/**
- * igb_setup_all_rx_resources - wrapper to allocate Rx resources
- * (Descriptors) for all queues
- * @adapter: board private structure
+ * igb_setup_all_rx_resources - wrapper to allocate Rx resources
+ * (Descriptors) for all queues
+ * @adapter: board private structure
*
- * Return 0 on success, negative on failure
+ * Return 0 on success, negative on failure
**/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
@@ -3074,7 +3801,7 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
err = igb_setup_rx_resources(adapter->rx_ring[i]);
if (err) {
- dev_err(&pdev->dev,
+ dev_err(pci_dev_to_dev(pdev),
"Allocation for Rx Queue %u failed\n", i);
for (i--; i >= 0; i--)
igb_free_rx_resources(adapter->rx_ring[i]);
@@ -3086,14 +3813,17 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
}
/**
- * igb_setup_mrqc - configure the multiple receive queue control registers
- * @adapter: Board private structure
+ * igb_setup_mrqc - configure the multiple receive queue control registers
+ * @adapter: Board private structure
**/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 mrqc, rxcsum;
- u32 j, num_rx_queues, shift = 0;
+ u32 j, num_rx_queues;
+#ifndef ETHTOOL_SRXFHINDIR
+ u32 shift = 0, shift2 = 0;
+#endif /* ETHTOOL_SRXFHINDIR */
static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
0xA32DCB77, 0x0CF23080, 0x3BB7426A,
@@ -3101,17 +3831,36 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
/* Fill out hash function seeds */
for (j = 0; j < 10; j++)
- wr32(E1000_RSSRK(j), rsskey[j]);
+ E1000_WRITE_REG(hw, E1000_RSSRK(j), rsskey[j]);
num_rx_queues = adapter->rss_queues;
+#ifdef ETHTOOL_SRXFHINDIR
+ if (hw->mac.type == e1000_82576) {
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count)
+ num_rx_queues = 2;
+ }
+ if (adapter->rss_indir_tbl_init != num_rx_queues) {
+ for (j = 0; j < IGB_RETA_SIZE; j++)
+ adapter->rss_indir_tbl[j] =
+ (j * num_rx_queues) / IGB_RETA_SIZE;
+ adapter->rss_indir_tbl_init = num_rx_queues;
+ }
+ igb_write_rss_indir_tbl(adapter);
+#else
+ /* 82575 and 82576 support 2 RSS queues for VMDq */
switch (hw->mac.type) {
case e1000_82575:
+ if (adapter->vmdq_pools) {
+ shift = 2;
+ shift2 = 6;
+ break;
+ }
shift = 6;
break;
case e1000_82576:
/* 82576 supports 2 RSS queues for SR-IOV */
- if (adapter->vfs_allocated_count) {
+ if (adapter->vfs_allocated_count || adapter->vmdq_pools) {
shift = 3;
num_rx_queues = 2;
}
@@ -3120,7 +3869,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
break;
}
- /* Populate the indirection table 4 entries at a time. To do this
+ /*
+ * Populate the redirection table 4 entries at a time. To do this
* we are generating the results for n and n+2 and then interleaving
* those with the results with n+1 and n+3.
*/
@@ -3133,14 +3883,20 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
base += 0x00010001 * num_rx_queues;
reta |= (base & 0x07800780) << (1 + shift);
- wr32(E1000_RETA(j), reta);
+ /* generate 2nd table for 82575 based parts */
+ if (shift2)
+ reta |= (0x01010101 * num_rx_queues) << shift2;
+
+ E1000_WRITE_REG(hw, E1000_RETA(j), reta);
}
+#endif /* ETHTOOL_SRXFHINDIR */
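
The ETHTOOL_SRXFHINDIR branch above seeds the RSS indirection table by mapping entry j to queue (j * num_rx_queues) / IGB_RETA_SIZE, which spreads flows evenly across the active queues. A stand-alone sketch of that fill, assuming the driver's 128-entry table size; the queue count is an arbitrary example:

/* User-space sketch of the rss_indir_tbl fill; only the mapping formula is
 * taken from the code above, everything else is illustrative. */
#include <stdio.h>

#define RETA_SIZE 128

int main(void)
{
        unsigned int num_rx_queues = 4;     /* example active Rx queue count */
        unsigned char reta[RETA_SIZE];
        unsigned int j;

        for (j = 0; j < RETA_SIZE; j++)
                reta[j] = (j * num_rx_queues) / RETA_SIZE;

        for (j = 0; j < RETA_SIZE; j += 16)
                printf("reta[%3u] -> queue %u\n", j, (unsigned int)reta[j]);
        return 0;
}
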
- /* Disable raw packet checksumming so that RSS hash is placed in
+ /*
+ * Disable raw packet checksumming so that RSS hash is placed in
* descriptor on writeback. No need to enable TCP/UDP/IP checksum
* offloads as they are enabled by default
*/
- rxcsum = rd32(E1000_RXCSUM);
+ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
rxcsum |= E1000_RXCSUM_PCSD;
if (adapter->hw.mac.type >= e1000_82576)
@@ -3148,7 +3904,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
rxcsum |= E1000_RXCSUM_CRCOFL;
/* Don't need to set TUOFL or IPOFL, they default to 1 */
- wr32(E1000_RXCSUM, rxcsum);
+ E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
/* Generate RSS hash based on packet types, TCP/UDP
* port numbers and/or IPv4/v6 src and dst addresses
@@ -3168,39 +3924,39 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
* we default to RSS so that an RSS hash is calculated per packet even
* if we are only using one queue
*/
- if (adapter->vfs_allocated_count) {
+ if (adapter->vfs_allocated_count || adapter->vmdq_pools) {
if (hw->mac.type > e1000_82575) {
/* Set the default pool for the PF's first queue */
- u32 vtctl = rd32(E1000_VT_CTL);
+ u32 vtctl = E1000_READ_REG(hw, E1000_VT_CTL);
+
vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
E1000_VT_CTL_DISABLE_DEF_POOL);
vtctl |= adapter->vfs_allocated_count <<
E1000_VT_CTL_DEFAULT_POOL_SHIFT;
- wr32(E1000_VT_CTL, vtctl);
+ E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl);
}
if (adapter->rss_queues > 1)
mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
else
mrqc |= E1000_MRQC_ENABLE_VMDQ;
} else {
- if (hw->mac.type != e1000_i211)
- mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
+ mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
}
igb_vmm_control(adapter);
- wr32(E1000_MRQC, mrqc);
+ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
}
/**
- * igb_setup_rctl - configure the receive control registers
- * @adapter: Board private structure
+ * igb_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
**/
void igb_setup_rctl(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 rctl;
- rctl = rd32(E1000_RCTL);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
@@ -3208,7 +3964,8 @@ void igb_setup_rctl(struct igb_adapter *adapter)
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
- /* enable stripping of CRC. It's unlikely this will break BMC
+ /*
+ * enable stripping of CRC. It's unlikely this will break BMC
* redirection as it did with e1000. Newer features require
* that the HW strips the CRC.
*/
@@ -3221,7 +3978,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
rctl |= E1000_RCTL_LPE;
/* disable queue 0 to prevent tail write w/o re-config */
- wr32(E1000_RXDCTL(0), 0);
+ E1000_WRITE_REG(hw, E1000_RXDCTL(0), 0);
/* Attention!!! For SR-IOV PF driver operations you must enable
* queue drop for all VF and PF queues to prevent head of line blocking
@@ -3229,31 +3986,14 @@ void igb_setup_rctl(struct igb_adapter *adapter)
*/
if (adapter->vfs_allocated_count) {
/* set all queue drop enable bits */
- wr32(E1000_QDE, ALL_QUEUES);
+ E1000_WRITE_REG(hw, E1000_QDE, ALL_QUEUES);
}
- /* This is useful for sniffing bad packets. */
- if (adapter->netdev->features & NETIF_F_RXALL) {
- /* UPE and MPE will be handled by normal PROMISC logic
- * in e1000e_set_rx_mode
- */
- rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
- E1000_RCTL_BAM | /* RX All Bcast Pkts */
- E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
-
- rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
- E1000_RCTL_DPF | /* Allow filtered pause */
- E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
- /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
- * and that breaks VLANs.
- */
- }
-
- wr32(E1000_RCTL, rctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
- int vfn)
+ int vfn)
{
struct e1000_hw *hw = &adapter->hw;
u32 vmolr;
@@ -3263,21 +4003,31 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
*/
if (vfn < adapter->vfs_allocated_count &&
adapter->vf_data[vfn].vlans_enabled)
- size += VLAN_TAG_SIZE;
+ size += VLAN_HLEN;
- vmolr = rd32(E1000_VMOLR(vfn));
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ if (vfn >= adapter->vfs_allocated_count) {
+ int queue = vfn - adapter->vfs_allocated_count;
+ struct igb_vmdq_adapter *vadapter;
+
+ vadapter = netdev_priv(adapter->vmdq_netdev[queue-1]);
+ if (vadapter->vlgrp)
+ size += VLAN_HLEN;
+ }
+#endif
+ vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn));
vmolr &= ~E1000_VMOLR_RLPML_MASK;
vmolr |= size | E1000_VMOLR_LPE;
- wr32(E1000_VMOLR(vfn), vmolr);
+ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
return 0;
}
/**
- * igb_rlpml_set - set maximum receive packet size
- * @adapter: board private structure
+ * igb_rlpml_set - set maximum receive packet size
+ * @adapter: board private structure
*
- * Configure maximum receivable packet size.
+ * Configure maximum receivable packet size.
**/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
@@ -3285,9 +4035,13 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u16 pf_id = adapter->vfs_allocated_count;
- if (pf_id) {
- igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
- /* If we're in VMDQ or SR-IOV mode, then set global RLPML
+ if (adapter->vmdq_pools && hw->mac.type != e1000_82575) {
+ int i;
+
+ for (i = 0; i < adapter->vmdq_pools; i++)
+ igb_set_vf_rlpml(adapter, max_frame_size, pf_id + i);
+ /*
+ * If we're in VMDQ or SR-IOV mode, then set global RLPML
* to our max jumbo frame size, in case we need to enable
* jumbo frames on one of the rings later.
* This will not pass over-length frames into the default
@@ -3295,49 +4049,73 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
*/
max_frame_size = MAX_JUMBO_FRAME_SIZE;
}
+ /* Set VF RLPML for the PF device. */
+ if (adapter->vfs_allocated_count)
+ igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
- wr32(E1000_RLPML, max_frame_size);
+ E1000_WRITE_REG(hw, E1000_RLPML, max_frame_size);
}
+static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
+ int vfn, bool enable)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 val;
+ void __iomem *reg;
+
+ if (hw->mac.type < e1000_82576)
+ return;
+
+ if (hw->mac.type == e1000_i350)
+ reg = hw->hw_addr + E1000_DVMOLR(vfn);
+ else
+ reg = hw->hw_addr + E1000_VMOLR(vfn);
+
+ val = readl(reg);
+ if (enable)
+ val |= E1000_VMOLR_STRVLAN;
+ else
+ val &= ~(E1000_VMOLR_STRVLAN);
+ writel(val, reg);
+}
static inline void igb_set_vmolr(struct igb_adapter *adapter,
int vfn, bool aupe)
{
struct e1000_hw *hw = &adapter->hw;
u32 vmolr;
- /* This register exists only on 82576 and newer so if we are older then
+ /*
+ * This register exists only on 82576 and newer so if we are older then
* we should exit and do nothing
*/
if (hw->mac.type < e1000_82576)
return;
- vmolr = rd32(E1000_VMOLR(vfn));
- vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+ vmolr = E1000_READ_REG(hw, E1000_VMOLR(vfn));
+
if (aupe)
- vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
+ vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
else
vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
/* clear all bits that might not be set */
- vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
+ vmolr &= ~E1000_VMOLR_RSSE;
if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
- /* for VMDq only allow the VFs and pool 0 to accept broadcast and
- * multicast packets
- */
- if (vfn <= adapter->vfs_allocated_count)
- vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
- wr32(E1000_VMOLR(vfn), vmolr);
+ vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
+ vmolr |= E1000_VMOLR_LPE; /* Accept long packets */
+
+ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
}
/**
- * igb_configure_rx_ring - Configure a receive ring after Reset
- * @adapter: board private structure
- * @ring: receive ring to be configured
+ * igb_configure_rx_ring - Configure a receive ring after Reset
+ * @adapter: board private structure
+ * @ring: receive ring to be configured
*
- * Configure the Rx unit of the MAC after a reset.
+ * Configure the Rx unit of the MAC after a reset.
**/
void igb_configure_rx_ring(struct igb_adapter *adapter,
struct igb_ring *ring)
@@ -3347,32 +4125,67 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
int reg_idx = ring->reg_idx;
u32 srrctl = 0, rxdctl = 0;
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ /*
+ * RLPML prevents us from receiving a frame larger than max_frame so
+ * it is safe to just set the rx_buffer_len to max_frame without the
+ * risk of an skb over panic.
+ */
+ ring->rx_buffer_len = max_t(u32, adapter->max_frame_size,
+ MAXIMUM_ETHERNET_VLAN_SIZE);
+
+#endif
/* disable the queue */
- wr32(E1000_RXDCTL(reg_idx), 0);
+ E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), 0);
/* Set DMA base address registers */
- wr32(E1000_RDBAL(reg_idx),
- rdba & 0x00000000ffffffffULL);
- wr32(E1000_RDBAH(reg_idx), rdba >> 32);
- wr32(E1000_RDLEN(reg_idx),
- ring->count * sizeof(union e1000_adv_rx_desc));
+ E1000_WRITE_REG(hw, E1000_RDBAL(reg_idx),
+ rdba & 0x00000000ffffffffULL);
+ E1000_WRITE_REG(hw, E1000_RDBAH(reg_idx), rdba >> 32);
+ E1000_WRITE_REG(hw, E1000_RDLEN(reg_idx),
+ ring->count * sizeof(union e1000_adv_rx_desc));
/* initialize head and tail */
- ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
- wr32(E1000_RDH(reg_idx), 0);
+ ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
+ E1000_WRITE_REG(hw, E1000_RDH(reg_idx), 0);
writel(0, ring->tail);
+ /* reset next-to-use/clean to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ ring->next_to_alloc = 0;
+
+#endif
/* set descriptor configuration */
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+ srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
+ E1000_SRRCTL_BSIZEPKT_SHIFT;
+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+#ifdef HAVE_PTP_1588_CLOCK
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
- /* Only set Drop Enable if we are supporting multiple queues */
- if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
+#endif /* HAVE_PTP_1588_CLOCK */
+ /*
+ * We should set the drop enable bit if:
+ * SR-IOV is enabled
+ * or
+ * Flow Control is disabled and number of RX queues > 1
+ *
+ * This allows us to avoid head of line blocking for security
+ * and performance reasons.
+ */
+ if (adapter->vfs_allocated_count ||
+ (adapter->num_rx_queues > 1 &&
+ (hw->fc.requested_mode == e1000_fc_none ||
+ hw->fc.requested_mode == e1000_fc_rx_pause)))
srrctl |= E1000_SRRCTL_DROP_EN;
- wr32(E1000_SRRCTL(reg_idx), srrctl);
+ E1000_WRITE_REG(hw, E1000_SRRCTL(reg_idx), srrctl);
/* set filtering for VMDQ pools */
igb_set_vmolr(adapter, reg_idx & 0x7, true);
@@ -3383,14 +4196,14 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
/* enable receive descriptor fetching */
rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
- wr32(E1000_RXDCTL(reg_idx), rxdctl);
+ E1000_WRITE_REG(hw, E1000_RXDCTL(reg_idx), rxdctl);
}
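
The drop-enable comment in igb_configure_rx_ring() above reduces to a small predicate: set SRRCTL drop enable when SR-IOV is active, or when there is more than one Rx queue and flow control will not pause the link partner. A toy version of that decision, with plain parameters standing in for the driver's structures (the names are invented for the sketch):

/* Minimal sketch of the drop-enable decision described in the comment. */
#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static bool want_drop_en(int num_vfs, int num_rx_queues, enum fc_mode fc)
{
        /* SR-IOV enabled, or multiqueue with flow control off / rx-only */
        return num_vfs ||
               (num_rx_queues > 1 && (fc == FC_NONE || fc == FC_RX_PAUSE));
}

int main(void)
{
        printf("%d\n", want_drop_en(0, 4, FC_NONE));   /* 1: multiqueue, no FC */
        printf("%d\n", want_drop_en(0, 1, FC_NONE));   /* 0: single queue */
        printf("%d\n", want_drop_en(2, 1, FC_FULL));   /* 1: SR-IOV enabled */
        return 0;
}
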
/**
- * igb_configure_rx - Configure receive Unit after Reset
- * @adapter: board private structure
+ * igb_configure_rx - Configure receive Unit after Reset
+ * @adapter: board private structure
*
- * Configure the Rx unit of the MAC after a reset.
+ * Configure the Rx unit of the MAC after a reset.
**/
static void igb_configure_rx(struct igb_adapter *adapter)
{
@@ -3411,10 +4224,10 @@ static void igb_configure_rx(struct igb_adapter *adapter)
}
/**
- * igb_free_tx_resources - Free Tx Resources per Queue
- * @tx_ring: Tx descriptor ring for a specific queue
+ * igb_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
*
- * Free all transmit software resources
+ * Free all transmit software resources
**/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
@@ -3434,10 +4247,10 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
}
/**
- * igb_free_all_tx_resources - Free Tx Resources for All Queues
- * @adapter: board private structure
+ * igb_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
*
- * Free all transmit software resources
+ * Free all transmit software resources
**/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
@@ -3459,9 +4272,9 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
DMA_TO_DEVICE);
} else if (dma_unmap_len(tx_buffer, len)) {
dma_unmap_page(ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
}
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
@@ -3470,8 +4283,8 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
}
/**
- * igb_clean_tx_ring - Free Tx Buffers
- * @tx_ring: ring to be cleaned
+ * igb_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
**/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
@@ -3501,8 +4314,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
}
/**
- * igb_clean_all_tx_rings - Free Tx Buffers for all queues
- * @adapter: board private structure
+ * igb_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
**/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
@@ -3513,10 +4326,10 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
}
/**
- * igb_free_rx_resources - Free Rx Resources
- * @rx_ring: ring to clean the resources from
+ * igb_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
*
- * Free all receive software resources
+ * Free all receive software resources
**/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
@@ -3536,10 +4349,10 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
}
/**
- * igb_free_all_rx_resources - Free Rx Resources for All Queues
- * @adapter: board private structure
+ * igb_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
*
- * Free all receive software resources
+ * Free all receive software resources
**/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
@@ -3550,25 +4363,40 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
}
/**
- * igb_clean_rx_ring - Free Rx Buffers per Queue
- * @rx_ring: ring to free buffers from
+ * igb_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
**/
-static void igb_clean_rx_ring(struct igb_ring *rx_ring)
+void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
unsigned long size;
u16 i;
+ if (!rx_ring->rx_buffer_info)
+ return;
+
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
if (rx_ring->skb)
dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
- if (!rx_ring->rx_buffer_info)
- return;
-
+#endif
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ if (buffer_info->dma) {
+ dma_unmap_single(rx_ring->dev,
+ buffer_info->dma,
+ rx_ring->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ buffer_info->dma = 0;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+#else
if (!buffer_info->page)
continue;
@@ -3579,6 +4407,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
__free_page(buffer_info->page);
buffer_info->page = NULL;
+#endif
}
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
@@ -3593,8 +4422,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
}
/**
- * igb_clean_all_rx_rings - Free Rx Buffers for all queues
- * @adapter: board private structure
+ * igb_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
**/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
@@ -3605,11 +4434,11 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
}
/**
- * igb_set_mac - Change the Ethernet Address of the NIC
- * @netdev: network interface device structure
- * @p: pointer to an address structure
+ * igb_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
*
- * Returns 0 on success, negative on failure
+ * Returns 0 on success, negative on failure
**/
static int igb_set_mac(struct net_device *netdev, void *p)
{
@@ -3631,52 +4460,147 @@ static int igb_set_mac(struct net_device *netdev, void *p)
}
/**
- * igb_write_mc_addr_list - write multicast addresses to MTA
- * @netdev: network interface device structure
+ * igb_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
*
- * Writes multicast address list to the MTA hash table.
- * Returns: -ENOMEM on failure
- * 0 on no addresses written
- * X on writing X addresses to MTA
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
**/
-static int igb_write_mc_addr_list(struct net_device *netdev)
+int igb_write_mc_addr_list(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
struct netdev_hw_addr *ha;
+#else
+ struct dev_mc_list *ha;
+#endif
u8 *mta_list;
- int i;
+ int i, count;
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ int vm;
+#endif
+ count = netdev_mc_count(netdev);
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ for (vm = 1; vm < adapter->vmdq_pools; vm++) {
+ if (!adapter->vmdq_netdev[vm])
+ break;
+ if (!netif_running(adapter->vmdq_netdev[vm]))
+ continue;
+ count += netdev_mc_count(adapter->vmdq_netdev[vm]);
+ }
+#endif
- if (netdev_mc_empty(netdev)) {
- /* nothing to program, so clear mc list */
- igb_update_mc_addr_list(hw, NULL, 0);
- igb_restore_vf_multicasts(adapter);
+ if (!count) {
+ e1000_update_mc_addr_list(hw, NULL, 0);
return 0;
}
-
- mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
+ mta_list = kzalloc(count * 6, GFP_ATOMIC);
if (!mta_list)
return -ENOMEM;
/* The shared function expects a packed array of only addresses. */
i = 0;
netdev_for_each_mc_addr(ha, netdev)
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
-
- igb_update_mc_addr_list(hw, mta_list, i);
+#else
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->dmi_addr, ETH_ALEN);
+#endif
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ for (vm = 1; vm < adapter->vmdq_pools; vm++) {
+ if (!adapter->vmdq_netdev[vm])
+ break;
+ if (!netif_running(adapter->vmdq_netdev[vm]) ||
+ !netdev_mc_count(adapter->vmdq_netdev[vm]))
+ continue;
+ netdev_for_each_mc_addr(ha, adapter->vmdq_netdev[vm])
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+ memcpy(mta_list + (i++ * ETH_ALEN),
+ ha->addr, ETH_ALEN);
+#else
+ memcpy(mta_list + (i++ * ETH_ALEN),
+ ha->dmi_addr, ETH_ALEN);
+#endif
+ }
+#endif
+ e1000_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
- return netdev_mc_count(netdev);
+ return count;
+}
+
+void igb_full_sync_mac_table(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+
+ for (i = 0; i < hw->mac.rar_entry_count; i++)
+ igb_rar_set(adapter, i);
+}
+
+void igb_sync_mac_table(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
+ if (adapter->mac_table[i].state & IGB_MAC_STATE_MODIFIED)
+ igb_rar_set(adapter, i);
+ adapter->mac_table[i].state &= ~(IGB_MAC_STATE_MODIFIED);
+ }
+}
+
+int igb_available_rars(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i, count = 0;
+
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
+ if (adapter->mac_table[i].state == 0)
+ count++;
+ }
+ return count;
+}
+
+static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
+ u8 qsel)
+{
+ u32 rar_low, rar_high;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* HW expects these in little endian so we reverse the byte order
+ * from network order (big endian) to little endian
+ */
+ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+ ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+ rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+ /* Indicate to hardware the Address is Valid. */
+ rar_high |= E1000_RAH_AV;
+
+ if (hw->mac.type == e1000_82575)
+ rar_high |= E1000_RAH_POOL_1 * qsel;
+ else
+ rar_high |= E1000_RAH_POOL_1 << qsel;
+
+ E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
+ E1000_WRITE_FLUSH(hw);
+ E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
+ E1000_WRITE_FLUSH(hw);
}
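
igb_rar_set_qsel() folds the six network-order address bytes into the little-endian RAL/RAH register pair and marks the entry valid. The user-space illustration below mirrors only the byte packing; the address-valid bit position is a placeholder rather than the driver's E1000_RAH_AV constant:

/* Stand-alone illustration of the RAL/RAH packing performed above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t addr[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc }; /* example */
        uint32_t rar_low, rar_high;

        rar_low  = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
                   ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
        rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
        rar_high |= 1u << 31;   /* placeholder for the address-valid bit */

        printf("RAL = 0x%08x, RAH = 0x%08x\n", rar_low, rar_high);
        return 0;
}
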
+#ifdef HAVE_SET_RX_MODE
/**
- * igb_write_uc_addr_list - write unicast addresses to RAR table
- * @netdev: network interface device structure
+ * igb_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
*
- * Writes unicast address list to the RAR table.
- * Returns: -ENOMEM on failure/insufficient address space
- * 0 on no addresses written
- * X on writing X addresses to the RAR table
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
**/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
@@ -3687,39 +4611,48 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
int count = 0;
/* return ENOMEM indicating insufficient memory for addresses */
- if (netdev_uc_count(netdev) > rar_entries)
+ if (netdev_uc_count(netdev) > igb_available_rars(adapter))
return -ENOMEM;
-
if (!netdev_uc_empty(netdev) && rar_entries) {
+#ifdef NETDEV_HW_ADDR_T_UNICAST
struct netdev_hw_addr *ha;
-
+#else
+ struct dev_mc_list *ha;
+#endif
netdev_for_each_uc_addr(ha, netdev) {
+#ifdef NETDEV_HW_ADDR_T_UNICAST
if (!rar_entries)
break;
igb_rar_set_qsel(adapter, ha->addr,
rar_entries--,
vfn);
+#else
+ igb_rar_set_qsel(adapter, ha->da_addr,
+ rar_entries--,
+ vfn);
+#endif
count++;
}
}
+
/* write the addresses in reverse order to avoid write combining */
for (; rar_entries > 0 ; rar_entries--) {
- wr32(E1000_RAH(rar_entries), 0);
- wr32(E1000_RAL(rar_entries), 0);
+ E1000_WRITE_REG(hw, E1000_RAH(rar_entries), 0);
+ E1000_WRITE_REG(hw, E1000_RAL(rar_entries), 0);
}
- wrfl();
-
+ E1000_WRITE_FLUSH(hw);
return count;
}
+#endif /* HAVE_SET_RX_MODE */
/**
- * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
- * @netdev: network interface device structure
+ * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
*
- * The set_rx_mode entry point is called whenever the unicast or multicast
- * address lists or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper unicast, multicast,
- * promiscuous mode, and all-multi behavior.
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
**/
static void igb_set_rx_mode(struct net_device *netdev)
{
@@ -3730,24 +4663,24 @@ static void igb_set_rx_mode(struct net_device *netdev)
int count;
/* Check for Promiscuous and All Multicast modes */
- rctl = rd32(E1000_RCTL);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
/* clear the effected bits */
rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
if (netdev->flags & IFF_PROMISC) {
- u32 mrqc = rd32(E1000_MRQC);
- /* retain VLAN HW filtering if in VT mode */
- if (mrqc & E1000_MRQC_ENABLE_VMDQ)
- rctl |= E1000_RCTL_VFE;
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
+ /* retain VLAN HW filtering if in VT mode */
+ if (adapter->vfs_allocated_count || adapter->vmdq_pools)
+ rctl |= E1000_RCTL_VFE;
} else {
if (netdev->flags & IFF_ALLMULTI) {
rctl |= E1000_RCTL_MPE;
vmolr |= E1000_VMOLR_MPME;
} else {
- /* Write addresses to the MTA, if the attempt fails
+ /*
+ * Write addresses to the MTA, if the attempt fails
* then we should just turn on promiscuous mode so
* that we can at least receive multicast traffic
*/
@@ -3759,7 +4692,9 @@ static void igb_set_rx_mode(struct net_device *netdev)
vmolr |= E1000_VMOLR_ROMPE;
}
}
- /* Write addresses to available RAR registers, if there is not
+#ifdef HAVE_SET_RX_MODE
+ /*
+ * Write addresses to available RAR registers, if there is not
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
@@ -3768,21 +4703,23 @@ static void igb_set_rx_mode(struct net_device *netdev)
rctl |= E1000_RCTL_UPE;
vmolr |= E1000_VMOLR_ROPE;
}
+#endif /* HAVE_SET_RX_MODE */
rctl |= E1000_RCTL_VFE;
}
- wr32(E1000_RCTL, rctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
- /* In order to support SR-IOV and eventually VMDq it is necessary to set
+ /*
+ * In order to support SR-IOV and eventually VMDq it is necessary to set
* the VMOLR to enable the appropriate modes. Without this workaround
* we will have issues with VLAN tag stripping not being done for frames
* that are only arriving because we are the default pool
*/
- if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
+ if (hw->mac.type < e1000_82576)
return;
- vmolr |= rd32(E1000_VMOLR(vfn)) &
- ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
- wr32(E1000_VMOLR(vfn), vmolr);
+ vmolr |= E1000_READ_REG(hw, E1000_VMOLR(vfn)) &
+ ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
+ E1000_WRITE_REG(hw, E1000_VMOLR(vfn), vmolr);
igb_restore_vf_multicasts(adapter);
}
@@ -3794,7 +4731,8 @@ static void igb_check_wvbr(struct igb_adapter *adapter)
switch (hw->mac.type) {
case e1000_82576:
case e1000_i350:
- if (!(wvbr = rd32(E1000_WVBR)))
+ wvbr = E1000_READ_REG(hw, E1000_WVBR);
+ if (!wvbr)
return;
break;
default:
@@ -3813,15 +4751,34 @@ static void igb_spoof_check(struct igb_adapter *adapter)
if (!adapter->wvbr)
return;
- for(j = 0; j < adapter->vfs_allocated_count; j++) {
- if (adapter->wvbr & (1 << j) ||
- adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
- dev_warn(&adapter->pdev->dev,
- "Spoof event(s) detected on VF %d\n", j);
- adapter->wvbr &=
- ~((1 << j) |
- (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
+ switch (adapter->hw.mac.type) {
+ case e1000_82576:
+ for (j = 0; j < adapter->vfs_allocated_count; j++) {
+ if (adapter->wvbr & (1 << j) ||
+     adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
+ DPRINTK(DRV, WARNING,
+ "Spoof event(s) detected on VF %d\n", j);
+ adapter->wvbr &= ~((1 << j) |
+     (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
+ }
}
+ break;
+ case e1000_i350:
+ for (j = 0; j < adapter->vfs_allocated_count; j++) {
+ if (adapter->wvbr & (1 << j)) {
+ DPRINTK(DRV, WARNING,
+ "Spoof event(s) detected on VF %d\n",
+ j);
+ adapter->wvbr &= ~(1 << j);
+ }
+ }
+ break;
+ default:
+ break;
}
}
@@ -3831,18 +4788,18 @@ static void igb_spoof_check(struct igb_adapter *adapter)
static void igb_update_phy_info(unsigned long data)
{
struct igb_adapter *adapter = (struct igb_adapter *) data;
- igb_get_phy_info(&adapter->hw);
+
+ e1000_get_phy_info(&adapter->hw);
}
/**
- * igb_has_link - check shared code for link and determine up/down
- * @adapter: pointer to driver private info
+ * igb_has_link - check shared code for link and determine up/down
+ * @adapter: pointer to driver private info
**/
bool igb_has_link(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- bool link_active = false;
- s32 ret_val = 0;
+ bool link_active = FALSE;
/* get_link_status is set on LSC (link status) interrupt or
* rx sequence error interrupt. get_link_status will stay
@@ -3851,46 +4808,34 @@ bool igb_has_link(struct igb_adapter *adapter)
*/
switch (hw->phy.media_type) {
case e1000_media_type_copper:
- if (hw->mac.get_link_status) {
- ret_val = hw->mac.ops.check_for_link(hw);
- link_active = !hw->mac.get_link_status;
- } else {
- link_active = true;
- }
- break;
+ if (!hw->mac.get_link_status)
+ return true;
case e1000_media_type_internal_serdes:
- ret_val = hw->mac.ops.check_for_link(hw);
- link_active = hw->mac.serdes_has_link;
+ e1000_check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
break;
- default:
case e1000_media_type_unknown:
+ default:
break;
}
- return link_active;
-}
-
-static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
-{
- bool ret = false;
- u32 ctrl_ext, thstat;
-
- /* check for thermal sensor event on i350 copper only */
- if (hw->mac.type == e1000_i350) {
- thstat = rd32(E1000_THSTAT);
- ctrl_ext = rd32(E1000_CTRL_EXT);
-
- if ((hw->phy.media_type == e1000_media_type_copper) &&
- !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
- ret = !!(thstat & event);
+ if (((hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) &&
+ (hw->phy.id == I210_I_PHY_ID)) {
+ if (!netif_carrier_ok(adapter->netdev)) {
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+ } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
+ adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ }
}
- return ret;
+ return link_active;
}
/**
- * igb_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * igb_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
**/
static void igb_watchdog(unsigned long data)
{
@@ -3905,48 +4850,58 @@ static void igb_watchdog_task(struct work_struct *work)
struct igb_adapter,
watchdog_task);
struct e1000_hw *hw = &adapter->hw;
- struct e1000_phy_info *phy = &hw->phy;
struct net_device *netdev = adapter->netdev;
- u32 link;
+ u32 thstat, ctrl_ext, link;
int i;
+ u32 connsw;
link = igb_has_link(adapter);
+
+ /* Force link down if we have fiber to swap to */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ connsw = E1000_READ_REG(hw, E1000_CONNSW);
+ if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
+ link = 0;
+ }
+ }
+ if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
+ if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+ else
+ link = FALSE;
+ }
+
if (link) {
+ /* Perform a reset if the media type changed. */
+ if (hw->dev_spec._82575.media_changed) {
+ hw->dev_spec._82575.media_changed = false;
+ adapter->flags |= IGB_FLAG_MEDIA_RESET;
+ igb_reset(adapter);
+ }
+
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
- hw->mac.ops.get_speed_and_duplex(hw,
- &adapter->link_speed,
- &adapter->link_duplex);
- ctrl = rd32(E1000_CTRL);
- /* Links status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
- "Duplex, Flow Control: %s\n",
- netdev->name,
- adapter->link_speed,
- adapter->link_duplex == FULL_DUPLEX ?
- "Full" : "Half",
- (ctrl & E1000_CTRL_TFCE) &&
- (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
- (ctrl & E1000_CTRL_RFCE) ? "RX" :
- (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
-
- /* check if SmartSpeed worked */
- igb_check_downshift(hw);
- if (phy->speed_downgraded)
- netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
-
- /* check for thermal sensor event */
- if (igb_thermal_sensor_event(hw,
- E1000_THSTAT_LINK_THROTTLE)) {
- netdev_info(netdev, "The network adapter link "
- "speed was downshifted because it "
- "overheated\n");
- }
+ e1000_get_speed_and_duplex(hw,
+ &adapter->link_speed,
+ &adapter->link_duplex);
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ /* Links status message must follow this format */
+ netdev_info(netdev,
+ "igb: %s NIC Link is Up %d Mbps %s, Flow Control: %s\n",
+ netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full Duplex" : "Half Duplex",
+ ((ctrl & E1000_CTRL_TFCE) &&
+ (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
+ ((ctrl & E1000_CTRL_RFCE) ? "RX" :
+ ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
/* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
@@ -3956,12 +4911,17 @@ static void igb_watchdog_task(struct work_struct *work)
case SPEED_100:
/* maybe add some timeout factor ? */
break;
+ default:
+ break;
}
netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
igb_ping_all_vfs(adapter);
+#ifdef IFLA_VF_MAX
igb_check_vf_rate_limit(adapter);
+#endif /* IFLA_VF_MAX */
/* link state has changed, schedule phy info update */
if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -3972,18 +4932,33 @@ static void igb_watchdog_task(struct work_struct *work)
if (netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
-
- /* check for thermal sensor event */
- if (igb_thermal_sensor_event(hw,
- E1000_THSTAT_PWR_DOWN)) {
- netdev_err(netdev, "The network adapter was "
- "stopped because it overheated\n");
+ /* check for thermal sensor event on i350 */
+ if (hw->mac.type == e1000_i350) {
+ thstat = E1000_READ_REG(hw, E1000_THSTAT);
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+     !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+ if (thstat & E1000_THSTAT_PWR_DOWN) {
+ netdev_err(netdev,
+ "igb: %s The network adapter was stopped because it overheated.\n",
+ netdev->name);
+ }
+ if (thstat & E1000_THSTAT_LINK_THROTTLE) {
+ netdev_err(netdev,
+ "igb: %s The network adapter supported link speed was downshifted because it overheated.\n",
+ netdev->name);
+ }
+ }
}
/* Links status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Down\n",
+ netdev_info(netdev, "igb: %s NIC Link is Down\n",
netdev->name);
netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
igb_ping_all_vfs(adapter);
@@ -3991,18 +4966,36 @@ static void igb_watchdog_task(struct work_struct *work)
if (!test_bit(__IGB_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
-
+ /* link is down, time to check for alternate media */
+ if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+ igb_check_swap_media(adapter);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
+ }
pm_schedule_suspend(netdev->dev.parent,
MSEC_PER_SEC * 5);
+
+ /* also check for alternate media here */
+ } else if (!netif_carrier_ok(netdev) &&
+ (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+ hw->mac.ops.power_up_serdes(hw);
+ igb_check_swap_media(adapter);
+ if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+ schedule_work(&adapter->reset_task);
+ /* return immediately */
+ return;
+ }
}
}
- spin_lock(&adapter->stats64_lock);
- igb_update_stats(adapter, &adapter->stats64);
- spin_unlock(&adapter->stats64_lock);
+ igb_update_stats(adapter);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *tx_ring = adapter->tx_ring[i];
+
if (!netif_carrier_ok(netdev)) {
/* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
@@ -4021,23 +5014,92 @@ static void igb_watchdog_task(struct work_struct *work)
set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
}
- /* Cause software interrupt to ensure Rx ring is cleaned */
+ /* Cause software interrupt to ensure rx ring is cleaned */
if (adapter->msix_entries) {
u32 eics = 0;
+
for (i = 0; i < adapter->num_q_vectors; i++)
eics |= adapter->q_vector[i]->eims_value;
- wr32(E1000_EICS, eics);
+ E1000_WRITE_REG(hw, E1000_EICS, eics);
} else {
- wr32(E1000_ICS, E1000_ICS_RXDMT0);
+ E1000_WRITE_REG(hw, E1000_ICS, E1000_ICS_RXDMT0);
}
igb_spoof_check(adapter);
- igb_ptp_rx_hang(adapter);
/* Reset the timer */
+ if (!test_bit(__IGB_DOWN, &adapter->state)) {
+ if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + HZ));
+ else
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ }
+}
+
+static void igb_dma_err_task(struct work_struct *work)
+{
+ struct igb_adapter *adapter = container_of(work,
+ struct igb_adapter,
+ dma_err_task);
+ int vf;
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 hgptc;
+ u32 ciaa, ciad;
+
+ hgptc = E1000_READ_REG(hw, E1000_HGPTC);
+ if (hgptc) /* If incrementing then no need for the check below */
+ goto dma_timer_reset;
+ /*
+ * Check to see if a bad DMA write target from an errant or
+ * malicious VF has caused a PCIe error. If so then we can
+ * issue a VFLR to the offending VF(s) and then resume without
+ * requesting a full slot reset.
+ */
+
+ for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
+ ciaa = (vf << 16) | 0x80000000;
+ /* 32 bit read so align, we really want status at offset 6 */
+ ciaa |= PCI_COMMAND;
+ E1000_WRITE_REG(hw, E1000_CIAA, ciaa);
+ ciad = E1000_READ_REG(hw, E1000_CIAD);
+ ciaa &= 0x7FFFFFFF;
+ /* disable debug mode asap after reading data */
+ E1000_WRITE_REG(hw, E1000_CIAA, ciaa);
+ /* Get the upper 16 bits which will be the PCI status reg */
+ ciad >>= 16;
+ if (ciad & (PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT |
+ PCI_STATUS_SIG_SYSTEM_ERROR)) {
+ netdev_err(netdev, "VF %d suffered error\n", vf);
+ /* Issue VFLR */
+ ciaa = (vf << 16) | 0x80000000;
+ ciaa |= 0xA8;
+ E1000_WRITE_REG(hw, E1000_CIAA, ciaa);
+ ciad = 0x00008000; /* VFLR */
+ E1000_WRITE_REG(hw, E1000_CIAD, ciad);
+ ciaa &= 0x7FFFFFFF;
+ E1000_WRITE_REG(hw, E1000_CIAA, ciaa);
+ }
+ }
+dma_timer_reset:
+ /* Reset the timer */
if (!test_bit(__IGB_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + 2 * HZ));
+ mod_timer(&adapter->dma_err_timer,
+ round_jiffies(jiffies + HZ / 10));
+}
+
+/**
+ * igb_dma_err_timer - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void igb_dma_err_timer(unsigned long data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ /* Do the rest outside of interrupt context */
+ schedule_work(&adapter->dma_err_task);
}
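
igb_dma_err_task() above reads a config-space dword for each VF through the CIAA/CIAD window; the PCI command register sits in the low 16 bits and the status register in the high 16 bits, so the code shifts right by 16 and tests the abort/system-error bits before issuing a VFLR. A stand-alone sketch of just that status test (the mask values follow the PCI specification as exposed via <linux/pci.h>):

/* Rough sketch of the VF fault check: status lives in the upper half of
 * the dword read back through the debug window. */
#include <stdint.h>
#include <stdio.h>

#define STATUS_REC_TARGET_ABORT  0x1000
#define STATUS_REC_MASTER_ABORT  0x2000
#define STATUS_SIG_SYSTEM_ERROR  0x4000

static int vf_faulted(uint32_t ciad)
{
        uint16_t status = ciad >> 16;   /* upper 16 bits = PCI status reg */

        return !!(status & (STATUS_REC_MASTER_ABORT |
                            STATUS_REC_TARGET_ABORT |
                            STATUS_SIG_SYSTEM_ERROR));
}

int main(void)
{
        printf("%d\n", vf_faulted(0x20000006));  /* master abort -> 1 */
        printf("%d\n", vf_faulted(0x00100006));  /* clean status -> 0 */
        return 0;
}
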
enum latency_range {
@@ -4048,20 +5110,20 @@ enum latency_range {
};
/**
- * igb_update_ring_itr - update the dynamic ITR value based on packet size
- * @q_vector: pointer to q_vector
+ * igb_update_ring_itr - update the dynamic ITR value based on packet size
+ * @q_vector: pointer to q_vector
*
- * Stores a new ITR value based on strictly on packet size. This
- * algorithm is less sophisticated than that used in igb_update_itr,
- * due to the difficulty of synchronizing statistics across multiple
- * receive rings. The divisors and thresholds used by this function
- * were determined based on theoretical maximum wire speed and testing
- * data, in order to minimize response time while increasing bulk
- * throughput.
- * This functionality is controlled by the InterruptThrottleRate module
- * parameter (see igb_param.c)
- * NOTE: This function is called only when operating in a multiqueue
- * receive environment.
+ * Stores a new ITR value based strictly on packet size. This
+ * algorithm is less sophisticated than that used in igb_update_itr,
+ * due to the difficulty of synchronizing statistics across multiple
+ * receive rings. The divisors and thresholds used by this function
+ * were determined based on theoretical maximum wire speed and testing
+ * data, in order to minimize response time while increasing bulk
+ * throughput.
+ * This functionality is controlled by the InterruptThrottleRate module
+ * parameter (see igb_param.c)
+ * NOTE: This function is called only when operating in a multiqueue
+ * receive environment.
**/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
@@ -4073,9 +5135,13 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
/* For non-gigabit speeds, just fix the interrupt rate at 4000
* ints/sec - ITR timer value of 120 ticks.
*/
- if (adapter->link_speed != SPEED_1000) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
new_val = IGB_4K_ITR;
goto set_itr_val;
+ default:
+ break;
}
packets = q_vector->rx.total_packets;
@@ -4122,21 +5188,20 @@ clear_counts:
}
/**
- * igb_update_itr - update the dynamic ITR value based on statistics
- * @q_vector: pointer to q_vector
- * @ring_container: ring info to update the itr for
- *
- * Stores a new ITR value based on packets and byte
- * counts during the last interrupt. The advantage of per interrupt
- * computation is faster updates and more accurate ITR for the current
- * traffic pattern. Constants in this function were computed
- * based on theoretical maximum wire speed and thresholds were set based
- * on testing data as well as attempting to minimize response time
- * while increasing bulk throughput.
- * this functionality is controlled by the InterruptThrottleRate module
- * parameter (see igb_param.c)
- * NOTE: These calculations are only valid when operating in a single-
- * queue environment.
+ * igb_update_itr - update the dynamic ITR value based on statistics
+ * @q_vector: pointer to q_vector
+ * @ring_container: ring info to update the itr for
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ * This functionality is controlled by the InterruptThrottleRate module
+ * parameter (see igb_param.c)
+ * NOTE: These calculations are only valid when operating in a single-
+ * queue environment.
**/
static void igb_update_itr(struct igb_q_vector *q_vector,
struct igb_ring_container *ring_container)
@@ -4198,10 +5263,14 @@ static void igb_set_itr(struct igb_q_vector *q_vector)
u8 current_itr = 0;
/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
- if (adapter->link_speed != SPEED_1000) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
current_itr = 0;
new_itr = IGB_4K_ITR;
goto set_itr_now;
+ default:
+ break;
}
igb_update_itr(q_vector, &q_vector->tx);
@@ -4237,9 +5306,9 @@ set_itr_now:
* increasing
*/
new_itr = new_itr > q_vector->itr_val ?
- max((new_itr * q_vector->itr_val) /
- (new_itr + (q_vector->itr_val >> 2)),
- new_itr) : new_itr;
+ max((new_itr * q_vector->itr_val) /
+ (new_itr + (q_vector->itr_val >> 2)),
+ new_itr) : new_itr;
/* Don't write the value here; it resets the adapter's
* internal timer, and causes us to delay far longer than
* we should between interrupts. Instead, we write the ITR
@@ -4251,8 +5320,8 @@ set_itr_now:
}
}
-static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
- u32 type_tucmd, u32 mss_l4len_idx)
+void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
+ u32 type_tucmd, u32 mss_l4len_idx)
{
struct e1000_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
@@ -4279,6 +5348,7 @@ static int igb_tso(struct igb_ring *tx_ring,
struct igb_tx_buffer *first,
u8 *hdr_len)
{
+#ifdef NETIF_F_TSO
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
@@ -4287,10 +5357,13 @@ static int igb_tso(struct igb_ring *tx_ring,
return 0;
if (!skb_is_gso(skb))
+#endif /* NETIF_F_TSO */
return 0;
+#ifdef NETIF_F_TSO
if (skb_header_cloned(skb)) {
int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+
if (err)
return err;
}
@@ -4298,8 +5371,9 @@ static int igb_tso(struct igb_ring *tx_ring,
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == __constant_htons(ETH_P_IP)) {
+ if (first->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
+
iph->tot_len = 0;
iph->check = 0;
tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
@@ -4310,6 +5384,7 @@ static int igb_tso(struct igb_ring *tx_ring,
first->tx_flags |= IGB_TX_FLAGS_TSO |
IGB_TX_FLAGS_CSUM |
IGB_TX_FLAGS_IPV4;
+#ifdef NETIF_F_TSO6
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -4317,6 +5392,7 @@ static int igb_tso(struct igb_ring *tx_ring,
0, IPPROTO_TCP, 0);
first->tx_flags |= IGB_TX_FLAGS_TSO |
IGB_TX_FLAGS_CSUM;
+#endif
}
/* compute header lengths */
@@ -4339,6 +5415,7 @@ static int igb_tso(struct igb_ring *tx_ring,
igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
return 1;
+#endif /* NETIF_F_TSO */
}
static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
@@ -4352,37 +5429,42 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
return;
} else {
- u8 l4_hdr = 0;
+ u8 nexthdr = 0;
+
switch (first->protocol) {
case __constant_htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
- l4_hdr = ip_hdr(skb)->protocol;
+ nexthdr = ip_hdr(skb)->protocol;
break;
+#ifdef NETIF_F_IPV6_CSUM
case __constant_htons(ETH_P_IPV6):
vlan_macip_lens |= skb_network_header_len(skb);
- l4_hdr = ipv6_hdr(skb)->nexthdr;
+ nexthdr = ipv6_hdr(skb)->nexthdr;
break;
+#endif
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
- "partial checksum but proto=%x!\n",
- first->protocol);
+ "partial checksum but proto=%x!\n",
+ first->protocol);
}
break;
}
- switch (l4_hdr) {
+ switch (nexthdr) {
case IPPROTO_TCP:
type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
mss_l4len_idx = tcp_hdrlen(skb) <<
E1000_ADVTXD_L4LEN_SHIFT;
break;
+#ifdef HAVE_SCTP
case IPPROTO_SCTP:
type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
mss_l4len_idx = sizeof(struct sctphdr) <<
E1000_ADVTXD_L4LEN_SHIFT;
break;
+#endif
case IPPROTO_UDP:
mss_l4len_idx = sizeof(struct udphdr) <<
E1000_ADVTXD_L4LEN_SHIFT;
@@ -4390,8 +5472,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
- "partial checksum but l4 proto=%x!\n",
- l4_hdr);
+ "partial checksum but l4 proto=%x!\n",
+ nexthdr);
}
break;
}
@@ -4430,9 +5512,6 @@ static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
(E1000_ADVTXD_MAC_TSTAMP));
- /* insert frame checksum */
- cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
-
return cmd_type;
}
@@ -4539,11 +5618,11 @@ static void igb_tx_map(struct igb_ring *tx_ring,
tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
-
/* set the timestamp */
first->time_stamp = jiffies;
- /* Force memory writes to complete before letting h/w know there
+ /*
+ * Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64).
*
@@ -4564,7 +5643,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
- * at a time, it synchronizes IO on IA64/Altix systems
* at a time, it synchronizes IO on IA64/Altix systems
mmiowb();
@@ -4589,9 +5668,12 @@ dma_error:
static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
- struct net_device *netdev = tx_ring->netdev;
+ struct net_device *netdev = netdev_ring(tx_ring);
- netif_stop_subqueue(netdev, tx_ring->queue_index);
+ if (netif_is_multiqueue(netdev))
+ netif_stop_subqueue(netdev, ring_queue_index(tx_ring));
+ else
+ netif_stop_queue(netdev);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
@@ -4606,11 +5688,12 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
return -EBUSY;
/* A reprieve! */
- netif_wake_subqueue(netdev, tx_ring->queue_index);
+ if (netif_is_multiqueue(netdev))
+ netif_wake_subqueue(netdev, ring_queue_index(tx_ring));
+ else
+ netif_wake_queue(netdev);
- u64_stats_update_begin(&tx_ring->tx_syncp2);
- tx_ring->tx_stats.restart_queue2++;
- u64_stats_update_end(&tx_ring->tx_syncp2);
+ tx_ring->tx_stats.restart_queue++;
return 0;
}
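
__igb_maybe_stop_tx() above uses the classic stop-queue / memory-barrier / re-check pattern so that a transmit completion racing with the stop cannot leave the queue stalled forever. A toy illustration of that control flow, with invented flag and counter names standing in for the real netdev queue state:

/* Toy model of the stop/re-check/wake pattern; not the driver's code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool queue_stopped;
static atomic_int descriptors_free;

static int maybe_stop(int needed)
{
        atomic_store(&queue_stopped, true);          /* netif_stop_subqueue() */
        atomic_thread_fence(memory_order_seq_cst);   /* smp_mb() in the driver */

        if (atomic_load(&descriptors_free) < needed)
                return -1;                           /* still full: -EBUSY */

        atomic_store(&queue_stopped, false);         /* a reprieve: wake queue */
        return 0;
}

int main(void)
{
        atomic_store(&descriptors_free, 4);
        printf("%d\n", maybe_stop(8));   /* -1: not enough room */
        printf("%d\n", maybe_stop(2));   /*  0: woken back up   */
        return 0;
}
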
@@ -4628,24 +5711,26 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
+#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD
+ unsigned short f;
+#endif
u16 count = TXD_USE_COUNT(skb_headlen(skb));
__be16 protocol = vlan_get_protocol(skb);
u8 hdr_len = 0;
- /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
+ /*
+ * need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
* + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head,
* + 1 desc for context descriptor,
* otherwise try next time
*/
- if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
- unsigned short f;
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
- } else {
- count += skb_shinfo(skb)->nr_frags;
- }
-
+#if PAGE_SIZE > IGB_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
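/*
 * Worked example of the descriptor budget above (sketch only; the 32 KiB
 * IGB_MAX_DATA_PER_TXD value is assumed from the upstream driver): a
 * 9000-byte linear head costs 1 descriptor, a 64 KiB page fragment costs 2,
 * and the "count + 3" below adds the context descriptor plus the two-slot
 * gap that keeps tail from touching head.
 */
#include <linux/kernel.h>

#define EXAMPLE_MAX_DATA_PER_TXD	(1u << 15)	/* 32 KiB, assumed */
#define EXAMPLE_TXD_USE_COUNT(S) \
	DIV_ROUND_UP((S), EXAMPLE_MAX_DATA_PER_TXD)
/* EXAMPLE_TXD_USE_COUNT(9000)  == 1 */
/* EXAMPLE_TXD_USE_COUNT(65536) == 2 */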
if (igb_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */
return NETDEV_TX_BUSY;
@@ -4657,13 +5742,21 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
- skb_tx_timestamp(skb);
-
+#ifdef HAVE_PTP_1588_CLOCK
+#ifdef SKB_SHARED_TX_IS_UNION
+ if (unlikely(skb_tx(skb)->hardware)) {
+#else
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+#endif
struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
- if (!(adapter->ptp_tx_skb)) {
+ if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
+ &adapter->state)) {
+#ifdef SKB_SHARED_TX_IS_UNION
+ skb_tx(skb)->in_progress = 1;
+#else
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+#endif
tx_flags |= IGB_TX_FLAGS_TSTAMP;
adapter->ptp_tx_skb = skb_get(skb);
@@ -4672,10 +5765,11 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
schedule_work(&adapter->ptp_tx_work);
}
}
-
- if (vlan_tx_tag_present(skb)) {
+#endif /* HAVE_PTP_1588_CLOCK */
+ skb_tx_timestamp(skb);
+ if (skb_vlan_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
- tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
+ tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
}
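/*
 * Sketch of the single-outstanding-timestamp guard used in the PTP branch
 * above: only one skb may own the Tx timestamp registers at a time, so a
 * lock bit is claimed before a reference to the skb is kept. Hypothetical
 * helper, not code from the driver.
 */
#include <linux/bitops.h>
#include <linux/skbuff.h>

static bool example_claim_tx_tstamp(unsigned long *state, unsigned int bit,
				    struct sk_buff *skb, struct sk_buff **slot)
{
	if (test_and_set_bit_lock(bit, state))
		return false;		/* a timestamp is already in flight */

	*slot = skb_get(skb);		/* hold the skb until the work handler runs */
	return true;
}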
/* record initial flags and protocol */
@@ -4690,6 +5784,10 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
igb_tx_map(tx_ring, first, hdr_len);
+#ifndef HAVE_TRANS_START_IN_QUEUE
+ netdev_ring(tx_ring)->trans_start = jiffies;
+
+#endif
/* Make sure there is space in the ring for the next send. */
igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
@@ -4701,6 +5799,7 @@ out_drop:
return NETDEV_TX_OK;
}
+#ifdef HAVE_TX_MQ
static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
struct sk_buff *skb)
{
@@ -4711,6 +5810,9 @@ static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
return adapter->tx_ring[r_idx];
}
+#else
+#define igb_tx_queue_mapping(_adapter, _skb) ((_adapter)->tx_ring[0])
+#endif
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
@@ -4727,22 +5829,22 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
+ /*
+ * The minimum packet size with TCTL.PSP set is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
- if (unlikely(skb->len < 17)) {
- if (skb_pad(skb, 17 - skb->len))
+ if (skb->len < 17) {
+ if (skb_padto(skb, 17))
return NETDEV_TX_OK;
skb->len = 17;
- skb_set_tail_pointer(skb, 17);
}
return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}
/**
- * igb_tx_timeout - Respond to a Tx Hang
- * @netdev: network interface device structure
+ * igb_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
**/
static void igb_tx_timeout(struct net_device *netdev)
{
@@ -4756,64 +5858,73 @@ static void igb_tx_timeout(struct net_device *netdev)
hw->dev_spec._82575.global_device_reset = true;
schedule_work(&adapter->reset_task);
- wr32(E1000_EICS,
- (adapter->eims_enable_mask & ~adapter->eims_other));
+ E1000_WRITE_REG(hw, E1000_EICS,
+ (adapter->eims_enable_mask & ~adapter->eims_other));
}
static void igb_reset_task(struct work_struct *work)
{
struct igb_adapter *adapter;
+
adapter = container_of(work, struct igb_adapter, reset_task);
- igb_dump(adapter);
- netdev_err(adapter->netdev, "Reset adapter\n");
igb_reinit_locked(adapter);
}
/**
- * igb_get_stats64 - Get System Network Statistics
- * @netdev: network interface device structure
- * @stats: rtnl_link_stats64 pointer
+ * igb_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are updated here and also from the timer callback.
**/
-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+static struct net_device_stats *igb_get_stats(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- spin_lock(&adapter->stats64_lock);
- igb_update_stats(adapter, &adapter->stats64);
- memcpy(stats, &adapter->stats64, sizeof(*stats));
- spin_unlock(&adapter->stats64_lock);
+ if (!test_bit(__IGB_RESETTING, &adapter->state))
+ igb_update_stats(adapter);
- return stats;
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+ /* only return the current stats */
+ return &netdev->stats;
+#else
+ /* only return the current stats */
+ return &adapter->net_stats;
+#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
}
/**
- * igb_change_mtu - Change the Maximum Transfer Unit
- * @netdev: network interface device structure
- * @new_mtu: new value for maximum frame size
+ * igb_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
*
- * Returns 0 on success, negative on failure
+ * Returns 0 on success, negative on failure
**/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
- dev_err(&pdev->dev, "Invalid MTU setting\n");
+ dev_err(pci_dev_to_dev(pdev), "Invalid MTU setting\n");
return -EINVAL;
}
#define MAX_STD_JUMBO_FRAME_SIZE 9238
if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
- dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
+ dev_err(pci_dev_to_dev(pdev), "MTU > 9216 not supported.\n");
return -EINVAL;
}
+ /* adjust max frame to be at least the size of a standard frame */
+ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+ max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
+
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
/* igb_down has a dependency on max_frame_size */
adapter->max_frame_size = max_frame;
@@ -4821,9 +5932,10 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
igb_down(adapter);
- dev_info(&pdev->dev, "changing MTU from %d to %d\n",
+ dev_info(pci_dev_to_dev(pdev), "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
+ hw->dev_spec._82575.mtu = new_mtu;
if (netif_running(netdev))
igb_up(adapter);
@@ -4836,49 +5948,74 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
- * igb_update_stats - Update the board statistics counters
- * @adapter: board private structure
+ * igb_update_stats - Update the board statistics counters
+ * @adapter: board private structure
**/
-void igb_update_stats(struct igb_adapter *adapter,
- struct rtnl_link_stats64 *net_stats)
+
+void igb_update_stats(struct igb_adapter *adapter)
{
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+ struct net_device_stats *net_stats = &adapter->netdev->stats;
+#else
+ struct net_device_stats *net_stats = &adapter->net_stats;
+#endif /* HAVE_NETDEV_STATS_IN_NETDEV */
struct e1000_hw *hw = &adapter->hw;
+#ifdef HAVE_PCI_ERS
struct pci_dev *pdev = adapter->pdev;
+#endif
u32 reg, mpc;
u16 phy_tmp;
int i;
u64 bytes, packets;
- unsigned int start;
- u64 _bytes, _packets;
+#ifndef IGB_NO_LRO
+ u32 flushed = 0, coal = 0;
+ struct igb_q_vector *q_vector;
+#endif
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
- /* Prevent stats update while adapter is being reset, or if the pci
+ /*
+ * Prevent stats update while adapter is being reset, or if the pci
* connection is down.
*/
if (adapter->link_speed == 0)
return;
+#ifdef HAVE_PCI_ERS
if (pci_channel_offline(pdev))
return;
+#endif
+#ifndef IGB_NO_LRO
+ for (i = 0; i < adapter->num_q_vectors; i++) {
+ q_vector = adapter->q_vector[i];
+ if (!q_vector)
+ continue;
+ flushed += q_vector->lrolist.stats.flushed;
+ coal += q_vector->lrolist.stats.coal;
+ }
+ adapter->lro_stats.flushed = flushed;
+ adapter->lro_stats.coal = coal;
+
+#endif
bytes = 0;
packets = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- u32 rqdpc = rd32(E1000_RQDPC(i));
struct igb_ring *ring = adapter->rx_ring[i];
-
- if (rqdpc) {
- ring->rx_stats.drops += rqdpc;
- net_stats->rx_fifo_errors += rqdpc;
+ u32 rqdpc_tmp = E1000_READ_REG(hw, E1000_RQDPC(i)) & 0x0FFF;
+
+ if (hw->mac.type >= e1000_i210)
+ E1000_WRITE_REG(hw, E1000_RQDPC(i), 0);
+ ring->rx_stats.drops += rqdpc_tmp;
+ net_stats->rx_fifo_errors += rqdpc_tmp;
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ if (!ring->vmdq_netdev) {
+ bytes += ring->rx_stats.bytes;
+ packets += ring->rx_stats.packets;
}
-
- do {
- start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
- _bytes = ring->rx_stats.bytes;
- _packets = ring->rx_stats.packets;
- } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
- bytes += _bytes;
- packets += _packets;
+#else
+ bytes += ring->rx_stats.bytes;
+ packets += ring->rx_stats.packets;
+#endif
}
net_stats->rx_bytes = bytes;
@@ -4888,97 +6025,98 @@ void igb_update_stats(struct igb_adapter *adapter,
packets = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *ring = adapter->tx_ring[i];
- do {
- start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
- _bytes = ring->tx_stats.bytes;
- _packets = ring->tx_stats.packets;
- } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
- bytes += _bytes;
- packets += _packets;
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ if (!ring->vmdq_netdev) {
+ bytes += ring->tx_stats.bytes;
+ packets += ring->tx_stats.packets;
+ }
+#else
+ bytes += ring->tx_stats.bytes;
+ packets += ring->tx_stats.packets;
+#endif
}
net_stats->tx_bytes = bytes;
net_stats->tx_packets = packets;
/* read stats registers */
- adapter->stats.crcerrs += rd32(E1000_CRCERRS);
- adapter->stats.gprc += rd32(E1000_GPRC);
- adapter->stats.gorc += rd32(E1000_GORCL);
- rd32(E1000_GORCH); /* clear GORCL */
- adapter->stats.bprc += rd32(E1000_BPRC);
- adapter->stats.mprc += rd32(E1000_MPRC);
- adapter->stats.roc += rd32(E1000_ROC);
-
- adapter->stats.prc64 += rd32(E1000_PRC64);
- adapter->stats.prc127 += rd32(E1000_PRC127);
- adapter->stats.prc255 += rd32(E1000_PRC255);
- adapter->stats.prc511 += rd32(E1000_PRC511);
- adapter->stats.prc1023 += rd32(E1000_PRC1023);
- adapter->stats.prc1522 += rd32(E1000_PRC1522);
- adapter->stats.symerrs += rd32(E1000_SYMERRS);
- adapter->stats.sec += rd32(E1000_SEC);
-
- mpc = rd32(E1000_MPC);
+ adapter->stats.crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
+ adapter->stats.gprc += E1000_READ_REG(hw, E1000_GPRC);
+ adapter->stats.gorc += E1000_READ_REG(hw, E1000_GORCL);
+ E1000_READ_REG(hw, E1000_GORCH); /* clear GORCL */
+ adapter->stats.bprc += E1000_READ_REG(hw, E1000_BPRC);
+ adapter->stats.mprc += E1000_READ_REG(hw, E1000_MPRC);
+ adapter->stats.roc += E1000_READ_REG(hw, E1000_ROC);
+
+ adapter->stats.prc64 += E1000_READ_REG(hw, E1000_PRC64);
+ adapter->stats.prc127 += E1000_READ_REG(hw, E1000_PRC127);
+ adapter->stats.prc255 += E1000_READ_REG(hw, E1000_PRC255);
+ adapter->stats.prc511 += E1000_READ_REG(hw, E1000_PRC511);
+ adapter->stats.prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
+ adapter->stats.prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
+ adapter->stats.symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
+ adapter->stats.sec += E1000_READ_REG(hw, E1000_SEC);
+
+ mpc = E1000_READ_REG(hw, E1000_MPC);
adapter->stats.mpc += mpc;
net_stats->rx_fifo_errors += mpc;
- adapter->stats.scc += rd32(E1000_SCC);
- adapter->stats.ecol += rd32(E1000_ECOL);
- adapter->stats.mcc += rd32(E1000_MCC);
- adapter->stats.latecol += rd32(E1000_LATECOL);
- adapter->stats.dc += rd32(E1000_DC);
- adapter->stats.rlec += rd32(E1000_RLEC);
- adapter->stats.xonrxc += rd32(E1000_XONRXC);
- adapter->stats.xontxc += rd32(E1000_XONTXC);
- adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
- adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
- adapter->stats.fcruc += rd32(E1000_FCRUC);
- adapter->stats.gptc += rd32(E1000_GPTC);
- adapter->stats.gotc += rd32(E1000_GOTCL);
- rd32(E1000_GOTCH); /* clear GOTCL */
- adapter->stats.rnbc += rd32(E1000_RNBC);
- adapter->stats.ruc += rd32(E1000_RUC);
- adapter->stats.rfc += rd32(E1000_RFC);
- adapter->stats.rjc += rd32(E1000_RJC);
- adapter->stats.tor += rd32(E1000_TORH);
- adapter->stats.tot += rd32(E1000_TOTH);
- adapter->stats.tpr += rd32(E1000_TPR);
-
- adapter->stats.ptc64 += rd32(E1000_PTC64);
- adapter->stats.ptc127 += rd32(E1000_PTC127);
- adapter->stats.ptc255 += rd32(E1000_PTC255);
- adapter->stats.ptc511 += rd32(E1000_PTC511);
- adapter->stats.ptc1023 += rd32(E1000_PTC1023);
- adapter->stats.ptc1522 += rd32(E1000_PTC1522);
-
- adapter->stats.mptc += rd32(E1000_MPTC);
- adapter->stats.bptc += rd32(E1000_BPTC);
-
- adapter->stats.tpt += rd32(E1000_TPT);
- adapter->stats.colc += rd32(E1000_COLC);
-
- adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
- /* read internal phy specific stats */
- reg = rd32(E1000_CTRL_EXT);
+ adapter->stats.scc += E1000_READ_REG(hw, E1000_SCC);
+ adapter->stats.ecol += E1000_READ_REG(hw, E1000_ECOL);
+ adapter->stats.mcc += E1000_READ_REG(hw, E1000_MCC);
+ adapter->stats.latecol += E1000_READ_REG(hw, E1000_LATECOL);
+ adapter->stats.dc += E1000_READ_REG(hw, E1000_DC);
+ adapter->stats.rlec += E1000_READ_REG(hw, E1000_RLEC);
+ adapter->stats.xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
+ adapter->stats.xontxc += E1000_READ_REG(hw, E1000_XONTXC);
+ adapter->stats.xoffrxc += E1000_READ_REG(hw, E1000_XOFFRXC);
+ adapter->stats.xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
+ adapter->stats.fcruc += E1000_READ_REG(hw, E1000_FCRUC);
+ adapter->stats.gptc += E1000_READ_REG(hw, E1000_GPTC);
+ adapter->stats.gotc += E1000_READ_REG(hw, E1000_GOTCL);
+ E1000_READ_REG(hw, E1000_GOTCH); /* clear GOTCL */
+ adapter->stats.rnbc += E1000_READ_REG(hw, E1000_RNBC);
+ adapter->stats.ruc += E1000_READ_REG(hw, E1000_RUC);
+ adapter->stats.rfc += E1000_READ_REG(hw, E1000_RFC);
+ adapter->stats.rjc += E1000_READ_REG(hw, E1000_RJC);
+ adapter->stats.tor += E1000_READ_REG(hw, E1000_TORH);
+ adapter->stats.tot += E1000_READ_REG(hw, E1000_TOTH);
+ adapter->stats.tpr += E1000_READ_REG(hw, E1000_TPR);
+
+ adapter->stats.ptc64 += E1000_READ_REG(hw, E1000_PTC64);
+ adapter->stats.ptc127 += E1000_READ_REG(hw, E1000_PTC127);
+ adapter->stats.ptc255 += E1000_READ_REG(hw, E1000_PTC255);
+ adapter->stats.ptc511 += E1000_READ_REG(hw, E1000_PTC511);
+ adapter->stats.ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
+ adapter->stats.ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
+
+ adapter->stats.mptc += E1000_READ_REG(hw, E1000_MPTC);
+ adapter->stats.bptc += E1000_READ_REG(hw, E1000_BPTC);
+
+ adapter->stats.tpt += E1000_READ_REG(hw, E1000_TPT);
+ adapter->stats.colc += E1000_READ_REG(hw, E1000_COLC);
+
+ adapter->stats.algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
+ /* read internal phy specific stats */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
- adapter->stats.rxerrc += rd32(E1000_RXERRC);
+ adapter->stats.rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
/* this stat has invalid values on i210/i211 */
if ((hw->mac.type != e1000_i210) &&
(hw->mac.type != e1000_i211))
- adapter->stats.tncrs += rd32(E1000_TNCRS);
+ adapter->stats.tncrs += E1000_READ_REG(hw, E1000_TNCRS);
}
+ adapter->stats.tsctc += E1000_READ_REG(hw, E1000_TSCTC);
+ adapter->stats.tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
- adapter->stats.tsctc += rd32(E1000_TSCTC);
- adapter->stats.tsctfc += rd32(E1000_TSCTFC);
-
- adapter->stats.iac += rd32(E1000_IAC);
- adapter->stats.icrxoc += rd32(E1000_ICRXOC);
- adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
- adapter->stats.icrxatc += rd32(E1000_ICRXATC);
- adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
- adapter->stats.ictxatc += rd32(E1000_ICTXATC);
- adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
- adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
- adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
+ adapter->stats.iac += E1000_READ_REG(hw, E1000_IAC);
+ adapter->stats.icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
+ adapter->stats.icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
+ adapter->stats.icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
+ adapter->stats.ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
+ adapter->stats.ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
+ adapter->stats.ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
+ adapter->stats.ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
+ adapter->stats.icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
/* Fill out the OS statistics structure */
net_stats->multicast = adapter->stats.mprc;
@@ -5011,24 +6149,20 @@ void igb_update_stats(struct igb_adapter *adapter,
/* Phy Stats */
if (hw->phy.media_type == e1000_media_type_copper) {
if ((adapter->link_speed == SPEED_1000) &&
- (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
+ (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
adapter->phy_stats.idle_errors += phy_tmp;
}
}
/* Management Stats */
- adapter->stats.mgptc += rd32(E1000_MGTPTC);
- adapter->stats.mgprc += rd32(E1000_MGTPRC);
- adapter->stats.mgpdc += rd32(E1000_MGTPDC);
-
- /* OS2BMC Stats */
- reg = rd32(E1000_MANC);
- if (reg & E1000_MANC_EN_BMC2OS) {
- adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
- adapter->stats.o2bspc += rd32(E1000_O2BSPC);
- adapter->stats.b2ospc += rd32(E1000_B2OSPC);
- adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
+ adapter->stats.mgptc += E1000_READ_REG(hw, E1000_MGTPTC);
+ adapter->stats.mgprc += E1000_READ_REG(hw, E1000_MGTPRC);
+ if (hw->mac.type > e1000_82580) {
+ adapter->stats.o2bgptc += E1000_READ_REG(hw, E1000_O2BGPTC);
+ adapter->stats.o2bspc += E1000_READ_REG(hw, E1000_O2BSPC);
+ adapter->stats.b2ospc += E1000_READ_REG(hw, E1000_B2OSPC);
+ adapter->stats.b2ogprc += E1000_READ_REG(hw, E1000_B2OGPRC);
}
}
@@ -5036,7 +6170,7 @@ static irqreturn_t igb_msix_other(int irq, void *data)
{
struct igb_adapter *adapter = data;
struct e1000_hw *hw = &adapter->hw;
- u32 icr = rd32(E1000_ICR);
+ u32 icr = E1000_READ_REG(hw, E1000_ICR);
/* reading ICR causes bit 31 of EICR to be cleared */
if (icr & E1000_ICR_DRSTA)
@@ -5063,18 +6197,24 @@ static irqreturn_t igb_msix_other(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+#ifdef HAVE_PTP_1588_CLOCK
if (icr & E1000_ICR_TS) {
- u32 tsicr = rd32(E1000_TSICR);
+ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
if (tsicr & E1000_TSICR_TXTS) {
/* acknowledge the interrupt */
- wr32(E1000_TSICR, E1000_TSICR_TXTS);
+ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
/* retrieve hardware timestamp */
schedule_work(&adapter->ptp_tx_work);
}
}
+#endif /* HAVE_PTP_1588_CLOCK */
+
+ /* Check for MDD event */
+ if (icr & E1000_ICR_MDDET)
+ igb_process_mdd_event(adapter);
- wr32(E1000_EIMS, adapter->eims_other);
+ E1000_WRITE_REG(hw, E1000_EIMS, adapter->eims_other);
return IRQ_HANDLED;
}
@@ -5111,7 +6251,7 @@ static irqreturn_t igb_msix_ring(int irq, void *data)
return IRQ_HANDLED;
}
-#ifdef CONFIG_IGB_DCA
+#ifdef IGB_DCA
static void igb_update_tx_dca(struct igb_adapter *adapter,
struct igb_ring *tx_ring,
int cpu)
@@ -5120,9 +6260,10 @@ static void igb_update_tx_dca(struct igb_adapter *adapter,
u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
if (hw->mac.type != e1000_82575)
- txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
+ txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT_82576;
- /* We can enable relaxed ordering for reads, but not writes when
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
* DCA is enabled. This is due to a known issue in some chipsets
* which will cause the DCA tag to be cleared.
*/
@@ -5130,7 +6271,7 @@ static void igb_update_tx_dca(struct igb_adapter *adapter,
E1000_DCA_TXCTRL_DATA_RRO_EN |
E1000_DCA_TXCTRL_DESC_DCA_EN;
- wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
+ E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
}
static void igb_update_rx_dca(struct igb_adapter *adapter,
@@ -5141,16 +6282,17 @@ static void igb_update_rx_dca(struct igb_adapter *adapter,
u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
if (hw->mac.type != e1000_82575)
- rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
+ rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT_82576;
- /* We can enable relaxed ordering for reads, but not writes when
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
* DCA is enabled. This is due to a known issue in some chipsets
* which will cause the DCA tag to be cleared.
*/
rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
E1000_DCA_RXCTRL_DESC_DCA_EN;
- wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
+ E1000_WRITE_REG(hw, E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
}
static void igb_update_dca(struct igb_q_vector *q_vector)
@@ -5181,7 +6323,7 @@ static void igb_setup_dca(struct igb_adapter *adapter)
return;
/* Always use CB2 mode, difference is masked in the CB driver. */
- wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
+ E1000_WRITE_REG(hw, E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
for (i = 0; i < adapter->num_q_vectors; i++) {
adapter->q_vector[i]->cpu = -1;
@@ -5202,9 +6344,9 @@ static int __igb_notify_dca(struct device *dev, void *data)
/* if already enabled, don't do it again */
if (adapter->flags & IGB_FLAG_DCA_ENABLED)
break;
- if (dca_add_requester(dev) == 0) {
+ if (dca_add_requester(dev) == E1000_SUCCESS) {
adapter->flags |= IGB_FLAG_DCA_ENABLED;
- dev_info(&pdev->dev, "DCA enabled\n");
+ dev_info(pci_dev_to_dev(pdev), "DCA enabled\n");
igb_setup_dca(adapter);
break;
}
@@ -5215,14 +6357,15 @@ static int __igb_notify_dca(struct device *dev, void *data)
* hanging around in the sysfs model
*/
dca_remove_requester(dev);
- dev_info(&pdev->dev, "DCA disabled\n");
+ dev_info(pci_dev_to_dev(pdev), "DCA disabled\n");
adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
- wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
+ E1000_WRITE_REG(hw, E1000_DCA_CTRL,
+ E1000_DCA_CTRL_DCA_DISABLE);
}
break;
}
- return 0;
+ return E1000_SUCCESS;
}
static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
@@ -5231,27 +6374,29 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
int ret_val;
ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
- __igb_notify_dca);
+ __igb_notify_dca);
return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
-#endif /* CONFIG_IGB_DCA */
+#endif /* IGB_DCA */
-#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
unsigned char mac_addr[ETH_ALEN];
- eth_zero_addr(mac_addr);
+ random_ether_addr(mac_addr);
igb_set_vf_mac(adapter, vf, mac_addr);
+#ifdef IFLA_VF_MAX
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
/* By default spoof check is enabled for all VFs */
adapter->vf_data[vf].spoofchk_enabled = true;
+#endif
+#endif
- return 0;
+ return true;
}
-#endif
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -5262,26 +6407,71 @@ static void igb_ping_all_vfs(struct igb_adapter *adapter)
ping = E1000_PF_CONTROL_MSG;
if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
ping |= E1000_VT_MSGTYPE_CTS;
- igb_write_mbx(hw, &ping, 1, i);
+ e1000_write_mbx(hw, &ping, 1, i);
}
}
+/**
+ * igb_mta_set - Set multicast filter table address
+ * @adapter: pointer to the adapter structure
+ * @hash_value: determines the MTA register and bit to set
+ *
+ * The multicast table address is a register array of 32-bit registers.
+ * The hash_value is used to determine what register the bit is in, the
+ * current value is read, the new bit is OR'd in and the new value is
+ * written back into the register.
+ **/
+void igb_mta_set(struct igb_adapter *adapter, u32 hash_value)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 hash_bit, hash_reg, mta;
+
+ /*
+ * The MTA is a register array of 32-bit registers. It is
+ * treated like an array of (32*mta_reg_count) bits. We want to
+ * set bit BitArray[hash_value]. So we figure out what register
+ * the bit is in, read it, OR in the new bit, then write
+ * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
+ * mask to bits 31:5 of the hash value which gives us the
+ * register we're modifying. The hash bit within that register
+ * is determined by the lower 5 bits of the hash value.
+ */
+ hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+ hash_bit = hash_value & 0x1F;
+
+ mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
+
+ mta |= (1 << hash_bit);
+
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
+ E1000_WRITE_FLUSH(hw);
+}
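/*
 * Worked example for igb_mta_set() above (sketch): with mta_reg_count = 128,
 * hash_value 0x2C5 selects register (0x2C5 >> 5) & 127 = 22 and bit
 * 0x2C5 & 0x1F = 5, so bit 5 of MTA[22] is OR'd in.
 */
#include <linux/types.h>

static inline void example_mta_index(u32 hash_value, u32 mta_reg_count,
				     u32 *hash_reg, u32 *hash_bit)
{
	*hash_reg = (hash_value >> 5) & (mta_reg_count - 1);
	*hash_bit = hash_value & 0x1F;
}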
+
static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
+
struct e1000_hw *hw = &adapter->hw;
- u32 vmolr = rd32(E1000_VMOLR(vf));
+ u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf));
struct vf_data_storage *vf_data = &adapter->vf_data[vf];
vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
IGB_VF_FLAG_MULTI_PROMISC);
vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
+#ifdef IGB_ENABLE_VF_PROMISC
+ if (*msgbuf & E1000_VF_SET_PROMISC_UNICAST) {
+ vmolr |= E1000_VMOLR_ROPE;
+ vf_data->flags |= IGB_VF_FLAG_UNI_PROMISC;
+ *msgbuf &= ~E1000_VF_SET_PROMISC_UNICAST;
+ }
+#endif
if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
vmolr |= E1000_VMOLR_MPME;
vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
} else {
- /* if we have hashes and we are clearing a multicast promisc
+ /*
+ * if we have hashes and we are clearing a multicast promisc
* flag we need to write the hashes to the MTA as this step
* was previously skipped
*/
@@ -5289,19 +6479,21 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
vmolr |= E1000_VMOLR_MPME;
} else if (vf_data->num_vf_mc_hashes) {
int j;
+
vmolr |= E1000_VMOLR_ROMPE;
for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
- igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+ igb_mta_set(adapter, vf_data->vf_mc_hashes[j]);
}
}
- wr32(E1000_VMOLR(vf), vmolr);
+ E1000_WRITE_REG(hw, E1000_VMOLR(vf), vmolr);
/* there are flags left unprocessed, likely not supported */
if (*msgbuf & E1000_VT_MSGINFO_MASK)
return -EINVAL;
return 0;
+
}
static int igb_set_vf_multicasts(struct igb_adapter *adapter,
@@ -5339,7 +6531,8 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
int i, j;
for (i = 0; i < adapter->vfs_allocated_count; i++) {
- u32 vmolr = rd32(E1000_VMOLR(i));
+ u32 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
+
vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
vf_data = &adapter->vf_data[i];
@@ -5350,9 +6543,9 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
} else if (vf_data->num_vf_mc_hashes) {
vmolr |= E1000_VMOLR_ROMPE;
for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
- igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+ igb_mta_set(adapter, vf_data->vf_mc_hashes[j]);
}
- wr32(E1000_VMOLR(i), vmolr);
+ E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
}
}
@@ -5360,13 +6553,14 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
struct e1000_hw *hw = &adapter->hw;
u32 pool_mask, reg, vid;
+ u16 vlan_default;
int i;
pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
/* Find the vlan filter for this id */
for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
- reg = rd32(E1000_VLVF(i));
+ reg = E1000_READ_REG(hw, E1000_VLVF(i));
/* remove the vf from the pool */
reg &= ~pool_mask;
@@ -5376,16 +6570,20 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
(reg & E1000_VLVF_VLANID_ENABLE)) {
reg = 0;
vid = reg & E1000_VLVF_VLANID_MASK;
- igb_vfta_set(hw, vid, false);
+ igb_vfta_set(adapter, vid, FALSE);
}
- wr32(E1000_VLVF(i), reg);
+ E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
}
adapter->vf_data[vf].vlans_enabled = 0;
+
+ vlan_default = adapter->vf_data[vf].default_vf_vlan_id;
+ if (vlan_default)
+ igb_vlvf_set(adapter, vlan_default, true, vf);
}
-static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
+s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
struct e1000_hw *hw = &adapter->hw;
u32 reg, i;
@@ -5395,12 +6593,12 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
return -1;
/* we only need to do this if VMDq is enabled */
- if (!adapter->vfs_allocated_count)
+ if (!adapter->vmdq_pools)
return -1;
/* Find the vlan filter for this id */
for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
- reg = rd32(E1000_VLVF(i));
+ reg = E1000_READ_REG(hw, E1000_VLVF(i));
if ((reg & E1000_VLVF_VLANID_ENABLE) &&
vid == (reg & E1000_VLVF_VLANID_MASK))
break;
@@ -5413,7 +6611,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
* one without the enable bit set
*/
for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
- reg = rd32(E1000_VLVF(i));
+ reg = E1000_READ_REG(hw, E1000_VLVF(i));
if (!(reg & E1000_VLVF_VLANID_ENABLE))
break;
}
@@ -5425,25 +6623,26 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
/* if !enabled we need to set this up in vfta */
if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
/* add VID to filter table */
- igb_vfta_set(hw, vid, true);
+ igb_vfta_set(adapter, vid, TRUE);
reg |= E1000_VLVF_VLANID_ENABLE;
}
reg &= ~E1000_VLVF_VLANID_MASK;
reg |= vid;
- wr32(E1000_VLVF(i), reg);
+ E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
/* do not modify RLPML for PF devices */
if (vf >= adapter->vfs_allocated_count)
- return 0;
+ return E1000_SUCCESS;
if (!adapter->vf_data[vf].vlans_enabled) {
u32 size;
- reg = rd32(E1000_VMOLR(vf));
+
+ reg = E1000_READ_REG(hw, E1000_VMOLR(vf));
size = reg & E1000_VMOLR_RLPML_MASK;
size += 4;
reg &= ~E1000_VMOLR_RLPML_MASK;
reg |= size;
- wr32(E1000_VMOLR(vf), reg);
+ E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg);
}
adapter->vf_data[vf].vlans_enabled++;
@@ -5455,37 +6654,40 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
/* if pool is empty then remove entry from vfta */
if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
reg = 0;
- igb_vfta_set(hw, vid, false);
+ igb_vfta_set(adapter, vid, FALSE);
}
- wr32(E1000_VLVF(i), reg);
+ E1000_WRITE_REG(hw, E1000_VLVF(i), reg);
/* do not modify RLPML for PF devices */
if (vf >= adapter->vfs_allocated_count)
- return 0;
+ return E1000_SUCCESS;
adapter->vf_data[vf].vlans_enabled--;
if (!adapter->vf_data[vf].vlans_enabled) {
u32 size;
- reg = rd32(E1000_VMOLR(vf));
+
+ reg = E1000_READ_REG(hw, E1000_VMOLR(vf));
size = reg & E1000_VMOLR_RLPML_MASK;
size -= 4;
reg &= ~E1000_VMOLR_RLPML_MASK;
reg |= size;
- wr32(E1000_VMOLR(vf), reg);
+ E1000_WRITE_REG(hw, E1000_VMOLR(vf), reg);
}
}
}
- return 0;
+ return E1000_SUCCESS;
}
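/*
 * Worked example of the RLPML bookkeeping in igb_vlvf_set() above (sketch;
 * the 14-bit RLPML field mask is an assumption): the per-VF long packet
 * limit grows by 4 bytes when the first VLAN is enabled for a VF and shrinks
 * again when the last one is removed, e.g. 1522 becomes 1526.
 */
#include <linux/types.h>

static inline u32 example_adjust_rlpml(u32 vmolr, int delta)
{
	u32 size = (vmolr & 0x3FFF) + delta;	/* RLPML field, assumed 14 bits */

	return (vmolr & ~0x3FFFu) | size;
}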
+#ifdef IFLA_VF_MAX
static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
struct e1000_hw *hw = &adapter->hw;
if (vid)
- wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
+ E1000_WRITE_REG(hw, E1000_VMVIR(vf),
+ (vid | E1000_VMVIR_VLANA_DEFAULT));
else
- wr32(E1000_VMVIR(vf), 0);
+ E1000_WRITE_REG(hw, E1000_VMVIR(vf), 0);
}
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
@@ -5494,7 +6696,9 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
int err = 0;
struct igb_adapter *adapter = netdev_priv(netdev);
- if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
+ /* VLAN IDs accepted range 0-4094 */
+ if ((vf >= adapter->vfs_allocated_count) || (vlan > VLAN_VID_MASK-1)
+ || (qos > 7))
return -EINVAL;
if (vlan || qos) {
err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
@@ -5504,6 +6708,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
igb_set_vmolr(adapter, vf, !vlan);
adapter->vf_data[vf].pf_vlan = vlan;
adapter->vf_data[vf].pf_qos = qos;
+ igb_set_vf_vlan_strip(adapter, vf, true);
dev_info(&adapter->pdev->dev,
"Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
if (test_bit(__IGB_DOWN, &adapter->state)) {
@@ -5513,10 +6718,14 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
"Bring the PF device up before attempting to use the VF device.\n");
}
} else {
+ if (adapter->vf_data[vf].pf_vlan)
+ dev_info(&adapter->pdev->dev,
+ "Clearing VLAN on VF %d\n", vf);
igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
- false, vf);
+ false, vf);
igb_set_vmvir(adapter, vlan, vf);
igb_set_vmolr(adapter, vf, true);
+ igb_set_vf_vlan_strip(adapter, vf, false);
adapter->vf_data[vf].pf_vlan = 0;
adapter->vf_data[vf].pf_qos = 0;
}
@@ -5524,6 +6733,36 @@ out:
return err;
}
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
+ bool setting)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 dtxswc, reg_offset;
+
+ if (!adapter->vfs_allocated_count)
+ return -EOPNOTSUPP;
+
+ if (vf >= adapter->vfs_allocated_count)
+ return -EINVAL;
+
+ reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
+ dtxswc = E1000_READ_REG(hw, reg_offset);
+ if (setting)
+ dtxswc |= ((1 << vf) |
+ (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+ else
+ dtxswc &= ~((1 << vf) |
+ (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+ E1000_WRITE_REG(hw, reg_offset, dtxswc);
+
+ adapter->vf_data[vf].spoofchk_enabled = setting;
+ return E1000_SUCCESS;
+}
+#endif /* HAVE_VF_SPOOFCHK_CONFIGURE */
+#endif /* IFLA_VF_MAX */
+
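/*
 * Worked example for igb_ndo_set_vf_spoofchk() above (sketch; the VLAN
 * anti-spoof bits are assumed to sit 8 positions above the MAC anti-spoof
 * bits, i.e. E1000_DTXSWC_VLAN_SPOOF_SHIFT == 8 as in the upstream driver):
 * enabling spoof check for VF 2 sets bits 2 and 10, i.e. mask 0x00000404.
 */
#include <linux/types.h>

static inline u32 example_spoofchk_mask(int vf, int vlan_shift)
{
	return (1u << vf) | (1u << (vf + vlan_shift));
}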
static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid)
{
struct e1000_hw *hw = &adapter->hw;
@@ -5532,7 +6771,7 @@ static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid)
/* Find the vlan filter for this id */
for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
- reg = rd32(E1000_VLVF(i));
+ reg = E1000_READ_REG(hw, E1000_VLVF(i));
if ((reg & E1000_VLVF_VLANID_ENABLE) &&
vid == (reg & E1000_VLVF_VLANID_MASK))
break;
@@ -5551,6 +6790,11 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
int err = 0;
+ if (vid)
+ igb_set_vf_vlan_strip(adapter, vf, true);
+ else
+ igb_set_vf_vlan_strip(adapter, vf, false);
+
/* If in promiscuous mode we need to make sure the PF also has
* the VLAN filter set.
*/
@@ -5572,12 +6816,13 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
u32 vlvf, bits;
int regndx = igb_find_vlvf_entry(adapter, vid);
+
if (regndx < 0)
goto out;
/* See if any other pools are set for this VLAN filter
* entry other than the PF.
*/
- vlvf = bits = rd32(E1000_VLVF(regndx));
+ vlvf = bits = E1000_READ_REG(hw, E1000_VLVF(regndx));
bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT +
adapter->vfs_allocated_count);
/* If the filter was removed then ensure PF pool bit
@@ -5585,7 +6830,9 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
* because the PF is in promiscuous mode.
*/
if ((vlvf & VLAN_VID_MASK) == vid &&
+#ifndef HAVE_VLAN_RX_REGISTER
!test_bit(vid, adapter->active_vlans) &&
+#endif
!bits)
igb_vlvf_set(adapter, vid, add,
adapter->vfs_allocated_count);
@@ -5597,7 +6844,9 @@ out:
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
- /* clear flags - except flag that indicates PF has set the MAC */
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* clear flags except flag that the PF has set the MAC */
adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
adapter->vf_data[vf].last_nack = jiffies;
@@ -5606,27 +6855,40 @@ static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
/* reset vlans for device */
igb_clear_vf_vfta(adapter, vf);
+#ifdef IFLA_VF_MAX
if (adapter->vf_data[vf].pf_vlan)
igb_ndo_set_vf_vlan(adapter->netdev, vf,
adapter->vf_data[vf].pf_vlan,
adapter->vf_data[vf].pf_qos);
else
igb_clear_vf_vfta(adapter, vf);
+#endif
/* reset multicast table array for vf */
adapter->vf_data[vf].num_vf_mc_hashes = 0;
/* Flush and reset the mta with the new values */
igb_set_rx_mode(adapter->netdev);
+
+ /*
+ * Reset the VFs TDWBAL and TDWBAH registers which are not
+ * cleared by a VFLR
+ */
+ E1000_WRITE_REG(hw, E1000_TDWBAH(vf), 0);
+ E1000_WRITE_REG(hw, E1000_TDWBAL(vf), 0);
+ if (hw->mac.type == e1000_82576) {
+ E1000_WRITE_REG(hw, E1000_TDWBAH(IGB_MAX_VF_FUNCTIONS + vf), 0);
+ E1000_WRITE_REG(hw, E1000_TDWBAL(IGB_MAX_VF_FUNCTIONS + vf), 0);
+ }
}
static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
- /* clear mac address as we were hotplug removed/added */
+ /* generate a new mac address as we were hotplug removed/added */
if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
- eth_zero_addr(vf_mac);
+ random_ether_addr(vf_mac);
/* process remaining reset events */
igb_vf_reset(adapter, vf);
@@ -5647,25 +6909,26 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
/* enable transmit and receive for vf */
- reg = rd32(E1000_VFTE);
- wr32(E1000_VFTE, reg | (1 << vf));
- reg = rd32(E1000_VFRE);
- wr32(E1000_VFRE, reg | (1 << vf));
+ reg = E1000_READ_REG(hw, E1000_VFTE);
+ E1000_WRITE_REG(hw, E1000_VFTE, reg | (1 << vf));
+ reg = E1000_READ_REG(hw, E1000_VFRE);
+ E1000_WRITE_REG(hw, E1000_VFRE, reg | (1 << vf));
adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
/* reply to reset with ack and vf mac address */
msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
memcpy(addr, vf_mac, 6);
- igb_write_mbx(hw, msgbuf, 3, vf);
+ e1000_write_mbx(hw, msgbuf, 3, vf);
}
static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
- /* The VF MAC Address is stored in a packed array of bytes
+ /*
+ * The VF MAC Address is stored in a packed array of bytes
* starting at the second 32 bit word of the msg array
*/
- unsigned char *addr = (char *)&msg[1];
+ unsigned char *addr = (unsigned char *)&msg[1];
int err = -1;
if (is_valid_ether_addr(addr))
@@ -5683,7 +6946,7 @@ static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
/* if device isn't clear to send it shouldn't be reading either */
if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
- igb_write_mbx(hw, &msg, 1, vf);
+ e1000_write_mbx(hw, &msg, 1, vf);
vf_data->last_nack = jiffies;
}
}
@@ -5696,45 +6959,47 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
struct vf_data_storage *vf_data = &adapter->vf_data[vf];
s32 retval;
- retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
+ retval = e1000_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
if (retval) {
- /* if receive failed revoke VF CTS stats and restart init */
- dev_err(&pdev->dev, "Error receiving message from VF\n");
- vf_data->flags &= ~IGB_VF_FLAG_CTS;
- if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
- return;
- goto out;
+ dev_err(pci_dev_to_dev(pdev), "Error receiving message from VF\n");
+ return;
}
/* this is a message we already processed, do nothing */
if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
return;
- /* until the vf completes a reset it should not be
+ /*
+ * until the vf completes a reset it should not be
* allowed to start any configuration.
*/
+
if (msgbuf[0] == E1000_VF_RESET) {
igb_vf_reset_msg(adapter, vf);
return;
}
if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
- if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
- return;
- retval = -1;
- goto out;
+ msgbuf[0] = E1000_VT_MSGTYPE_NACK;
+ if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
+ e1000_write_mbx(hw, msgbuf, 1, vf);
+ vf_data->last_nack = jiffies;
+ }
+ return;
}
switch ((msgbuf[0] & 0xFFFF)) {
case E1000_VF_SET_MAC_ADDR:
retval = -EINVAL;
+#ifndef IGB_DISABLE_VF_MAC_SET
if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
else
- dev_warn(&pdev->dev,
- "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
- vf);
+ DPRINTK(DRV, INFO,
+ "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
+ vf);
+#endif
break;
case E1000_VF_SET_PROMISC:
retval = igb_set_vf_promisc(adapter, msgbuf, vf);
@@ -5747,28 +7012,31 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
break;
case E1000_VF_SET_VLAN:
retval = -1;
+#ifdef IFLA_VF_MAX
if (vf_data->pf_vlan)
- dev_warn(&pdev->dev,
- "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
- vf);
+ DPRINTK(DRV, INFO,
+ "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
+ vf);
else
+#endif
retval = igb_set_vf_vlan(adapter, msgbuf, vf);
break;
default:
- dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
- retval = -1;
+ dev_err(pci_dev_to_dev(pdev), "Unhandled Msg %08x\n",
+ msgbuf[0]);
+ retval = -E1000_ERR_MBX;
break;
}
- msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
-out:
/* notify the VF of the results of what it sent us */
if (retval)
msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
else
msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
- igb_write_mbx(hw, msgbuf, 1, vf);
+ msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
+
+ e1000_write_mbx(hw, msgbuf, 1, vf);
}
static void igb_msg_task(struct igb_adapter *adapter)
@@ -5778,15 +7046,15 @@ static void igb_msg_task(struct igb_adapter *adapter)
for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
/* process any reset requests */
- if (!igb_check_for_rst(hw, vf))
+ if (!e1000_check_for_rst(hw, vf))
igb_vf_reset_event(adapter, vf);
/* process any messages pending */
- if (!igb_check_for_msg(hw, vf))
+ if (!e1000_check_for_msg(hw, vf))
igb_rcv_msg_from_vf(adapter, vf);
/* process any acks */
- if (!igb_check_for_ack(hw, vf))
+ if (!e1000_check_for_ack(hw, vf))
igb_rcv_ack_from_vf(adapter, vf);
}
}
@@ -5811,17 +7079,17 @@ static void igb_set_uta(struct igb_adapter *adapter)
return;
/* we only need to do this if VMDq is enabled */
- if (!adapter->vfs_allocated_count)
+ if (!adapter->vmdq_pools)
return;
for (i = 0; i < hw->mac.uta_reg_count; i++)
- array_wr32(E1000_UTA, i, ~0);
+ E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, ~0);
}
/**
- * igb_intr_msi - Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
+ * igb_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
**/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
@@ -5829,7 +7097,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
struct igb_q_vector *q_vector = adapter->q_vector[0];
struct e1000_hw *hw = &adapter->hw;
/* read ICR disables interrupts using IAM */
- u32 icr = rd32(E1000_ICR);
+ u32 icr = E1000_READ_REG(hw, E1000_ICR);
igb_write_itr(q_vector);
@@ -5847,16 +7115,18 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+#ifdef HAVE_PTP_1588_CLOCK
if (icr & E1000_ICR_TS) {
- u32 tsicr = rd32(E1000_TSICR);
+ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
if (tsicr & E1000_TSICR_TXTS) {
/* acknowledge the interrupt */
- wr32(E1000_TSICR, E1000_TSICR_TXTS);
+ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
/* retrieve hardware timestamp */
schedule_work(&adapter->ptp_tx_work);
}
}
+#endif /* HAVE_PTP_1588_CLOCK */
napi_schedule(&q_vector->napi);
@@ -5864,9 +7134,9 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
}
/**
- * igb_intr - Legacy Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
+ * igb_intr - Legacy Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
**/
static irqreturn_t igb_intr(int irq, void *data)
{
@@ -5876,7 +7146,7 @@ static irqreturn_t igb_intr(int irq, void *data)
/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
* need for the IMC write
*/
- u32 icr = rd32(E1000_ICR);
+ u32 icr = E1000_READ_REG(hw, E1000_ICR);
/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
* not set, then the adapter didn't send an interrupt
@@ -5901,23 +7171,25 @@ static irqreturn_t igb_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+#ifdef HAVE_PTP_1588_CLOCK
if (icr & E1000_ICR_TS) {
- u32 tsicr = rd32(E1000_TSICR);
+ u32 tsicr = E1000_READ_REG(hw, E1000_TSICR);
if (tsicr & E1000_TSICR_TXTS) {
/* acknowledge the interrupt */
- wr32(E1000_TSICR, E1000_TSICR_TXTS);
+ E1000_WRITE_REG(hw, E1000_TSICR, E1000_TSICR_TXTS);
/* retrieve hardware timestamp */
schedule_work(&adapter->ptp_tx_work);
}
}
+#endif /* HAVE_PTP_1588_CLOCK */
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
}
-static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
+void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
@@ -5932,25 +7204,24 @@ static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
if (!test_bit(__IGB_DOWN, &adapter->state)) {
if (adapter->msix_entries)
- wr32(E1000_EIMS, q_vector->eims_value);
+ E1000_WRITE_REG(hw, E1000_EIMS, q_vector->eims_value);
else
igb_irq_enable(adapter);
}
}
/**
- * igb_poll - NAPI Rx polling callback
- * @napi: napi polling structure
- * @budget: count of how many packets we should handle
+ * igb_poll - NAPI Rx polling callback
+ * @napi: napi polling structure
+ * @budget: count of how many packets we should handle
**/
static int igb_poll(struct napi_struct *napi, int budget)
{
struct igb_q_vector *q_vector = container_of(napi,
- struct igb_q_vector,
- napi);
+ struct igb_q_vector, napi);
bool clean_complete = true;
-#ifdef CONFIG_IGB_DCA
+#ifdef IGB_DCA
if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
igb_update_dca(q_vector);
#endif
@@ -5960,6 +7231,12 @@ static int igb_poll(struct napi_struct *napi, int budget)
if (q_vector->rx.ring)
clean_complete &= igb_clean_rx_irq(q_vector, budget);
+#ifndef HAVE_NETDEV_NAPI_LIST
+ /* if netdev is disabled we need to stop polling */
+ if (!netif_running(q_vector->adapter->netdev))
+ clean_complete = true;
+
+#endif
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
@@ -5972,10 +7249,9 @@ static int igb_poll(struct napi_struct *napi, int budget)
}
/**
- * igb_clean_tx_irq - Reclaim resources after transmit completes
- * @q_vector: pointer to q_vector containing needed info
- *
- * returns true if ring is completely cleaned
+ * igb_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: pointer to q_vector containing needed info
+ * returns TRUE if ring is completely cleaned
**/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
@@ -6068,16 +7344,20 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
+
i += tx_ring->count;
tx_ring->next_to_clean = i;
- u64_stats_update_begin(&tx_ring->tx_syncp);
tx_ring->tx_stats.bytes += total_bytes;
tx_ring->tx_stats.packets += total_packets;
- u64_stats_update_end(&tx_ring->tx_syncp);
q_vector->tx.total_bytes += total_bytes;
q_vector->tx.total_packets += total_packets;
+#ifdef DEBUG
+ if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags) &&
+ !(adapter->disable_hw_reset && adapter->tx_hang_detected)) {
+#else
if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
+#endif
struct e1000_hw *hw = &adapter->hw;
/* Detect a transmit hang in hardware, this serializes the
@@ -6086,10 +7366,23 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
if (tx_buffer->next_to_watch &&
time_after(jiffies, tx_buffer->time_stamp +
- (adapter->tx_timeout_factor * HZ)) &&
- !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
+ (adapter->tx_timeout_factor * HZ))
+ && !(E1000_READ_REG(hw, E1000_STATUS) &
+ E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
+#ifdef DEBUG
+ adapter->tx_hang_detected = TRUE;
+ if (adapter->disable_hw_reset) {
+ DPRINTK(DRV, WARNING,
+ "Deactivating netdev watchdog timer\n");
+ if (del_timer(&netdev_ring(tx_ring)->watchdog_timer))
+ dev_put(netdev_ring(tx_ring));
+#ifndef HAVE_NET_DEVICE_OPS
+ netdev_ring(tx_ring)->tx_timeout = NULL;
+#endif
+ }
+#endif /* DEBUG */
dev_err(tx_ring->dev,
"Detected Tx Unit Hang\n"
" Tx Queue <%d>\n"
@@ -6103,7 +7396,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
" jiffies <%lx>\n"
" desc.status <%x>\n",
tx_ring->queue_index,
- rd32(E1000_TDH(tx_ring->reg_idx)),
+ E1000_READ_REG(hw, E1000_TDH(tx_ring->reg_idx)),
readl(tx_ring->tail),
tx_ring->next_to_use,
tx_ring->next_to_clean,
@@ -6111,8 +7404,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
tx_buffer->next_to_watch,
jiffies,
tx_buffer->next_to_watch->wb.status);
- netif_stop_subqueue(tx_ring->netdev,
- tx_ring->queue_index);
+ if (netif_is_multiqueue(netdev_ring(tx_ring)))
+ netif_stop_subqueue(netdev_ring(tx_ring),
+ ring_queue_index(tx_ring));
+ else
+ netif_stop_queue(netdev_ring(tx_ring));
/* we are about to reset, no point in enabling stuff */
return true;
@@ -6121,33 +7417,63 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets &&
- netif_carrier_ok(tx_ring->netdev) &&
- igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
+ netif_carrier_ok(netdev_ring(tx_ring)) &&
+ igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
smp_mb();
- if (__netif_subqueue_stopped(tx_ring->netdev,
- tx_ring->queue_index) &&
- !(test_bit(__IGB_DOWN, &adapter->state))) {
- netif_wake_subqueue(tx_ring->netdev,
- tx_ring->queue_index);
-
- u64_stats_update_begin(&tx_ring->tx_syncp);
- tx_ring->tx_stats.restart_queue++;
- u64_stats_update_end(&tx_ring->tx_syncp);
+ if (netif_is_multiqueue(netdev_ring(tx_ring))) {
+ if (__netif_subqueue_stopped(netdev_ring(tx_ring),
+ ring_queue_index(tx_ring)) &&
+ !(test_bit(__IGB_DOWN, &adapter->state))) {
+ netif_wake_subqueue(netdev_ring(tx_ring),
+ ring_queue_index(tx_ring));
+ tx_ring->tx_stats.restart_queue++;
+ }
+ } else {
+ if (netif_queue_stopped(netdev_ring(tx_ring)) &&
+ !(test_bit(__IGB_DOWN, &adapter->state))) {
+ netif_wake_queue(netdev_ring(tx_ring));
+ tx_ring->tx_stats.restart_queue++;
+ }
}
}
return !!budget;
}
+#ifdef HAVE_VLAN_RX_REGISTER
+/**
+ * igb_receive_skb - helper function to handle rx indications
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ **/
+static void igb_receive_skb(struct igb_q_vector *q_vector,
+ struct sk_buff *skb)
+{
+ struct vlan_group **vlgrp = netdev_priv(skb->dev);
+
+ if (IGB_CB(skb)->vid) {
+ if (*vlgrp) {
+ vlan_gro_receive(&q_vector->napi, *vlgrp,
+ IGB_CB(skb)->vid, skb);
+ } else {
+ dev_kfree_skb_any(skb);
+ }
+ } else {
+ napi_gro_receive(&q_vector->napi, skb);
+ }
+}
+
+#endif /* HAVE_VLAN_RX_REGISTER */
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
/**
- * igb_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
+ * igb_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
*
- * Synchronizes page for reuse by the adapter
+ * Synchronizes page for reuse by the adapter
**/
static void igb_reuse_rx_page(struct igb_ring *rx_ring,
struct igb_rx_buffer *old_buff)
@@ -6162,7 +7488,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
- memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
+ *new_buff = *old_buff;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
@@ -6187,39 +7513,34 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
/* flip page offset to other buffer */
rx_buffer->page_offset ^= IGB_RX_BUFSZ;
- /* since we are the only owner of the page and we need to
- * increment it, just set the value to 2 in order to avoid
- * an unnecessary locked operation
- */
- atomic_set(&page->_count, 2);
#else
/* move offset up to the next cache line */
rx_buffer->page_offset += truesize;
if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
return false;
+#endif
/* bump ref count on page before it is given to the stack */
get_page(page);
-#endif
return true;
}
/**
- * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
- * @skb: sk_buff to place the data into
+ * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
*
- * This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
*
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
**/
static bool igb_add_rx_frag(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
@@ -6227,22 +7548,27 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = IGB_RX_BUFSZ;
#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
+ unsigned int pull_len;
- if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
+ if (unlikely(skb_is_nonlinear(skb)))
+ goto add_tail_frag;
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
- va += IGB_TS_HDR_LEN;
- size -= IGB_TS_HDR_LEN;
- }
+#ifdef HAVE_PTP_1588_CLOCK
+ if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+ va += IGB_TS_HDR_LEN;
+ size -= IGB_TS_HDR_LEN;
+ }
+#endif /* HAVE_PTP_1588_CLOCK */
+ if (likely(size <= IGB_RX_HDR_LEN)) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* we can reuse buffer as-is, just make sure it is local */
@@ -6254,8 +7580,21 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
return false;
}
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ va += pull_len;
+ size -= pull_len;
+
+add_tail_frag:
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, truesize);
+ (unsigned long)va & ~PAGE_MASK, size, truesize);
return igb_can_reuse_rx_page(rx_buffer, page, truesize);
}
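/*
 * Sketch of the header pull done in igb_add_rx_frag() above: copy just the
 * protocol headers into the linear area, rounded up to a long so the memcpy
 * stays word sized, and leave the payload in the page fragment. Hypothetical
 * helper mirroring the eth_get_headlen()/ALIGN() calls used above, not
 * driver code.
 */
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static unsigned int example_pull_headers(struct sk_buff *skb,
					 unsigned char *va, unsigned int max)
{
	unsigned int pull_len = eth_get_headlen(va, max);

	/* may copy a few bytes past pull_len into the tailroom, as above */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
	return pull_len;
}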
@@ -6290,7 +7629,8 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
return NULL;
}
- /* we will be copying header into skb->data in
+ /*
+ * we will be copying header into skb->data in
* pskb_may_pull so it is in our interest to prefetch
* it now to avoid a possible cache miss
*/
@@ -6320,6 +7660,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
return skb;
}
+#endif
static inline void igb_rx_checksum(struct igb_ring *ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -6331,23 +7672,22 @@ static inline void igb_rx_checksum(struct igb_ring *ring,
return;
/* Rx checksum disabled via ethtool */
- if (!(ring->netdev->features & NETIF_F_RXCSUM))
+ if (!(netdev_ring(ring)->features & NETIF_F_RXCSUM))
return;
/* TCP/UDP checksum error bit is set */
if (igb_test_staterr(rx_desc,
E1000_RXDEXT_STATERR_TCPE |
E1000_RXDEXT_STATERR_IPE)) {
- /* work around errata with sctp packets where the TCPE aka
+ /*
+ * work around errata with sctp packets where the TCPE aka
* L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
* packets, (aka let the stack check the crc32c)
*/
if (!((skb->len == 60) &&
- test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
- u64_stats_update_begin(&ring->rx_syncp);
+ test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags)))
ring->rx_stats.csum_err++;
- u64_stats_update_end(&ring->rx_syncp);
- }
+
/* let the stack verify checksum errors */
return;
}
@@ -6355,293 +7695,725 @@ static inline void igb_rx_checksum(struct igb_ring *ring,
if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
E1000_RXD_STAT_UDPCS))
skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- dev_dbg(ring->dev, "cksum success: bits %08X\n",
- le32_to_cpu(rx_desc->wb.upper.status_error));
}
+#ifdef NETIF_F_RXHASH
static inline void igb_rx_hash(struct igb_ring *ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- if (ring->netdev->features & NETIF_F_RXHASH)
- skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+ if (netdev_ring(ring)->features & NETIF_F_RXHASH)
+ skb_set_hash(skb,
+ le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+ PKT_HASH_TYPE_L3);
}
+#endif
+#ifndef IGB_NO_LRO
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
/**
- * igb_is_non_eop - process handling of non-EOP buffers
- * @rx_ring: Rx ring being processed
- * @rx_desc: Rx descriptor for current buffer
- * @skb: current socket buffer containing buffer in progress
+ * igb_merge_active_tail - merge active tail into lro skb
+ * @tail: pointer to active tail in frag_list
*
- * This function updates next to clean. If the buffer is an EOP buffer
- * this function exits returning false, otherwise it will place the
- * sk_buff in the next buffer to be chained and return true indicating
- * that this is in fact a non-EOP buffer.
+ * This function merges the length and data of an active tail into the
+ * skb containing the frag_list. It resets the tail's pointer to the head,
+ * but it leaves the head's pointer to the tail intact.
**/
-static bool igb_is_non_eop(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc)
+static inline struct sk_buff *igb_merge_active_tail(struct sk_buff *tail)
{
- u32 ntc = rx_ring->next_to_clean + 1;
+ struct sk_buff *head = IGB_CB(tail)->head;
- /* fetch, update, and store next to clean */
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
+ if (!head)
+ return tail;
- prefetch(IGB_RX_DESC(rx_ring, ntc));
+ head->len += tail->len;
+ head->data_len += tail->len;
+ head->truesize += tail->len;
- if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
- return false;
+ IGB_CB(tail)->head = NULL;
- return true;
+ return head;
}
/**
- * igb_get_headlen - determine size of header for LRO/GRO
- * @data: pointer to the start of the headers
- * @max_len: total length of section to find headers in
+ * igb_add_active_tail - adds an active tail into the skb frag_list
+ * @head: pointer to the start of the skb
+ * @tail: pointer to active tail to add to frag_list
*
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, and GRO offloads. The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
+ * This function adds an active tail to the end of the frag list. This tail
+ * will still be receiving data, so we cannot yet add its stats to the main
+ * skb. That is done via igb_merge_active_tail.
**/
-static unsigned int igb_get_headlen(unsigned char *data,
- unsigned int max_len)
-{
- union {
- unsigned char *network;
- /* l2 headers */
- struct ethhdr *eth;
- struct vlan_hdr *vlan;
- /* l3 headers */
- struct iphdr *ipv4;
- struct ipv6hdr *ipv6;
- } hdr;
- __be16 protocol;
- u8 nexthdr = 0; /* default to not TCP */
- u8 hlen;
-
- /* this should never happen, but better safe than sorry */
- if (max_len < ETH_HLEN)
- return max_len;
-
- /* initialize network frame pointer */
- hdr.network = data;
-
- /* set first protocol and move network header forward */
- protocol = hdr.eth->h_proto;
- hdr.network += ETH_HLEN;
-
- /* handle any vlan tag if present */
- if (protocol == __constant_htons(ETH_P_8021Q)) {
- if ((hdr.network - data) > (max_len - VLAN_HLEN))
- return max_len;
-
- protocol = hdr.vlan->h_vlan_encapsulated_proto;
- hdr.network += VLAN_HLEN;
- }
-
- /* handle L3 protocols */
- if (protocol == __constant_htons(ETH_P_IP)) {
- if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
- return max_len;
-
- /* access ihl as a u8 to avoid unaligned access on ia64 */
- hlen = (hdr.network[0] & 0x0F) << 2;
-
- /* verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct iphdr))
- return hdr.network - data;
-
- /* record next protocol if header is present */
- if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
- nexthdr = hdr.ipv4->protocol;
- } else if (protocol == __constant_htons(ETH_P_IPV6)) {
- if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
- return max_len;
-
- /* record next protocol */
- nexthdr = hdr.ipv6->nexthdr;
- hlen = sizeof(struct ipv6hdr);
+static inline void igb_add_active_tail(struct sk_buff *head,
+ struct sk_buff *tail)
+{
+ struct sk_buff *old_tail = IGB_CB(head)->tail;
+
+ if (old_tail) {
+ igb_merge_active_tail(old_tail);
+ old_tail->next = tail;
} else {
- return hdr.network - data;
+ skb_shinfo(head)->frag_list = tail;
}
- /* relocate pointer to start of L4 header */
- hdr.network += hlen;
+ IGB_CB(tail)->head = head;
+ IGB_CB(head)->tail = tail;
- /* finally sort out TCP */
- if (nexthdr == IPPROTO_TCP) {
- if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
- return max_len;
+ IGB_CB(head)->append_cnt++;
+}
- /* access doff as a u8 to avoid unaligned access on ia64 */
- hlen = (hdr.network[12] & 0xF0) >> 2;
+/**
+ * igb_close_active_frag_list - cleanup pointers on a frag_list skb
+ * @head: pointer to head of an active frag list
+ *
+ * This function clears the frag_tail_tracker pointer on an active
+ * frag_list and returns true if the pointer was actually set.
+ **/
+static inline bool igb_close_active_frag_list(struct sk_buff *head)
+{
+ struct sk_buff *tail = IGB_CB(head)->tail;
- /* verify hlen meets minimum size requirements */
- if (hlen < sizeof(struct tcphdr))
- return hdr.network - data;
+ if (!tail)
+ return false;
- hdr.network += hlen;
- } else if (nexthdr == IPPROTO_UDP) {
- if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
- return max_len;
+ igb_merge_active_tail(tail);
- hdr.network += sizeof(struct udphdr);
- }
+ IGB_CB(head)->tail = NULL;
- /* If everything has gone correctly hdr.network should be the
- * data section of the packet and will be the end of the header.
- * If not then it probably represents the end of the last recognized
- * header.
- */
- if ((hdr.network - data) < max_len)
- return hdr.network - data;
- else
- return max_len;
+ return true;
}
+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
/**
- * igb_pull_tail - igb specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being adjusted
+ * igb_can_lro - returns true if packet is TCP/IPV4 and LRO is enabled
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the rx descriptor
+ * @skb: pointer to the skb to be merged
*
- * This function is an igb specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void igb_pull_tail(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
+ **/
+static inline bool igb_can_lro(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct iphdr *iph = (struct iphdr *)skb->data;
+ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+
+ /* verify hardware indicates this is IPv4/TCP */
+ if ((!(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_TCP)) ||
+ !(pkt_info & cpu_to_le16(E1000_RXDADV_PKTTYPE_IPV4))))
+ return false;
+
+ /* .. and LRO is enabled */
+ if (!(netdev_ring(rx_ring)->features & NETIF_F_LRO))
+ return false;
+
+ /* .. and we are not in promiscuous mode */
+ if (netdev_ring(rx_ring)->flags & IFF_PROMISC)
+ return false;
+
+ /* .. and the header is large enough for us to read IP/TCP fields */
+ if (!pskb_may_pull(skb, sizeof(struct igb_lrohdr)))
+ return false;
+
+ /* .. and there are no VLANs on packet */
+ if (skb->protocol != htons(ETH_P_IP))
+ return false;
+
+ /* .. and we are version 4 with no options */
+ if (*(u8 *)iph != 0x45)
+ return false;
+
+ /* .. and the packet is not fragmented */
+ if (iph->frag_off & htons(IP_MF | IP_OFFSET))
+ return false;
+
+ /* .. and that next header is TCP */
+ if (iph->protocol != IPPROTO_TCP)
+ return false;
+
+ return true;
+}
+
+static inline struct igb_lrohdr *igb_lro_hdr(struct sk_buff *skb)
+{
+ return (struct igb_lrohdr *)skb->data;
+}
+
+/**
+ * igb_lro_flush - Indicate packets to upper layer.
+ *
+ * Update the IP and TCP headers of the head skb if more than one
+ * skb is chained, then indicate the packets to the upper layer.
+ **/
+static void igb_lro_flush(struct igb_q_vector *q_vector,
struct sk_buff *skb)
{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned char *va;
- unsigned int pull_len;
+ struct igb_lro_list *lrolist = &q_vector->lrolist;
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
+ __skb_unlink(skb, &lrolist->active);
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- /* retrieve timestamp from buffer */
- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+ if (IGB_CB(skb)->append_cnt) {
+ struct igb_lrohdr *lroh = igb_lro_hdr(skb);
- /* update pointers to remove timestamp header */
- skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
- frag->page_offset += IGB_TS_HDR_LEN;
- skb->data_len -= IGB_TS_HDR_LEN;
- skb->len -= IGB_TS_HDR_LEN;
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ /* close any active lro contexts */
+ igb_close_active_frag_list(skb);
- /* move va to start of packet data */
- va += IGB_TS_HDR_LEN;
+#endif
+ /* incorporate ip header and re-calculate checksum */
+ lroh->iph.tot_len = ntohs(skb->len);
+ lroh->iph.check = 0;
+
+ /* header length is 5 since we know no options exist */
+ lroh->iph.check = ip_fast_csum((u8 *)lroh, 5);
+
+ /* clear TCP checksum to indicate we are an LRO frame */
+ lroh->th.check = 0;
+
+ /* incorporate latest timestamp into the tcp header */
+ if (IGB_CB(skb)->tsecr) {
+ lroh->ts[2] = IGB_CB(skb)->tsecr;
+ lroh->ts[1] = htonl(IGB_CB(skb)->tsval);
+ }
+#ifdef NETIF_F_GSO
+
+#ifdef NAPI_GRO_CB
+ NAPI_GRO_CB(skb)->data_offset = 0;
+#endif
+ skb_shinfo(skb)->gso_size = IGB_CB(skb)->mss;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+#endif
}
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
+#ifdef HAVE_VLAN_RX_REGISTER
+ igb_receive_skb(q_vector, skb);
+#else
+ napi_gro_receive(&q_vector->napi, skb);
+#endif
+ lrolist->stats.flushed++;
+}
+
+static void igb_lro_flush_all(struct igb_q_vector *q_vector)
+{
+ struct igb_lro_list *lrolist = &q_vector->lrolist;
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_reverse_walk_safe(&lrolist->active, skb, tmp)
+ igb_lro_flush(q_vector, skb);
+}
+
+/*
+ * igb_lro_header_ok - validate the TCP/IP header and record LRO metadata
+ **/
+static void igb_lro_header_ok(struct sk_buff *skb)
+{
+ struct igb_lrohdr *lroh = igb_lro_hdr(skb);
+ u16 opt_bytes, data_len;
+
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ IGB_CB(skb)->tail = NULL;
+#endif
+ IGB_CB(skb)->tsecr = 0;
+ IGB_CB(skb)->append_cnt = 0;
+ IGB_CB(skb)->mss = 0;
+
+ /* ensure that the checksum is valid */
+ if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+ return;
+
+ /* If we see CE codepoint in IP header, packet is not mergeable */
+ if (INET_ECN_is_ce(ipv4_get_dsfield(&lroh->iph)))
+ return;
+
+ /* ensure no bits set besides ack or psh */
+ if (lroh->th.fin || lroh->th.syn || lroh->th.rst ||
+ lroh->th.urg || lroh->th.ece || lroh->th.cwr ||
+ !lroh->th.ack)
+ return;
+
+ /* store the total packet length */
+ data_len = ntohs(lroh->iph.tot_len);
+
+ /* remove any padding from the end of the skb */
+ __pskb_trim(skb, data_len);
+
+ /* remove header length from data length */
+ data_len -= sizeof(struct igb_lrohdr);
+
+ /*
+	 * check for timestamps. Since the only option we handle is timestamps,
+	 * we only have to handle the simple case of aligned timestamps
*/
- pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
+ opt_bytes = (lroh->th.doff << 2) - sizeof(struct tcphdr);
+ if (opt_bytes != 0) {
+ if ((opt_bytes != TCPOLEN_TSTAMP_ALIGNED) ||
+ !pskb_may_pull(skb, sizeof(struct igb_lrohdr) +
+ TCPOLEN_TSTAMP_ALIGNED) ||
+ (lroh->ts[0] != htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP)) ||
+ (lroh->ts[2] == 0)) {
+ return;
+ }
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+ IGB_CB(skb)->tsval = ntohl(lroh->ts[1]);
+ IGB_CB(skb)->tsecr = lroh->ts[2];
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
+ data_len -= TCPOLEN_TSTAMP_ALIGNED;
+ }
+
+ /* record data_len as mss for the packet */
+ IGB_CB(skb)->mss = data_len;
+ IGB_CB(skb)->next_seq = ntohl(lroh->th.seq);
}
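
The option word matched in the check above is the conventional "aligned timestamp" prefix: two NOP pads followed by the timestamp kind/length pair. A small illustration using the same kernel constants (TCPOPT_NOP = 1, TCPOPT_TIMESTAMP = 8, TCPOLEN_TIMESTAMP = 10):

/* Illustration only: the prefix the driver compares against is
 * 0x0101080a in network byte order -- NOP, NOP, kind 8, length 10.
 */
__be32 aligned_ts_prefix = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
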
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+static void igb_merge_frags(struct sk_buff *lro_skb, struct sk_buff *new_skb)
+{
+ struct skb_shared_info *sh_info;
+ struct skb_shared_info *new_skb_info;
+ unsigned int data_len;
+
+ sh_info = skb_shinfo(lro_skb);
+ new_skb_info = skb_shinfo(new_skb);
+
+ /* copy frags into the last skb */
+ memcpy(sh_info->frags + sh_info->nr_frags,
+ new_skb_info->frags,
+ new_skb_info->nr_frags * sizeof(skb_frag_t));
+
+ /* copy size data over */
+ sh_info->nr_frags += new_skb_info->nr_frags;
+ data_len = IGB_CB(new_skb)->mss;
+ lro_skb->len += data_len;
+ lro_skb->data_len += data_len;
+ lro_skb->truesize += data_len;
+
+ /* wipe record of data from new_skb */
+ new_skb_info->nr_frags = 0;
+ new_skb->len = new_skb->data_len = 0;
+ dev_kfree_skb_any(new_skb);
+}
+
+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
/**
- * igb_cleanup_headers - Correct corrupted or empty headers
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being fixed
- *
- * Address the case where we are pulling data in on pages only
- * and as such no data is present in the skb header.
+ * igb_lro_receive - if able, queue skb into lro chain
+ * @q_vector: structure containing interrupt and ring information
+ * @new_skb: pointer to current skb being checked
*
- * In addition if skb is not at least 60 bytes we need to pad it so that
- * it is large enough to qualify as a valid Ethernet frame.
- *
- * Returns true if an error was encountered and skb was freed.
+ * Checks whether the given skb is eligible for LRO and, if so, chains it
+ * to the existing lro_skb for the same flow. If no LRO context exists for
+ * the flow, a new one is created.
**/
-static bool igb_cleanup_headers(struct igb_ring *rx_ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- if (unlikely((igb_test_staterr(rx_desc,
- E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
- struct net_device *netdev = rx_ring->netdev;
- if (!(netdev->features & NETIF_F_RXALL)) {
- dev_kfree_skb_any(skb);
- return true;
+static void igb_lro_receive(struct igb_q_vector *q_vector,
+ struct sk_buff *new_skb)
+{
+ struct sk_buff *lro_skb;
+ struct igb_lro_list *lrolist = &q_vector->lrolist;
+ struct igb_lrohdr *lroh = igb_lro_hdr(new_skb);
+ __be32 saddr = lroh->iph.saddr;
+ __be32 daddr = lroh->iph.daddr;
+ __be32 tcp_ports = *(__be32 *)&lroh->th;
+ u16 data_len;
+#ifdef HAVE_VLAN_RX_REGISTER
+ u16 vid = IGB_CB(new_skb)->vid;
+#else
+ u16 vid = new_skb->vlan_tci;
+#endif
+
+ igb_lro_header_ok(new_skb);
+
+ /*
+ * we have a packet that might be eligible for LRO,
+ * so see if it matches anything we might expect
+ */
+ skb_queue_walk(&lrolist->active, lro_skb) {
+ if (*(__be32 *)&igb_lro_hdr(lro_skb)->th != tcp_ports ||
+ igb_lro_hdr(lro_skb)->iph.saddr != saddr ||
+ igb_lro_hdr(lro_skb)->iph.daddr != daddr)
+ continue;
+
+#ifdef HAVE_VLAN_RX_REGISTER
+ if (IGB_CB(lro_skb)->vid != vid)
+#else
+ if (lro_skb->vlan_tci != vid)
+#endif
+ continue;
+
+ /* out of order packet */
+ if (IGB_CB(lro_skb)->next_seq != IGB_CB(new_skb)->next_seq) {
+ igb_lro_flush(q_vector, lro_skb);
+ IGB_CB(new_skb)->mss = 0;
+ break;
+ }
+
+ /* TCP timestamp options have changed */
+ if (!IGB_CB(lro_skb)->tsecr != !IGB_CB(new_skb)->tsecr) {
+ igb_lro_flush(q_vector, lro_skb);
+ break;
+ }
+
+ /* make sure timestamp values are increasing */
+ if (IGB_CB(lro_skb)->tsecr &&
+ IGB_CB(lro_skb)->tsval > IGB_CB(new_skb)->tsval) {
+ igb_lro_flush(q_vector, lro_skb);
+ IGB_CB(new_skb)->mss = 0;
+ break;
}
+
+ data_len = IGB_CB(new_skb)->mss;
+
+		/* Flush the existing context if any of the following hold:
+		 * malformed header
+		 * no tcp data
+		 * resultant packet would be too large
+		 * new skb is larger than our current mss
+		 * data would remain in header
+		 * we would consume more frags than the sk_buff contains
+		 * ack sequence numbers changed
+		 * window size has changed
+		 */
+ if (data_len == 0 ||
+ data_len > IGB_CB(lro_skb)->mss ||
+ data_len > IGB_CB(lro_skb)->free ||
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ data_len != new_skb->data_len ||
+ skb_shinfo(new_skb)->nr_frags >=
+ (MAX_SKB_FRAGS - skb_shinfo(lro_skb)->nr_frags) ||
+#endif
+ igb_lro_hdr(lro_skb)->th.ack_seq != lroh->th.ack_seq ||
+ igb_lro_hdr(lro_skb)->th.window != lroh->th.window) {
+ igb_lro_flush(q_vector, lro_skb);
+ break;
+ }
+
+		/* Remove IP and TCP header */
+ skb_pull(new_skb, new_skb->len - data_len);
+
+ /* update timestamp and timestamp echo response */
+ IGB_CB(lro_skb)->tsval = IGB_CB(new_skb)->tsval;
+ IGB_CB(lro_skb)->tsecr = IGB_CB(new_skb)->tsecr;
+
+ /* update sequence and free space */
+ IGB_CB(lro_skb)->next_seq += data_len;
+ IGB_CB(lro_skb)->free -= data_len;
+
+ /* update append_cnt */
+ IGB_CB(lro_skb)->append_cnt++;
+
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ /* if header is empty pull pages into current skb */
+ igb_merge_frags(lro_skb, new_skb);
+#else
+ /* chain this new skb in frag_list */
+ igb_add_active_tail(lro_skb, new_skb);
+#endif
+
+ if ((data_len < IGB_CB(lro_skb)->mss) || lroh->th.psh ||
+ skb_shinfo(lro_skb)->nr_frags == MAX_SKB_FRAGS) {
+ igb_lro_hdr(lro_skb)->th.psh |= lroh->th.psh;
+ igb_lro_flush(q_vector, lro_skb);
+ }
+
+ lrolist->stats.coal++;
+ return;
}
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- igb_pull_tail(rx_ring, rx_desc, skb);
+ if (IGB_CB(new_skb)->mss && !lroh->th.psh) {
+ /* if we are at capacity flush the tail */
+ if (skb_queue_len(&lrolist->active) >= IGB_LRO_MAX) {
+ lro_skb = skb_peek_tail(&lrolist->active);
+ if (lro_skb)
+ igb_lro_flush(q_vector, lro_skb);
+ }
- /* if skb_pad returns an error the skb was freed */
- if (unlikely(skb->len < 60)) {
- int pad_len = 60 - skb->len;
+ /* update sequence and free space */
+ IGB_CB(new_skb)->next_seq += IGB_CB(new_skb)->mss;
+ IGB_CB(new_skb)->free = 65521 - new_skb->len;
- if (skb_pad(skb, pad_len))
- return true;
- __skb_put(skb, pad_len);
+ /* .. and insert at the front of the active list */
+ __skb_queue_head(&lrolist->active, new_skb);
+
+ lrolist->stats.coal++;
+ return;
}
- return false;
+ /* packet not handled by any of the above, pass it to the stack */
+#ifdef HAVE_VLAN_RX_REGISTER
+ igb_receive_skb(q_vector, new_skb);
+#else
+ napi_gro_receive(&q_vector->napi, new_skb);
+#endif
}
+#endif /* IGB_NO_LRO */
/**
- * igb_process_skb_fields - Populate skb header fields from Rx descriptor
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @rx_desc: pointer to the EOP Rx descriptor
- * @skb: pointer to current skb being populated
+ * igb_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
*
- * This function checks the ring, descriptor, and packet information in
- * order to populate the hash, checksum, VLAN, timestamp, protocol, and
- * other fields within the skb.
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and
+ * other fields within the skb.
**/
static void igb_process_skb_fields(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct net_device *dev = rx_ring->netdev;
+ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+	bool notype = false;
+#ifdef NETIF_F_RXHASH
igb_rx_hash(rx_ring, rx_desc, skb);
+#endif
igb_rx_checksum(rx_ring, rx_desc, skb);
- igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
+ /* update packet type stats */
+ switch (pkt_info & E1000_RXDADV_PKTTYPE_ILMASK) {
+ case E1000_RXDADV_PKTTYPE_IPV4:
+ rx_ring->pkt_stats.ipv4_packets++;
+ break;
+ case E1000_RXDADV_PKTTYPE_IPV4_EX:
+ rx_ring->pkt_stats.ipv4e_packets++;
+ break;
+ case E1000_RXDADV_PKTTYPE_IPV6:
+ rx_ring->pkt_stats.ipv6_packets++;
+ break;
+ case E1000_RXDADV_PKTTYPE_IPV6_EX:
+ rx_ring->pkt_stats.ipv6e_packets++;
+ break;
+ default:
+ notype = true;
+ break;
+ }
+
+ switch (pkt_info & E1000_RXDADV_PKTTYPE_TLMASK) {
+ case E1000_RXDADV_PKTTYPE_TCP:
+ rx_ring->pkt_stats.tcp_packets++;
+ break;
+ case E1000_RXDADV_PKTTYPE_UDP:
+ rx_ring->pkt_stats.udp_packets++;
+ break;
+ case E1000_RXDADV_PKTTYPE_SCTP:
+ rx_ring->pkt_stats.sctp_packets++;
+ break;
+ case E1000_RXDADV_PKTTYPE_NFS:
+ rx_ring->pkt_stats.nfs_packets++;
+ break;
+ case E1000_RXDADV_PKTTYPE_NONE:
+ if (notype)
+ rx_ring->pkt_stats.other_packets++;
+ break;
+ default:
+ break;
+ }
+#ifdef HAVE_PTP_1588_CLOCK
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
+ !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
+ igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
+
+#endif /* HAVE_PTP_1588_CLOCK */
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+#else
+ if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+#endif
igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
- u16 vid;
+ u16 vid = 0;
+
if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
vid = be16_to_cpu(rx_desc->wb.upper.vlan);
else
vid = le16_to_cpu(rx_desc->wb.upper.vlan);
-
+#ifdef HAVE_VLAN_RX_REGISTER
+ IGB_CB(skb)->vid = vid;
+ } else {
+ IGB_CB(skb)->vid = 0;
+#else
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+#endif
}
skb_record_rx_queue(skb, rx_ring->queue_index);
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ skb->protocol = eth_type_trans(skb, dev);
+}
+
+/**
+ * igb_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool igb_is_non_eop(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(IGB_RX_DESC(rx_ring, ntc));
+
+ if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
+ return false;
+
+ return true;
+}
+
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+/* igb_clean_rx_irq - legacy receive path */
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
+{
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+ unsigned int total_bytes = 0, total_packets = 0;
+ u16 cleaned_count = igb_desc_unused(rx_ring);
+
+ do {
+ struct igb_rx_buffer *rx_buffer;
+ union e1000_adv_rx_desc *rx_desc;
+ struct sk_buff *skb;
+ u16 ntc;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ ntc = rx_ring->next_to_clean;
+ rx_desc = IGB_RX_DESC(rx_ring, ntc);
+ rx_buffer = &rx_ring->rx_buffer_info[ntc];
+
+ if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
+ break;
+
+ /*
+ * This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+ rmb();
+
+ skb = rx_buffer->skb;
+
+ prefetch(skb->data);
+
+ /* pull the header of the skb in */
+ __skb_put(skb, le16_to_cpu(rx_desc->wb.upper.length));
+
+ /* clear skb reference in buffer info structure */
+ rx_buffer->skb = NULL;
+
+ cleaned_count++;
+
+ BUG_ON(igb_is_non_eop(rx_ring, rx_desc));
+
+ dma_unmap_single(rx_ring->dev, rx_buffer->dma,
+ rx_ring->rx_buffer_len,
+ DMA_FROM_DEVICE);
+ rx_buffer->dma = 0;
+
+ if (igb_test_staterr(rx_desc,
+ E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ total_bytes += skb->len;
+
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igb_process_skb_fields(rx_ring, rx_desc, skb);
+
+#ifndef IGB_NO_LRO
+ if (igb_can_lro(rx_ring, rx_desc, skb))
+ igb_lro_receive(q_vector, skb);
+ else
+#endif
+#ifdef HAVE_VLAN_RX_REGISTER
+ igb_receive_skb(q_vector, skb);
+#else
+ napi_gro_receive(&q_vector->napi, skb);
+#endif
+
+#ifndef NETIF_F_GRO
+ netdev_ring(rx_ring)->last_rx = jiffies;
+
+#endif
+ /* update budget accounting */
+ total_packets++;
+ } while (likely(total_packets < budget));
+
+ rx_ring->rx_stats.packets += total_packets;
+ rx_ring->rx_stats.bytes += total_bytes;
+ q_vector->rx.total_packets += total_packets;
+ q_vector->rx.total_bytes += total_bytes;
+
+ if (cleaned_count)
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
+
+#ifndef IGB_NO_LRO
+ igb_lro_flush_all(q_vector);
+
+#endif /* IGB_NO_LRO */
+ return (total_packets < budget);
+}
+#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+
+/**
+ * igb_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool igb_cleanup_headers(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+
+ if (unlikely((igb_test_staterr(rx_desc,
+ E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
+ struct net_device *netdev = rx_ring->netdev;
+
+ if (!(netdev->features & NETIF_F_RXALL)) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+ }
+
+ /* if skb_pad returns an error the skb was freed */
+ if (unlikely(skb->len < 60)) {
+ int pad_len = 60 - skb->len;
+
+ if (skb_pad(skb, pad_len))
+ return true;
+ __skb_put(skb, pad_len);
+ }
+
+ return false;
}
-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+/* igb_clean_rx_irq - packet split receive path */
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
{
struct igb_ring *rx_ring = q_vector->rx.ring;
struct sk_buff *skb = rx_ring->skb;
@@ -6662,7 +8434,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
break;
- /* This memory barrier is needed to keep us from reading
+ /*
+ * This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
* RXD_STAT_DD bit is set
*/
@@ -6693,7 +8466,20 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* populate checksum, timestamp, VLAN, and protocol */
igb_process_skb_fields(rx_ring, rx_desc, skb);
- napi_gro_receive(&q_vector->napi, skb);
+#ifndef IGB_NO_LRO
+ if (igb_can_lro(rx_ring, rx_desc, skb))
+ igb_lro_receive(q_vector, skb);
+ else
+#endif
+#ifdef HAVE_VLAN_RX_REGISTER
+ igb_receive_skb(q_vector, skb);
+#else
+ napi_gro_receive(&q_vector->napi, skb);
+#endif
+#ifndef NETIF_F_GRO
+
+ netdev_ring(rx_ring)->last_rx = jiffies;
+#endif
/* reset skb pointer */
skb = NULL;
@@ -6705,19 +8491,64 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
- u64_stats_update_begin(&rx_ring->rx_syncp);
rx_ring->rx_stats.packets += total_packets;
rx_ring->rx_stats.bytes += total_bytes;
- u64_stats_update_end(&rx_ring->rx_syncp);
q_vector->rx.total_packets += total_packets;
q_vector->rx.total_bytes += total_bytes;
if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count);
+#ifndef IGB_NO_LRO
+ igb_lro_flush_all(q_vector);
+
+#endif /* IGB_NO_LRO */
return (total_packets < budget);
}
+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
+
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *bi)
+{
+ struct sk_buff *skb = bi->skb;
+ dma_addr_t dma = bi->dma;
+
+ if (dma)
+ return true;
+ if (likely(!skb)) {
+ skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring),
+ rx_ring->rx_buffer_len);
+ bi->skb = skb;
+ if (!skb) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ /* initialize skb for ring */
+ skb_record_rx_queue(skb, ring_queue_index(rx_ring));
+ }
+
+ dma = dma_map_single(rx_ring->dev, skb->data,
+ rx_ring->rx_buffer_len, DMA_FROM_DEVICE);
+
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ dev_kfree_skb_any(skb);
+ bi->skb = NULL;
+
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
+ }
+
+ bi->dma = dma;
+ return true;
+}
+
+#else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
struct igb_rx_buffer *bi)
{
@@ -6729,7 +8560,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
/* alloc new page for storage */
- page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
+ page = alloc_page(GFP_ATOMIC | __GFP_COLD);
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
return false;
@@ -6738,7 +8569,8 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
/* map page for use */
dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- /* if mapping failed free memory back to system since
+ /*
+ * if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
@@ -6755,9 +8587,10 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
}
+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
/**
- * igb_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * igb_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
**/
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
@@ -6774,13 +8607,22 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
i -= rx_ring->count;
do {
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ if (!igb_alloc_mapped_skb(rx_ring, bi))
+#else
if (!igb_alloc_mapped_page(rx_ring, bi))
+#endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
break;
- /* Refresh the desc even if buffer_addrs didn't change
+ /*
+ * Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+#else
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+#endif
rx_desc++;
bi++;
@@ -6803,10 +8645,13 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
/* record the next descriptor to use */
rx_ring->next_to_use = i;
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
/* update next to alloc since we have filled the ring */
rx_ring->next_to_alloc = i;
- /* Force memory writes to complete before letting h/w
+#endif
+ /*
+ * Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
@@ -6816,6 +8661,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
}
}
+#ifdef SIOCGMIIPHY
/**
* igb_mii_ioctl -
* @netdev:
@@ -6835,17 +8681,20 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
data->phy_id = adapter->hw.phy.addr;
break;
case SIOCGMIIREG:
- if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
- &data->val_out))
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+ &data->val_out))
return -EIO;
break;
case SIOCSMIIREG:
default:
return -EOPNOTSUPP;
}
- return 0;
+ return E1000_SUCCESS;
}
+#endif
/**
* igb_ioctl -
* @netdev:
@@ -6855,140 +8704,295 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
+#ifdef SIOCGMIIPHY
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
return igb_mii_ioctl(netdev, ifr, cmd);
+#endif
+#ifdef HAVE_PTP_1588_CLOCK
+#ifdef SIOCGHWTSTAMP
+ case SIOCGHWTSTAMP:
+ return igb_ptp_get_ts_config(netdev, ifr);
+#endif
case SIOCSHWTSTAMP:
- return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
+ return igb_ptp_set_ts_config(netdev, ifr);
+#endif /* HAVE_PTP_1588_CLOCK */
+#ifdef ETHTOOL_OPS_COMPAT
+ case SIOCETHTOOL:
+ return ethtool_ioctl(ifr);
+#endif
default:
return -EOPNOTSUPP;
}
}
-s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
struct igb_adapter *adapter = hw->back;
- if (pcie_capability_read_word(adapter->pdev, reg, value))
+ pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct igb_adapter *adapter = hw->back;
+
+ pci_write_config_word(adapter->pdev, reg, *value);
+}
+
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+ struct igb_adapter *adapter = hw->back;
+ u16 cap_offset;
+
+ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ if (!cap_offset)
return -E1000_ERR_CONFIG;
- return 0;
+ pci_read_config_word(adapter->pdev, cap_offset + reg, value);
+
+ return E1000_SUCCESS;
}
-s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
struct igb_adapter *adapter = hw->back;
+ u16 cap_offset;
- if (pcie_capability_write_word(adapter->pdev, reg, *value))
+ cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+ if (!cap_offset)
return -E1000_ERR_CONFIG;
- return 0;
+ pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
+
+ return E1000_SUCCESS;
}
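
These two compat helpers locate the PCI Express capability with pci_find_capability() and then access registers relative to that offset. A hedged usage sketch, not taken from this patch; hw and adapter are assumed to be in scope as elsewhere in the driver:

/* read the PCIe Link Status word through the compat helper above */
u16 link_status = 0;

if (e1000_read_pcie_cap_reg(hw, PCI_EXP_LNKSTA, &link_status) == E1000_SUCCESS)
	dev_info(pci_dev_to_dev(adapter->pdev),
		 "PCIe link status: 0x%04x\n", link_status);
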
-static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
+#ifdef HAVE_VLAN_RX_REGISTER
+static void igb_vlan_mode(struct net_device *netdev, struct vlan_group *vlgrp)
+#else
+void igb_vlan_mode(struct net_device *netdev, u32 features)
+#endif /* HAVE_VLAN_RX_REGISTER */
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, rctl;
- bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
+ bool enable;
+ int i;
+#ifdef HAVE_VLAN_RX_REGISTER
+ enable = !!vlgrp;
+ igb_irq_disable(adapter);
+
+ adapter->vlgrp = vlgrp;
+
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ igb_irq_enable(adapter);
+#else
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
+#else
+ enable = !!(features & NETIF_F_HW_VLAN_RX);
+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
+#endif /* HAVE_VLAN_RX_REGISTER */
if (enable) {
/* enable VLAN tag insert/strip */
- ctrl = rd32(E1000_CTRL);
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
ctrl |= E1000_CTRL_VME;
- wr32(E1000_CTRL, ctrl);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
/* Disable CFI check */
- rctl = rd32(E1000_RCTL);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
rctl &= ~E1000_RCTL_CFIEN;
- wr32(E1000_RCTL, rctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
} else {
/* disable VLAN tag insert/strip */
- ctrl = rd32(E1000_CTRL);
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
ctrl &= ~E1000_CTRL_VME;
- wr32(E1000_CTRL, ctrl);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+ }
+
+#ifndef CONFIG_IGB_VMDQ_NETDEV
+ for (i = 0; i < adapter->vmdq_pools; i++) {
+ igb_set_vf_vlan_strip(adapter,
+ adapter->vfs_allocated_count + i,
+ enable);
}
+#else
+ igb_set_vf_vlan_strip(adapter,
+ adapter->vfs_allocated_count,
+ enable);
+
+ for (i = 1; i < adapter->vmdq_pools; i++) {
+#ifdef HAVE_VLAN_RX_REGISTER
+ struct igb_vmdq_adapter *vadapter;
+
+ vadapter = netdev_priv(adapter->vmdq_netdev[i-1]);
+
+ enable = !!vadapter->vlgrp;
+#else
+ struct net_device *vnetdev;
+
+ vnetdev = adapter->vmdq_netdev[i-1];
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ enable = !!(vnetdev->features & NETIF_F_HW_VLAN_CTAG_RX);
+#else
+ enable = !!(vnetdev->features & NETIF_F_HW_VLAN_RX);
+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
+#endif /* HAVE_VLAN_RX_REGISTER */
+ igb_set_vf_vlan_strip(adapter,
+ adapter->vfs_allocated_count + i,
+ enable);
+ }
+
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
igb_rlpml_set(adapter);
}
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
static int igb_vlan_rx_add_vid(struct net_device *netdev,
- __be16 proto, u16 vid)
+ __always_unused __be16 proto, u16 vid)
+#else
+static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
+#else
+static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
int pf_id = adapter->vfs_allocated_count;
/* attempt to add filter to vlvf array */
- igb_vlvf_set(adapter, vid, true, pf_id);
+ igb_vlvf_set(adapter, vid, TRUE, pf_id);
/* add the filter since PF can receive vlans w/o entry in vlvf */
- igb_vfta_set(hw, vid, true);
+ igb_vfta_set(adapter, vid, TRUE);
+#ifndef HAVE_NETDEV_VLAN_FEATURES
- set_bit(vid, adapter->active_vlans);
+ /* Copy feature flags from netdev to the vlan netdev for this vid.
+ * This allows things like TSO to bubble down to our vlan device.
+	 * There is no need to update netdev for vlan 0 (DCB), since it
+	 * would not have a v_netdev.
+ */
+ if (adapter->vlgrp) {
+ struct vlan_group *vlgrp = adapter->vlgrp;
+ struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid);
+ if (v_netdev) {
+ v_netdev->features |= netdev->features;
+ vlan_group_set_device(vlgrp, vid, v_netdev);
+ }
+ }
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+#ifndef HAVE_VLAN_RX_REGISTER
+
+ set_bit(vid, adapter->active_vlans);
+#endif /* HAVE_VLAN_RX_REGISTER */
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
return 0;
+#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */
}
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
static int igb_vlan_rx_kill_vid(struct net_device *netdev,
- __be16 proto, u16 vid)
+ __always_unused __be16 proto, u16 vid)
+#else
+static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
+#else
+static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
int pf_id = adapter->vfs_allocated_count;
s32 err;
+#ifdef HAVE_VLAN_RX_REGISTER
+ igb_irq_disable(adapter);
+
+ vlan_group_set_device(adapter->vlgrp, vid, NULL);
+
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ igb_irq_enable(adapter);
+
+#endif /* HAVE_VLAN_RX_REGISTER */
/* remove vlan from VLVF table array */
- err = igb_vlvf_set(adapter, vid, false, pf_id);
+ err = igb_vlvf_set(adapter, vid, FALSE, pf_id);
/* if vid was not present in VLVF just remove it from table */
if (err)
- igb_vfta_set(hw, vid, false);
+ igb_vfta_set(adapter, vid, FALSE);
+#ifndef HAVE_VLAN_RX_REGISTER
clear_bit(vid, adapter->active_vlans);
-
+#endif /* HAVE_VLAN_RX_REGISTER */
+#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID
return 0;
+#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */
}
static void igb_restore_vlan(struct igb_adapter *adapter)
{
+#ifdef HAVE_VLAN_RX_REGISTER
+ igb_vlan_mode(adapter->netdev, adapter->vlgrp);
+
+ if (adapter->vlgrp) {
+ u16 vid;
+
+ for (vid = 0; vid < VLAN_N_VID; vid++) {
+ if (!vlan_group_get_device(adapter->vlgrp, vid))
+ continue;
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ igb_vlan_rx_add_vid(adapter->netdev,
+ htons(ETH_P_8021Q), vid);
+#else
+ igb_vlan_rx_add_vid(adapter->netdev, vid);
+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
+ }
+ }
+#else
u16 vid;
igb_vlan_mode(adapter->netdev, adapter->netdev->features);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
- igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
+#ifdef NETIF_F_HW_VLAN_CTAG_RX
+ igb_vlan_rx_add_vid(adapter->netdev,
+ htons(ETH_P_8021Q), vid);
+#else
+ igb_vlan_rx_add_vid(adapter->netdev, vid);
+#endif /* NETIF_F_HW_VLAN_CTAG_RX */
+#endif /* HAVE_VLAN_RX_REGISTER */
}
-int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
+int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
struct pci_dev *pdev = adapter->pdev;
struct e1000_mac_info *mac = &adapter->hw.mac;
mac->autoneg = 0;
- /* Make sure dplx is at most 1 bit and lsb of speed is not set
- * for the switch() below to work
- */
- if ((spd & 1) || (dplx & ~1))
- goto err_inval;
-
- /* Fiber NIC's only allow 1000 gbps Full duplex
- * and 100Mbps Full duplex for 100baseFx sfp
+	/* SerDes devices do not support 10Mbps Full/Half duplex
+	 * or 100Mbps Half duplex
*/
if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
- switch (spd + dplx) {
+ switch (spddplx) {
case SPEED_10 + DUPLEX_HALF:
case SPEED_10 + DUPLEX_FULL:
case SPEED_100 + DUPLEX_HALF:
- goto err_inval;
+ dev_err(pci_dev_to_dev(pdev),
+ "Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
default:
break;
}
}
- switch (spd + dplx) {
+ switch (spddplx) {
case SPEED_10 + DUPLEX_HALF:
mac->forced_speed_duplex = ADVERTISE_10_HALF;
break;
@@ -7007,17 +9011,52 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
- goto err_inval;
+ dev_err(pci_dev_to_dev(pdev), "Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
}
/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
adapter->hw.phy.mdix = AUTO_ALL_MODES;
return 0;
+}
+
+/* This function should only be called if RTNL lock is held */
+int igb_setup_queues(struct igb_adapter *adapter)
+{
+ struct net_device *dev = adapter->netdev;
+ int err;
+
+ if (adapter->rss_queues == adapter->num_rx_queues) {
+ if (adapter->tss_queues) {
+ if (adapter->tss_queues == adapter->num_tx_queues)
+ return 0;
+ } else if (adapter->vfs_allocated_count ||
+ adapter->rss_queues == adapter->num_tx_queues) {
+ return 0;
+ }
+ }
+
+ /*
+ * Hardware has to reinitialize queues and interrupts to
+ * match the new configuration. Unfortunately, the hardware
+ * is not flexible enough to do this dynamically.
+ */
+ if (netif_running(dev))
+ igb_close(dev);
+
+ igb_clear_interrupt_scheme(adapter);
-err_inval:
- dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
- return -EINVAL;
+ err = igb_init_interrupt_scheme(adapter, true);
+ if (err) {
+ dev_close(dev);
+ return err;
+ }
+
+ if (netif_running(dev))
+ err = igb_open(dev);
+
+ return err;
}
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
@@ -7034,6 +9073,10 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
netif_device_detach(netdev);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_LU)
+ wufc &= ~E1000_WUFC_LNKC;
+
if (netif_running(netdev))
__igb_close(netdev, true);
@@ -7045,37 +9088,31 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
return retval;
#endif
- status = rd32(E1000_STATUS);
- if (status & E1000_STATUS_LU)
- wufc &= ~E1000_WUFC_LNKC;
-
if (wufc) {
igb_setup_rctl(adapter);
igb_set_rx_mode(netdev);
/* turn on all-multi mode if wake on multicast is enabled */
if (wufc & E1000_WUFC_MC) {
- rctl = rd32(E1000_RCTL);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
rctl |= E1000_RCTL_MPE;
- wr32(E1000_RCTL, rctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
- ctrl = rd32(E1000_CTRL);
- /* advertise wake from D3Cold */
- #define E1000_CTRL_ADVD3WUC 0x00100000
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
/* phy power management enable */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
ctrl |= E1000_CTRL_ADVD3WUC;
- wr32(E1000_CTRL, ctrl);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
/* Allow time for pending master requests to run */
- igb_disable_pcie_master(hw);
+ e1000_disable_pcie_master(hw);
- wr32(E1000_WUC, E1000_WUC_PME_EN);
- wr32(E1000_WUFC, wufc);
+ E1000_WRITE_REG(hw, E1000_WUC, E1000_WUC_PME_EN);
+ E1000_WRITE_REG(hw, E1000_WUFC, wufc);
} else {
- wr32(E1000_WUC, 0);
- wr32(E1000_WUFC, 0);
+ E1000_WRITE_REG(hw, E1000_WUC, 0);
+ E1000_WRITE_REG(hw, E1000_WUFC, 0);
}
*enable_wake = wufc || adapter->en_mng_pt;
@@ -7095,12 +9132,17 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
}
#ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
static int igb_suspend(struct device *dev)
+#else
+static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
{
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
+ struct pci_dev *pdev = to_pci_dev(dev);
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
int retval;
bool wake;
- struct pci_dev *pdev = to_pci_dev(dev);
retval = __igb_shutdown(pdev, &wake, 0);
if (retval)
@@ -7115,11 +9157,16 @@ static int igb_suspend(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
static int igb_resume(struct device *dev)
+#else
+static int igb_resume(struct pci_dev *pdev)
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
{
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
struct pci_dev *pdev = to_pci_dev(dev);
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -7131,7 +9178,7 @@ static int igb_resume(struct device *dev)
err = pci_enable_device_mem(pdev);
if (err) {
- dev_err(&pdev->dev,
+ dev_err(pci_dev_to_dev(pdev),
"igb: Cannot enable PCI device from suspend\n");
return err;
}
@@ -7141,18 +9188,18 @@ static int igb_resume(struct device *dev)
pci_enable_wake(pdev, PCI_D3cold, 0);
if (igb_init_interrupt_scheme(adapter, true)) {
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ dev_err(pci_dev_to_dev(pdev),
+ "Unable to allocate memory for queues\n");
return -ENOMEM;
}
igb_reset(adapter);
- /* let the f/w know that the h/w is now under the control of the
- * driver.
+ /* let the f/w know that the h/w is now under the control of the driver.
*/
igb_get_hw_control(adapter);
- wr32(E1000_WUS, ~0);
+ E1000_WRITE_REG(hw, E1000_WUS, ~0);
if (netdev->flags & IFF_UP) {
rtnl_lock();
@@ -7163,10 +9210,12 @@ static int igb_resume(struct device *dev)
}
netif_device_attach(netdev);
+
return 0;
}
#ifdef CONFIG_PM_RUNTIME
+#ifdef HAVE_SYSTEM_SLEEP_PM_OPS
static int igb_runtime_idle(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -7203,89 +9252,51 @@ static int igb_runtime_resume(struct device *dev)
{
return igb_resume(dev);
}
+#endif /* HAVE_SYSTEM_SLEEP_PM_OPS */
#endif /* CONFIG_PM_RUNTIME */
-#endif
+#endif /* CONFIG_PM */
-static void igb_shutdown(struct pci_dev *pdev)
+#ifdef USE_REBOOT_NOTIFIER
+/* only want to do this for 2.4 kernels? */
+static int igb_notify_reboot(struct notifier_block *nb, unsigned long event,
+ void *p)
{
+ struct pci_dev *pdev = NULL;
bool wake;
- __igb_shutdown(pdev, &wake, 0);
-
- if (system_state == SYSTEM_POWER_OFF) {
- pci_wake_from_d3(pdev, wake);
- pci_set_power_state(pdev, PCI_D3hot);
+ switch (event) {
+ case SYS_DOWN:
+ case SYS_HALT:
+ case SYS_POWER_OFF:
+ while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
+ if (pci_dev_driver(pdev) == &igb_driver) {
+ __igb_shutdown(pdev, &wake, 0);
+ if (event == SYS_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+ }
+ }
}
+ return NOTIFY_DONE;
}
-
-#ifdef CONFIG_PCI_IOV
-static int igb_sriov_reinit(struct pci_dev *dev)
+#else
+static void igb_shutdown(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(dev);
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
-
- rtnl_lock();
-
- if (netif_running(netdev))
- igb_close(netdev);
-
- igb_clear_interrupt_scheme(adapter);
+ bool wake = false;
- igb_init_queue_configuration(adapter);
+ __igb_shutdown(pdev, &wake, 0);
- if (igb_init_interrupt_scheme(adapter, true)) {
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
- return -ENOMEM;
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, wake);
+ pci_set_power_state(pdev, PCI_D3hot);
}
-
- if (netif_running(netdev))
- igb_open(netdev);
-
- rtnl_unlock();
-
- return 0;
-}
-
-static int igb_pci_disable_sriov(struct pci_dev *dev)
-{
- int err = igb_disable_sriov(dev);
-
- if (!err)
- err = igb_sriov_reinit(dev);
-
- return err;
-}
-
-static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
-{
- int err = igb_enable_sriov(dev, num_vfs);
-
- if (err)
- goto out;
-
- err = igb_sriov_reinit(dev);
- if (!err)
- return num_vfs;
-
-out:
- return err;
-}
-
-#endif
-static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
-{
-#ifdef CONFIG_PCI_IOV
- if (num_vfs == 0)
- return igb_pci_disable_sriov(dev);
- else
- return igb_pci_enable_sriov(dev, num_vfs);
-#endif
- return 0;
}
+#endif /* USE_REBOOT_NOTIFIER */
#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling 'interrupt' - used by things like netconsole to send skbs
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
@@ -7299,7 +9310,7 @@ static void igb_netpoll(struct net_device *netdev)
for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i];
if (adapter->msix_entries)
- wr32(E1000_EIMC, q_vector->eims_value);
+ E1000_WRITE_REG(hw, E1000_EIMC, q_vector->eims_value);
else
igb_irq_disable(adapter);
napi_schedule(&q_vector->napi);
@@ -7307,20 +9318,98 @@ static void igb_netpoll(struct net_device *netdev)
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
+#ifdef HAVE_PCI_ERS
+#define E1000_DEV_ID_82576_VF 0x10CA
/**
- * igb_io_error_detected - called when PCI error is detected
- * @pdev: Pointer to PCI device
- * @state: The current pci connection state
+ * igb_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
*
- * This function is called after a PCI bus error affecting
- * this device has been detected.
- **/
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_PCI_IOV
+ struct pci_dev *bdev, *vfdev;
+ u32 dw0, dw1, dw2, dw3;
+ int vf, pos;
+ u16 req_id, pf_func;
+
+ if (!(adapter->flags & IGB_FLAG_DETECT_BAD_DMA))
+ goto skip_bad_vf_detection;
+
+ bdev = pdev->bus->self;
+ while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
+ bdev = bdev->bus->self;
+
+ if (!bdev)
+ goto skip_bad_vf_detection;
+
+ pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ goto skip_bad_vf_detection;
+
+ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
+ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
+ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
+ pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);
+
+ req_id = dw1 >> 16;
+ /* On the 82576 if bit 7 of the requestor ID is set then it's a VF */
+ if (!(req_id & 0x0080))
+ goto skip_bad_vf_detection;
+
+ pf_func = req_id & 0x01;
+ if ((pf_func & 1) == (pdev->devfn & 1)) {
+
+ vf = (req_id & 0x7F) >> 1;
+ dev_err(pci_dev_to_dev(pdev),
+ "VF %d has caused a PCIe error\n", vf);
+ dev_err(pci_dev_to_dev(pdev),
+			"TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: %8.8x\tdw3: %8.8x\n",
+ dw0, dw1, dw2, dw3);
+
+ /* Find the pci device of the offending VF */
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ E1000_DEV_ID_82576_VF, NULL);
+ while (vfdev) {
+ if (vfdev->devfn == (req_id & 0xFF))
+ break;
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ E1000_DEV_ID_82576_VF, vfdev);
+ }
+ /*
+ * There's a slim chance the VF could have been hot plugged,
+ * so if it is no longer present we don't need to issue the
+ * VFLR. Just clean up the AER in that case.
+ */
+ if (vfdev) {
+ dev_err(pci_dev_to_dev(pdev),
+ "Issuing VFLR to VF %d\n", vf);
+ pci_write_config_dword(vfdev, 0xA8, 0x00008000);
+ }
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ }
+
+ /*
+ * Even though the error may have occurred on the other port
+ * we still need to increment the vf error reference count for
+ * both ports because the I/O resume function will be called
+ * for both of them.
+ */
+ adapter->vferr_refcount++;
+
+ return PCI_ERS_RESULT_RECOVERED;
+
+skip_bad_vf_detection:
+#endif /* CONFIG_PCI_IOV */
+
netif_device_detach(netdev);
if (state == pci_channel_io_perm_failure)
@@ -7335,22 +9424,21 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
}
/**
- * igb_io_slot_reset - called after the pci bus has been reset.
- * @pdev: Pointer to PCI device
+ * igb_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
*
- * Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the igb_resume routine.
- **/
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the igb_resume routine.
+ */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
pci_ers_result_t result;
- int err;
if (pci_enable_device_mem(pdev)) {
- dev_err(&pdev->dev,
+ dev_err(pci_dev_to_dev(pdev),
"Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else {
@@ -7361,77 +9449,91 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
- igb_reset(adapter);
- wr32(E1000_WUS, ~0);
+ schedule_work(&adapter->reset_task);
+ E1000_WRITE_REG(hw, E1000_WUS, ~0);
result = PCI_ERS_RESULT_RECOVERED;
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
- err);
- /* non-fatal, continue */
- }
+ pci_cleanup_aer_uncorrect_error_status(pdev);
return result;
}
/**
- * igb_io_resume - called when traffic can start flowing again.
- * @pdev: Pointer to PCI device
+ * igb_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
*
- * This callback is called when the error recovery driver tells us that
- * its OK to resume normal operation. Implementation resembles the
- * second-half of the igb_resume routine.
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the igb_resume routine.
*/
static void igb_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
+ if (adapter->vferr_refcount) {
+ dev_info(pci_dev_to_dev(pdev), "Resuming after VF err\n");
+ adapter->vferr_refcount--;
+ return;
+ }
+
if (netif_running(netdev)) {
if (igb_up(adapter)) {
- dev_err(&pdev->dev, "igb_up failed after reset\n");
+ dev_err(pci_dev_to_dev(pdev), "igb_up failed after reset\n");
return;
}
}
netif_device_attach(netdev);
- /* let the f/w know that the h/w is now under the control of the
- * driver.
+ /* let the f/w know that the h/w is now under the control of the driver.
*/
igb_get_hw_control(adapter);
}
-static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
- u8 qsel)
+#endif /* HAVE_PCI_ERS */
+
+int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue)
{
- u32 rar_low, rar_high;
struct e1000_hw *hw = &adapter->hw;
+ int i;
- /* HW expects these in little endian so we reverse the byte order
- * from network order (big endian) to little endian
- */
- rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
- ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
- rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
-
- /* Indicate to hardware the Address is Valid. */
- rar_high |= E1000_RAH_AV;
-
- if (hw->mac.type == e1000_82575)
- rar_high |= E1000_RAH_POOL_1 * qsel;
- else
- rar_high |= E1000_RAH_POOL_1 << qsel;
+ if (is_zero_ether_addr(addr))
+ return 0;
- wr32(E1000_RAL(index), rar_low);
- wrfl();
- wr32(E1000_RAH(index), rar_high);
- wrfl();
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
+ if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)
+ continue;
+ adapter->mac_table[i].state = (IGB_MAC_STATE_MODIFIED |
+ IGB_MAC_STATE_IN_USE);
+ memcpy(adapter->mac_table[i].addr, addr, ETH_ALEN);
+ adapter->mac_table[i].queue = queue;
+ igb_sync_mac_table(adapter);
+ return 0;
+ }
+ return -ENOMEM;
}
+int igb_del_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue)
+{
+ /* search table for addr, if found, set to 0 and sync */
+ int i;
+ struct e1000_hw *hw = &adapter->hw;
+ if (is_zero_ether_addr(addr))
+ return 0;
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
+ if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
+ adapter->mac_table[i].queue == queue) {
+ adapter->mac_table[i].state = IGB_MAC_STATE_MODIFIED;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ igb_sync_mac_table(adapter);
+ return 0;
+ }
+ }
+ return -ENOMEM;
+}
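
For context, igb_add_mac_filter()/igb_del_mac_filter() above keep a software shadow of the hardware RAR table and call igb_sync_mac_table() after every change. As a hedged illustration only (fixed-size table, plain libc, all names invented here rather than taken from the driver), the lookup logic reduces to:

#include <stdio.h>
#include <string.h>

#define TBL_SIZE     16          /* stand-in for hw->mac.rar_entry_count */
#define STATE_IN_USE 0x1

struct mac_entry {
	unsigned char  addr[6];
	unsigned short queue;
	unsigned char  state;
};

static struct mac_entry table[TBL_SIZE];

/* claim the first free slot, mirroring the add path */
static int add_filter(const unsigned char *addr, unsigned short queue)
{
	for (int i = 0; i < TBL_SIZE; i++) {
		if (table[i].state & STATE_IN_USE)
			continue;
		memcpy(table[i].addr, addr, 6);
		table[i].queue = queue;
		table[i].state = STATE_IN_USE;
		return 0;
	}
	return -1;                             /* table full */
}

/* clear the slot whose address and queue match, mirroring the delete path */
static int del_filter(const unsigned char *addr, unsigned short queue)
{
	for (int i = 0; i < TBL_SIZE; i++) {
		if (!memcmp(table[i].addr, addr, 6) &&
		    table[i].queue == queue) {
			memset(&table[i], 0, sizeof(table[i]));
			return 0;
		}
	}
	return -1;                             /* not found */
}

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 };

	add_filter(mac, 0);
	printf("delete: %d\n", del_filter(mac, 0));   /* prints 0 */
	return 0;
}
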
static int igb_set_vf_mac(struct igb_adapter *adapter,
int vf, unsigned char *mac_addr)
{
@@ -7448,15 +9550,17 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
return 0;
}
+#ifdef IFLA_VF_MAX
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct igb_adapter *adapter = netdev_priv(netdev);
+
if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
return -EINVAL;
adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
dev_info(&adapter->pdev->dev,
- "Reload the VF driver to make this change effective.");
+ "Reload the VF driver to make this change effective.\n");
if (test_bit(__IGB_DOWN, &adapter->state)) {
dev_warn(&adapter->pdev->dev,
"The VF MAC address has been set, but the PF device is not up.\n");
@@ -7473,13 +9577,15 @@ static int igb_link_mbps(int internal_link_speed)
return 100;
case SPEED_1000:
return 1000;
+ case SPEED_2500:
+ return 2500;
default:
return 0;
}
}
static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
- int link_speed)
+ int link_speed)
{
int rf_dec, rf_int;
u32 bcnrc_val;
@@ -7488,23 +9594,23 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
/* Calculate the rate factor values to set */
rf_int = link_speed / tx_rate;
rf_dec = (link_speed - (rf_int * tx_rate));
- rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) /
- tx_rate;
+ rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
bcnrc_val = E1000_RTTBCNRC_RS_ENA;
- bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
- E1000_RTTBCNRC_RF_INT_MASK);
+ bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
+ E1000_RTTBCNRC_RF_INT_MASK);
bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
} else {
bcnrc_val = 0;
}
- wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
- /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+ E1000_WRITE_REG(hw, E1000_RTTDQSEL, vf); /* vf X uses queue X */
+ /*
+ * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
* register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
*/
- wr32(E1000_RTTBCNRM, 0x14);
- wr32(E1000_RTTBCNRC, bcnrc_val);
+ E1000_WRITE_REG(hw, E1000_RTTBCNRM(0), 0x14);
+ E1000_WRITE_REG(hw, E1000_RTTBCNRC, bcnrc_val);
}
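
The rate-factor split above implements a fixed-point divider: rf_int is the integer part of link_speed/tx_rate and rf_dec is the remainder scaled up by the width of the RF_INT field. A standalone recalculation, assuming the usual 14-bit shift (the real E1000_RTTBCNRC_RF_INT_SHIFT value lives in the register headers, not in this hunk):

#include <stdio.h>

#define RF_INT_SHIFT 14   /* assumed field width, see note above */

int main(void)
{
	unsigned int link_speed = 1000;   /* Mb/s, as returned by igb_link_mbps() */
	unsigned int tx_rate    = 300;    /* requested VF cap in Mb/s */

	unsigned int rf_int = link_speed / tx_rate;                         /* 3    */
	unsigned int rf_dec = ((link_speed - rf_int * tx_rate)
			       << RF_INT_SHIFT) / tx_rate;                  /* 5461 */

	/* hardware divides the link rate by rf_int + rf_dec/2^14 ~= 3.333,
	 * i.e. 1000 / 3.333 ~= the requested 300 Mb/s */
	printf("rf_int=%u rf_dec=%u\n", rf_int, rf_dec);
	return 0;
}
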
static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
@@ -7512,9 +9618,9 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
int actual_link_speed, i;
bool reset_rate = false;
- /* VF TX rate limit was not set or not supported */
+ /* VF TX rate limit was not set */
if ((adapter->vf_rate_link_speed == 0) ||
- (adapter->hw.mac.type != e1000_82576))
+ (adapter->hw.mac.type != e1000_82576))
return;
actual_link_speed = igb_link_mbps(adapter->link_speed);
@@ -7522,7 +9628,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
reset_rate = true;
adapter->vf_rate_link_speed = 0;
dev_info(&adapter->pdev->dev,
- "Link speed has been changed. VF Transmit rate is disabled\n");
+ "Link speed has been changed. VF Transmit rate is disabled\n");
}
for (i = 0; i < adapter->vfs_allocated_count; i++) {
@@ -7530,12 +9636,16 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
adapter->vf_data[i].tx_rate = 0;
igb_set_vf_rate_limit(&adapter->hw, i,
- adapter->vf_data[i].tx_rate,
- actual_link_speed);
+ adapter->vf_data[i].tx_rate, actual_link_speed);
}
}
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
+ int max_tx_rate)
+#else
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -7546,96 +9656,135 @@ static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
actual_link_speed = igb_link_mbps(adapter->link_speed);
if ((vf >= adapter->vfs_allocated_count) ||
- (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
- (tx_rate < 0) || (tx_rate > actual_link_speed))
+ (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) ||
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+ (max_tx_rate < 0) || (max_tx_rate > actual_link_speed))
+#else
+ (tx_rate < 0) || (tx_rate > actual_link_speed))
+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
return -EINVAL;
adapter->vf_rate_link_speed = actual_link_speed;
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+ adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
+ igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
+#else
adapter->vf_data[vf].tx_rate = (u16)tx_rate;
igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
return 0;
}
-static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
- bool setting)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- u32 reg_val, reg_offset;
-
- if (!adapter->vfs_allocated_count)
- return -EOPNOTSUPP;
-
- if (vf >= adapter->vfs_allocated_count)
- return -EINVAL;
-
- reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
- reg_val = rd32(reg_offset);
- if (setting)
- reg_val |= ((1 << vf) |
- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
- else
- reg_val &= ~((1 << vf) |
- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
- wr32(reg_offset, reg_val);
-
- adapter->vf_data[vf].spoofchk_enabled = setting;
- return E1000_SUCCESS;
-}
-
static int igb_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi)
{
struct igb_adapter *adapter = netdev_priv(netdev);
+
if (vf >= adapter->vfs_allocated_count)
return -EINVAL;
ivi->vf = vf;
memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
+#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+ ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
+ ivi->min_tx_rate = 0;
+#else
ivi->tx_rate = adapter->vf_data[vf].tx_rate;
+#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */
ivi->vlan = adapter->vf_data[vf].pf_vlan;
ivi->qos = adapter->vf_data[vf].pf_qos;
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
+#endif
return 0;
}
-
+#endif
static void igb_vmm_control(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ int count;
u32 reg;
switch (hw->mac.type) {
case e1000_82575:
- case e1000_i210:
- case e1000_i211:
- case e1000_i354:
default:
/* replication is not supported for 82575 */
return;
case e1000_82576:
/* notify HW that the MAC is adding vlan tags */
- reg = rd32(E1000_DTXCTL);
- reg |= E1000_DTXCTL_VLAN_ADDED;
- wr32(E1000_DTXCTL, reg);
+ reg = E1000_READ_REG(hw, E1000_DTXCTL);
+ reg |= (E1000_DTXCTL_VLAN_ADDED |
+ E1000_DTXCTL_SPOOF_INT);
+ E1000_WRITE_REG(hw, E1000_DTXCTL, reg);
+ /* Fall through */
case e1000_82580:
/* enable replication vlan tag stripping */
- reg = rd32(E1000_RPLOLR);
+ reg = E1000_READ_REG(hw, E1000_RPLOLR);
reg |= E1000_RPLOLR_STRVLAN;
- wr32(E1000_RPLOLR, reg);
+ E1000_WRITE_REG(hw, E1000_RPLOLR, reg);
+ /* Fall through */
case e1000_i350:
+ case e1000_i354:
/* none of the above registers are supported by i350 */
break;
}
- if (adapter->vfs_allocated_count) {
- igb_vmdq_set_loopback_pf(hw, true);
- igb_vmdq_set_replication_pf(hw, true);
- igb_vmdq_set_anti_spoofing_pf(hw, true,
- adapter->vfs_allocated_count);
- } else {
- igb_vmdq_set_loopback_pf(hw, false);
- igb_vmdq_set_replication_pf(hw, false);
+ /* Enable Malicious Driver Detection */
+ if ((adapter->vfs_allocated_count) &&
+ (adapter->mdd)) {
+ if (hw->mac.type == e1000_i350)
+ igb_enable_mdd(adapter);
}
+
+ /* enable replication and loopback support */
+ count = adapter->vfs_allocated_count || adapter->vmdq_pools;
+ if (adapter->flags & IGB_FLAG_LOOPBACK_ENABLE && count)
+ e1000_vmdq_set_loopback_pf(hw, 1);
+ e1000_vmdq_set_anti_spoofing_pf(hw,
+ adapter->vfs_allocated_count || adapter->vmdq_pools,
+ adapter->vfs_allocated_count);
+ e1000_vmdq_set_replication_pf(hw, adapter->vfs_allocated_count ||
+ adapter->vmdq_pools);
+}
+
+static void igb_init_fw(struct igb_adapter *adapter)
+{
+ struct e1000_fw_drv_info fw_cmd;
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+ u16 mask;
+
+ if (hw->mac.type == e1000_i210)
+ mask = E1000_SWFW_EEP_SM;
+ else
+ mask = E1000_SWFW_PHY0_SM;
+ /* i211 parts do not support this feature */
+ if (hw->mac.type == e1000_i211)
+ hw->mac.arc_subsystem_valid = false;
+
+ if (!hw->mac.ops.acquire_swfw_sync(hw, mask)) {
+ for (i = 0; i <= FW_MAX_RETRIES; i++) {
+ E1000_WRITE_REG(hw, E1000_FWSTS, E1000_FWSTS_FWRI);
+ fw_cmd.hdr.cmd = FW_CMD_DRV_INFO;
+ fw_cmd.hdr.buf_len = FW_CMD_DRV_INFO_LEN;
+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CMD_RESERVED;
+ fw_cmd.port_num = hw->bus.func;
+ fw_cmd.drv_version = FW_FAMILY_DRV_VER;
+ fw_cmd.hdr.checksum = 0;
+ fw_cmd.hdr.checksum =
+ e1000_calculate_checksum((u8 *)&fw_cmd,
+ (FW_HDR_LEN +
+ fw_cmd.hdr.buf_len));
+ e1000_host_interface_command(hw, (u8 *)&fw_cmd,
+ sizeof(fw_cmd));
+ if (fw_cmd.hdr.cmd_or_resp.ret_status
+ == FW_STATUS_SUCCESS)
+ break;
+ }
+ } else
+ dev_warn(pci_dev_to_dev(adapter->pdev),
+ "Unable to get semaphore, firmware init failed.\n");
+ hw->mac.ops.release_swfw_sync(hw, mask);
}
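
e1000_calculate_checksum() is not part of this hunk; in e1000-family drivers it is conventionally an 8-bit two's-complement sum chosen so that the bytes of the command, checksum included, add up to zero. A standalone sketch of that assumed behaviour (buffer contents are arbitrary example values):

#include <stdio.h>

/* assumed model: negate the byte sum so the full buffer sums to 0 mod 256 */
static unsigned char calc_checksum(const unsigned char *buf, unsigned int len)
{
	unsigned char sum = 0;

	for (unsigned int i = 0; i < len; i++)
		sum += buf[i];
	return (unsigned char)(0 - sum);
}

int main(void)
{
	unsigned char cmd[8] = { 0xdd, 0x05, 0x00, 0x00, 0x01, 0x23, 0x00, 0x00 };

	cmd[7] = calc_checksum(cmd, sizeof(cmd));   /* checksum byte starts as 0 */
	printf("checksum = 0x%02x, verify = 0x%02x\n",
	       cmd[7], (unsigned char)calc_checksum(cmd, sizeof(cmd)));
	return 0;
}
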
static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
@@ -7643,34 +9792,40 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
struct e1000_hw *hw = &adapter->hw;
u32 dmac_thr;
u16 hwm;
+ u32 status;
+
+ if (hw->mac.type == e1000_i211)
+ return;
if (hw->mac.type > e1000_82580) {
- if (adapter->flags & IGB_FLAG_DMAC) {
+ if (adapter->dmac != IGB_DMAC_DISABLE) {
u32 reg;
- /* force threshold to 0. */
- wr32(E1000_DMCTXTH, 0);
+ /* force threshold to 0. */
+ E1000_WRITE_REG(hw, E1000_DMCTXTH, 0);
- /* DMA Coalescing high water mark needs to be greater
+ /*
+ * DMA Coalescing high water mark needs to be greater
* than the Rx threshold. Set hwm to PBA - max frame
* size in 16B units, capping it at PBA - 6KB.
*/
hwm = 64 * pba - adapter->max_frame_size / 16;
if (hwm < 64 * (pba - 6))
hwm = 64 * (pba - 6);
- reg = rd32(E1000_FCRTC);
+ reg = E1000_READ_REG(hw, E1000_FCRTC);
reg &= ~E1000_FCRTC_RTH_COAL_MASK;
reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
& E1000_FCRTC_RTH_COAL_MASK);
- wr32(E1000_FCRTC, reg);
+ E1000_WRITE_REG(hw, E1000_FCRTC, reg);
- /* Set the DMA Coalescing Rx threshold to PBA - 2 * max
+ /*
+ * Set the DMA Coalescing Rx threshold to PBA - 2 * max
* frame size, capping it at PBA - 10KB.
*/
dmac_thr = pba - adapter->max_frame_size / 512;
if (dmac_thr < pba - 10)
dmac_thr = pba - 10;
- reg = rd32(E1000_DMACR);
+ reg = E1000_READ_REG(hw, E1000_DMACR);
reg &= ~E1000_DMACR_DMACTHR_MASK;
reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
& E1000_DMACR_DMACTHR_MASK);
@@ -7678,46 +9833,84 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
/* transition to L0x or L1 if available..*/
reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
- /* watchdog timer= +-1000 usec in 32usec intervals */
- reg |= (1000 >> 5);
+ /* Check whether this is a 2.5Gb backplane connection
+ * before configuring the watchdog timer: the dmac value
+ * is converted into 12.8usec intervals on 2.5Gb links
+ * and into 32usec intervals otherwise
+ */
+ if (hw->mac.type == e1000_i354) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ (!(status & E1000_STATUS_2P5_SKU_OVER)))
+ reg |= ((adapter->dmac * 5) >> 6);
+ else
+ reg |= ((adapter->dmac) >> 5);
+ } else {
+ reg |= ((adapter->dmac) >> 5);
+ }
- /* Disable BMC-to-OS Watchdog Enable */
+ /*
+ * Disable BMC-to-OS Watchdog enable
+ * on devices that support OS-to-BMC
+ */
if (hw->mac.type != e1000_i354)
reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
+ E1000_WRITE_REG(hw, E1000_DMACR, reg);
- wr32(E1000_DMACR, reg);
-
- /* no lower threshold to disable
- * coalescing(smart fifb)-UTRESH=0
+ /* no lower threshold to disable coalescing
+ * (smart fifo) - UTRESH=0
*/
- wr32(E1000_DMCRTRH, 0);
+ E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);
+
+ /* This sets the time to wait before requesting
+ * transition to low power state to number of usecs
+ * needed to receive 1 512 byte frame at gigabit
+ * line rate. On i350 device, time to make transition
+ * to Lx state is delayed by 4 usec with flush disable
+ * bit set to avoid losing mailbox interrupts
+ */
+ reg = E1000_READ_REG(hw, E1000_DMCTLX);
+ if (hw->mac.type == e1000_i350)
+ reg |= IGB_DMCTLX_DCFLUSH_DIS;
- reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
+ /* in a 2.5Gb connection the TTLX unit is 0.4 usec,
+ * so 4 usec is 0x4 * 2.5 = 0xA units; the delay is still 4 usec
+ */
+ if (hw->mac.type == e1000_i354) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ (!(status & E1000_STATUS_2P5_SKU_OVER)))
+ reg |= 0xA;
+ else
+ reg |= 0x4;
+ } else {
+ reg |= 0x4;
+ }
- wr32(E1000_DMCTLX, reg);
+ E1000_WRITE_REG(hw, E1000_DMCTLX, reg);
- /* free space in tx packet buffer to wake from
- * DMA coal
- */
- wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
- (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
+ /* free space in tx pkt buffer to wake from DMA coal */
+ E1000_WRITE_REG(hw, E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
+ (IGB_TX_BUF_4096 + adapter->max_frame_size))
+ >> 6);
- /* make low power state decision controlled
- * by DMA coal
- */
- reg = rd32(E1000_PCIEMISC);
+ /* low power state decision controlled by DMA coal */
+ reg = E1000_READ_REG(hw, E1000_PCIEMISC);
reg &= ~E1000_PCIEMISC_LX_DECISION;
- wr32(E1000_PCIEMISC, reg);
+ E1000_WRITE_REG(hw, E1000_PCIEMISC, reg);
} /* endif adapter->dmac is not disabled */
} else if (hw->mac.type == e1000_82580) {
- u32 reg = rd32(E1000_PCIEMISC);
- wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
- wr32(E1000_DMACR, 0);
+ u32 reg = E1000_READ_REG(hw, E1000_PCIEMISC);
+
+ E1000_WRITE_REG(hw, E1000_PCIEMISC,
+ reg & ~E1000_PCIEMISC_LX_DECISION);
+ E1000_WRITE_REG(hw, E1000_DMACR, 0);
}
}
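
The watermark arithmetic in igb_init_dmac() is easier to follow with concrete numbers; a standalone recomputation (the pba value here is only an example, not a datasheet figure):

#include <stdio.h>

int main(void)
{
	unsigned int pba = 34;          /* Rx packet buffer size in KB (example) */
	unsigned int max_frame = 1522;  /* 1500 MTU + Ethernet/VLAN overhead */

	/* high water mark in 16-byte units, floored at PBA - 6KB */
	unsigned int hwm = 64 * pba - max_frame / 16;
	if (hwm < 64 * (pba - 6))
		hwm = 64 * (pba - 6);

	/* Rx threshold in KB, floored at PBA - 10KB */
	unsigned int dmac_thr = pba - max_frame / 512;
	if (dmac_thr < pba - 10)
		dmac_thr = pba - 10;

	printf("hwm=%u (16B units), dmac_thr=%u KB\n", hwm, dmac_thr);  /* 2081, 32 */
	return 0;
}
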
-/**
- * igb_read_i2c_byte - Reads 8 bit word over I2C
+#ifdef HAVE_I2C_SUPPORT
+/* igb_read_i2c_byte - Reads 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
* @dev_addr: device address
@@ -7725,9 +9918,9 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
*
* Performs byte read operation over I2C interface at
* a specified device address.
- **/
+ */
s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data)
+ u8 dev_addr, u8 *data)
{
struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
struct i2c_client *this_client = adapter->i2c_client;
@@ -7754,8 +9947,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
}
}
-/**
- * igb_write_i2c_byte - Writes 8 bit word over I2C
+/* igb_write_i2c_byte - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
* @dev_addr: device address
@@ -7763,9 +9955,9 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
*
* Performs byte write operation over I2C interface at
* a specified device address.
- **/
+ */
s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data)
+ u8 dev_addr, u8 data)
{
struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
struct i2c_client *this_client = adapter->i2c_client;
@@ -7784,6 +9976,29 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
return E1000_ERR_I2C;
else
return E1000_SUCCESS;
+}
+#endif /* HAVE_I2C_SUPPORT */
+int igb_reinit_queues(struct igb_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err = 0;
+
+ if (netif_running(netdev))
+ igb_close(netdev);
+
+ igb_reset_interrupt_capability(adapter);
+
+ if (igb_init_interrupt_scheme(adapter, true)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ if (netif_running(netdev))
+ err = igb_open(netdev);
+
+ return err;
}
+
/* igb_main.c */
diff --git a/drivers/net/ethernet/intel/igb/igb_param.c b/drivers/net/ethernet/intel/igb/igb_param.c
new file mode 100644
index 000000000000..dfcc76a9f02f
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_param.c
@@ -0,0 +1,872 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2015 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#include <linux/netdevice.h>
+
+#include "igb.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define IGB_MAX_NIC 32
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+#define MAX_NUM_LIST_OPTS 15
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define IGB_PARAM_INIT { [0 ... IGB_MAX_NIC] = OPTION_UNSET }
+#ifndef module_param_array
+/* Module Parameters are always initialized to -1, so that the driver
+ * can tell the difference between no user specified value or the
+ * user asking for the default value.
+ * The true default values are loaded in when igb_check_options is called.
+ *
+ * This is a GCC extension to ANSI C.
+ * See the item "Labeled Elements in Initializers" in the section
+ * "Extensions to the C Language Family" of the GCC documentation.
+ */
+
+#define IGB_PARAM(X, desc) \
+ static const int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
+ MODULE_PARM(X, "1-" __MODULE_STRING(IGB_MAX_NIC) "i"); \
+ MODULE_PARM_DESC(X, desc);
+#else
+#define IGB_PARAM(X, desc) \
+ static int X[IGB_MAX_NIC+1] = IGB_PARAM_INIT; \
+ static unsigned int num_##X; \
+ module_param_array_named(X, X, int, &num_##X, 0); \
+ MODULE_PARM_DESC(X, desc);
+#endif
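
The IGB_PARAM_INIT initializer relies on GCC's ranged designators, as the comment in the fallback branch notes; a tiny standalone demonstration of that initializer (the module_param_array_named()/MODULE_PARM_DESC() half of the macro is kernel-only and is not reproduced here):

#include <stdio.h>

#define MAX_NIC      32
#define OPTION_UNSET (-1)

/* GNU ranged designator, same shape as IGB_PARAM_INIT above */
static int RSS[MAX_NIC + 1] = { [0 ... MAX_NIC] = OPTION_UNSET };

int main(void)
{
	/* every slot starts out "unset" so the driver can tell a user-supplied
	 * 0 apart from "no value given for this board" */
	printf("RSS[0]=%d RSS[%d]=%d\n", RSS[0], MAX_NIC, RSS[MAX_NIC]);
	return 0;
}

Loading the module as "modprobe igb RSS=4,4" would then populate the first two slots and set num_RSS to 2, which is what the per-option "num_X > bd" checks below rely on.
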
+
+/* Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+ */
+IGB_PARAM(InterruptThrottleRate,
+ "Maximum interrupts per second, per vector, (max 100000), default 3=adaptive");
+#define DEFAULT_ITR 3
+#define MAX_ITR 100000
+/* #define MIN_ITR 120 */
+#define MIN_ITR 0
+/* IntMode (Interrupt Mode)
+ *
+ * Valid Range: 0 - 2
+ *
+ * Default Value: 2 (MSI-X)
+ */
+IGB_PARAM(IntMode,
+ "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), default 2");
+#define MAX_INTMODE IGB_INT_MODE_MSIX
+#define MIN_INTMODE IGB_INT_MODE_LEGACY
+
+IGB_PARAM(Node, "set the starting node to allocate memory on, default -1");
+
+/* LLIPort (Low Latency Interrupt TCP Port)
+ *
+ * Valid Range: 0 - 65535
+ *
+ * Default Value: 0 (disabled)
+ */
+IGB_PARAM(LLIPort,
+ "Low Latency Interrupt TCP Port (0-65535), default 0=off");
+
+#define DEFAULT_LLIPORT 0
+#define MAX_LLIPORT 0xFFFF
+#define MIN_LLIPORT 0
+
+/* LLIPush (Low Latency Interrupt on TCP Push flag)
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+IGB_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag (0,1), default 0=off");
+
+#define DEFAULT_LLIPUSH 0
+#define MAX_LLIPUSH 1
+#define MIN_LLIPUSH 0
+
+/* LLISize (Low Latency Interrupt on Packet Size)
+ *
+ * Valid Range: 0 - 1500
+ *
+ * Default Value: 0 (disabled)
+ */
+IGB_PARAM(LLISize,
+ "Low Latency Interrupt on Packet Size (0-1500), default 0=off");
+
+#define DEFAULT_LLISIZE 0
+#define MAX_LLISIZE 1500
+#define MIN_LLISIZE 0
+
+/* RSS (Enable RSS multiqueue receive)
+ *
+ * Valid Range: 0 - 8
+ *
+ * Default Value: 1
+ */
+IGB_PARAM(RSS,
+ "Number of Receive-Side Scaling Descriptor Queues (0-8), default 1, 0=number of cpus");
+
+#define DEFAULT_RSS 1
+#define MAX_RSS 8
+#define MIN_RSS 0
+
+/* VMDQ (Enable VMDq multiqueue receive)
+ *
+ * Valid Range: 0 - 8
+ *
+ * Default Value: 0
+ */
+IGB_PARAM(VMDQ,
+ "Number of Virtual Machine Device Queues: 0-1 = disable, 2-8 enable, default 0");
+
+#define DEFAULT_VMDQ 0
+#define MAX_VMDQ MAX_RSS
+#define MIN_VMDQ 0
+
+/* max_vfs (Enable SR-IOV VF devices)
+ *
+ * Valid Range: 0 - 7
+ *
+ * Default Value: 0
+ */
+IGB_PARAM(max_vfs,
+ "Number of Virtual Functions: 0 = disable, 1-7 enable, default 0");
+
+#define DEFAULT_SRIOV 0
+#define MAX_SRIOV 7
+#define MIN_SRIOV 0
+
+/* MDD (Enable Malicious Driver Detection)
+ *
+ * Only available when SR-IOV is enabled - max_vfs is greater than 0
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1
+ */
+IGB_PARAM(MDD,
+ "Malicious Driver Detection (0/1), default 1 = enabled. Only available when max_vfs is greater than 0");
+
+#ifdef DEBUG
+
+/* Disable Hardware Reset on Tx Hang
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled, i.e. h/w will reset)
+ */
+IGB_PARAM(DisableHwReset, "Disable reset of hardware on Tx hang");
+
+/* Dump Transmit and Receive buffers
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0
+ */
+IGB_PARAM(DumpBuffers, "Dump Tx/Rx buffers on Tx hang or by request");
+
+#endif /* DEBUG */
+
+/* QueuePairs (Enable TX/RX queue pairs for interrupt handling)
+ *
+ * Valid Range: 0 - 1
+ *
+ * Default Value: 1
+ */
+IGB_PARAM(QueuePairs,
+ "Enable Tx/Rx queue pairs for interrupt handling (0,1), default 1=on");
+
+#define DEFAULT_QUEUE_PAIRS 1
+#define MAX_QUEUE_PAIRS 1
+#define MIN_QUEUE_PAIRS 0
+
+/* Enable/disable EEE (a.k.a. IEEE802.3az)
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1
+ */
+IGB_PARAM(EEE,
+ "Enable/disable on parts that support the feature");
+
+/* Enable/disable DMA Coalescing
+ *
+ * Valid Values: 0(off), 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000,
+ * 9000, 10000(msec), 250(usec), 500(usec)
+ *
+ * Default Value: 0
+ */
+IGB_PARAM(DMAC,
+ "Disable or set latency for DMA Coalescing ((0=off, 1000-10000(msec), 250, 500 (usec))");
+
+#ifndef IGB_NO_LRO
+/* Enable/disable Large Receive Offload
+ *
+ * Valid Values: 0(off), 1(on)
+ *
+ * Default Value: 0
+ */
+IGB_PARAM(LRO, "Large Receive Offload (0,1), default 0=off");
+
+#endif
+struct igb_opt_list {
+ int i;
+ char *str;
+};
+struct igb_option {
+ enum { enable_option, range_option, list_option } type;
+ const char *name;
+ const char *err;
+ int def;
+ union {
+ struct { /* range_option info */
+ int min;
+ int max;
+ } r;
+ struct { /* list_option info */
+ int nr;
+ struct igb_opt_list *p;
+ } l;
+ } arg;
+};
+
+static int igb_validate_option(unsigned int *value,
+ struct igb_option *opt,
+ struct igb_adapter *adapter)
+{
+ if (*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ DPRINTK(PROBE, INFO,
+ "%s set to %d\n", opt->name, *value);
+ return 0;
+ }
+ break;
+ case list_option: {
+ int i;
+ struct igb_opt_list *ent;
+
+ for (i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
+ DPRINTK(PROBE, INFO, "%s\n", ent->str);
+ return 0;
+ }
+ }
+ }
+ break;
+ default:
+ BUG();
+ }
+
+ DPRINTK(PROBE, INFO, "Invalid %s value specified (%d) %s\n",
+ opt->name, *value, opt->err);
+ *value = opt->def;
+ return -1;
+}
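
igb_validate_option() is a small tagged-union validator: each option carries either a range or an explicit value list, and anything invalid or unset falls back to the default. A simplified standalone model of the same pattern (not the driver's types; list options omitted):

#include <stdio.h>

struct opt {
	enum { RANGE, ENABLE } type;
	const char *name;
	int def;
	int min, max;
};

static int validate(int *val, const struct opt *o)
{
	if (*val == -1) {                 /* OPTION_UNSET */
		*val = o->def;
		return 0;
	}
	if (o->type == RANGE && *val >= o->min && *val <= o->max)
		return 0;
	if (o->type == ENABLE && (*val == 0 || *val == 1))
		return 0;
	printf("invalid %s value %d, using default %d\n", o->name, *val, o->def);
	*val = o->def;
	return -1;
}

int main(void)
{
	struct opt itr = { RANGE, "InterruptThrottleRate", 3, 0, 100000 };
	int v = 123456;

	validate(&v, &itr);               /* out of range, falls back to 3 */
	printf("itr=%d\n", v);
	return 0;
}
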
+
+/**
+ * igb_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
+ **/
+
+void igb_check_options(struct igb_adapter *adapter)
+{
+ int bd = adapter->bd_number;
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (bd >= IGB_MAX_NIC) {
+ DPRINTK(PROBE, NOTICE,
+ "Warning: no configuration for board #%d\n", bd);
+ DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+#ifndef module_param_array
+ bd = IGB_MAX_NIC;
+#endif
+ }
+
+ { /* Interrupt Throttling Rate */
+ struct igb_option opt = {
+ .type = range_option,
+ .name = "Interrupt Throttling Rate (ints/sec)",
+ .err = "using default of "__MODULE_STRING(DEFAULT_ITR),
+ .def = DEFAULT_ITR,
+ .arg = { .r = { .min = MIN_ITR,
+ .max = MAX_ITR } }
+ };
+
+#ifdef module_param_array
+ if (num_InterruptThrottleRate > bd) {
+#endif
+ unsigned int itr = InterruptThrottleRate[bd];
+
+ switch (itr) {
+ case 0:
+ DPRINTK(PROBE, INFO, "%s turned off\n",
+ opt.name);
+ if (hw->mac.type >= e1000_i350)
+ adapter->dmac = IGB_DMAC_DISABLE;
+ adapter->rx_itr_setting = itr;
+ break;
+ case 1:
+ DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+ opt.name);
+ adapter->rx_itr_setting = itr;
+ break;
+ case 3:
+ DPRINTK(PROBE, INFO,
+ "%s set to dynamic conservative mode\n",
+ opt.name);
+ adapter->rx_itr_setting = itr;
+ break;
+ default:
+ igb_validate_option(&itr, &opt, adapter);
+ /* Save the setting, because the dynamic bits
+ * change itr. On an invalid user value we fall
+ * back to conservative mode; otherwise clear the
+ * lower two bits, which are used as control flags */
+ if (itr == 3) {
+ adapter->rx_itr_setting = itr;
+ } else {
+ adapter->rx_itr_setting = 1000000000
+ / (itr * 256);
+ adapter->rx_itr_setting &= ~3;
+ }
+ break;
+ }
+#ifdef module_param_array
+ } else {
+ adapter->rx_itr_setting = opt.def;
+ }
+#endif
+ adapter->tx_itr_setting = adapter->rx_itr_setting;
+ }
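
The conversion in the default branch above is simpler than it looks: the requested interrupt rate is turned into a period expressed in 256 ns units, and the two low bits are cleared because the driver reuses them as control flags. A quick standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int itr = 8000;                        /* interrupts per second */

	unsigned int setting = 1000000000u / (itr * 256);   /* 125000 ns / 256 */
	setting &= ~3u;                                      /* low bits are flags */

	printf("rx_itr_setting = %u\n", setting);            /* 488 */
	return 0;
}
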
+ { /* Interrupt Mode */
+ struct igb_option opt = {
+ .type = range_option,
+ .name = "Interrupt Mode",
+ .err = "defaulting to 2 (MSI-X)",
+ .def = IGB_INT_MODE_MSIX,
+ .arg = { .r = { .min = MIN_INTMODE,
+ .max = MAX_INTMODE } }
+ };
+
+#ifdef module_param_array
+ if (num_IntMode > bd) {
+#endif
+ unsigned int int_mode = IntMode[bd];
+ igb_validate_option(&int_mode, &opt, adapter);
+ adapter->int_mode = int_mode;
+#ifdef module_param_array
+ } else {
+ adapter->int_mode = opt.def;
+ }
+#endif
+ }
+ { /* Low Latency Interrupt TCP Port */
+ struct igb_option opt = {
+ .type = range_option,
+ .name = "Low Latency Interrupt TCP Port",
+ .err = "using default of "
+ __MODULE_STRING(DEFAULT_LLIPORT),
+ .def = DEFAULT_LLIPORT,
+ .arg = { .r = { .min = MIN_LLIPORT,
+ .max = MAX_LLIPORT } }
+ };
+
+#ifdef module_param_array
+ if (num_LLIPort > bd) {
+#endif
+ adapter->lli_port = LLIPort[bd];
+ if (adapter->lli_port) {
+ igb_validate_option(&adapter->lli_port, &opt,
+ adapter);
+ } else {
+ DPRINTK(PROBE, INFO, "%s turned off\n",
+ opt.name);
+ }
+#ifdef module_param_array
+ } else {
+ adapter->lli_port = opt.def;
+ }
+#endif
+ }
+ { /* Low Latency Interrupt on Packet Size */
+ struct igb_option opt = {
+ .type = range_option,
+ .name = "Low Latency Interrupt on Packet Size",
+ .err = "using default of "
+ __MODULE_STRING(DEFAULT_LLISIZE),
+ .def = DEFAULT_LLISIZE,
+ .arg = { .r = { .min = MIN_LLISIZE,
+ .max = MAX_LLISIZE } }
+ };
+
+#ifdef module_param_array
+ if (num_LLISize > bd) {
+#endif
+ adapter->lli_size = LLISize[bd];
+ if (adapter->lli_size) {
+ igb_validate_option(&adapter->lli_size, &opt,
+ adapter);
+ } else {
+ DPRINTK(PROBE, INFO, "%s turned off\n",
+ opt.name);
+ }
+#ifdef module_param_array
+ } else {
+ adapter->lli_size = opt.def;
+ }
+#endif
+ }
+ { /* Low Latency Interrupt on TCP Push flag */
+ struct igb_option opt = {
+ .type = enable_option,
+ .name = "Low Latency Interrupt on TCP Push flag",
+ .err = "defaulting to Disabled",
+ .def = OPTION_DISABLED
+ };
+
+#ifdef module_param_array
+ if (num_LLIPush > bd) {
+#endif
+ unsigned int lli_push = LLIPush[bd];
+ igb_validate_option(&lli_push, &opt, adapter);
+ adapter->flags |= lli_push ? IGB_FLAG_LLI_PUSH : 0;
+#ifdef module_param_array
+ } else {
+ adapter->flags |= opt.def ? IGB_FLAG_LLI_PUSH : 0;
+ }
+#endif
+ }
+ { /* SRIOV - Enable SR-IOV VF devices */
+ struct igb_option opt = {
+ .type = range_option,
+ .name = "max_vfs - SR-IOV VF devices",
+ .err = "using default of "
+ __MODULE_STRING(DEFAULT_SRIOV),
+ .def = DEFAULT_SRIOV,
+ .arg = { .r = { .min = MIN_SRIOV,
+ .max = MAX_SRIOV } }
+ };
+
+#ifdef module_param_array
+ if (num_max_vfs > bd) {
+#endif
+ adapter->vfs_allocated_count = max_vfs[bd];
+ igb_validate_option(&adapter->vfs_allocated_count,
+ &opt, adapter);
+
+#ifdef module_param_array
+ } else {
+ adapter->vfs_allocated_count = opt.def;
+ }
+#endif
+ if (adapter->vfs_allocated_count) {
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_82580:
+ case e1000_i210:
+ case e1000_i211:
+ case e1000_i354:
+ adapter->vfs_allocated_count = 0;
+ DPRINTK(PROBE, INFO,
+ "SR-IOV option max_vfs not supported.\n");
+ /* Fall through */
+ default:
+ break;
+ }
+ }
+ }
+ { /* VMDQ - Enable VMDq multiqueue receive */
+ struct igb_option opt = {
+ .type = range_option,
+ .name = "VMDQ - VMDq multiqueue queue count",
+ .err = "using default of "__MODULE_STRING(DEFAULT_VMDQ),
+ .def = DEFAULT_VMDQ,
+ .arg = { .r = { .min = MIN_VMDQ,
+ .max = (MAX_VMDQ
+ - adapter->vfs_allocated_count)} }
+ };
+ if ((hw->mac.type != e1000_i210) ||
+ (hw->mac.type != e1000_i211)) {
+#ifdef module_param_array
+ if (num_VMDQ > bd) {
+#endif
+ adapter->vmdq_pools = (VMDQ[bd] == 1 ? 0 : VMDQ[bd]);
+ if (adapter->vfs_allocated_count &&
+ !adapter->vmdq_pools) {
+ DPRINTK(PROBE, INFO,
+ "Enabling SR-IOV requires VMDq be set to at least 1\n");
+ adapter->vmdq_pools = 1;
+ }
+ igb_validate_option(&adapter->vmdq_pools, &opt,
+ adapter);
+
+#ifdef module_param_array
+ } else {
+ if (!adapter->vfs_allocated_count)
+ adapter->vmdq_pools = (opt.def == 1 ? 0
+ : opt.def);
+ else
+ adapter->vmdq_pools = 1;
+ }
+#endif
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+ if (hw->mac.type == e1000_82575 && adapter->vmdq_pools) {
+ DPRINTK(PROBE, INFO,
+ "VMDq not supported on this part.\n");
+ adapter->vmdq_pools = 0;
+ }
+#endif
+
+ } else {
+ DPRINTK(PROBE, INFO, "VMDq option is not supported.\n");
+ adapter->vmdq_pools = opt.def;
+ }
+ }
+ { /* RSS - Enable RSS multiqueue receives */
+ struct igb_option opt = {
+ .type = range_option,
+ .name = "RSS - RSS multiqueue receive count",
+ .err = "using default of "__MODULE_STRING(DEFAULT_RSS),
+ .def = DEFAULT_RSS,
+ .arg = { .r = { .min = MIN_RSS,
+ .max = MAX_RSS } }
+ };
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+#ifndef CONFIG_IGB_VMDQ_NETDEV
+ if (!!adapter->vmdq_pools) {
+ if (adapter->vmdq_pools <= 2) {
+ if (adapter->vmdq_pools == 2)
+ opt.arg.r.max = 3;
+ } else {
+ opt.arg.r.max = 1;
+ }
+ } else {
+ opt.arg.r.max = 4;
+ }
+#else
+ opt.arg.r.max = !!adapter->vmdq_pools ? 1 : 4;
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+ break;
+ case e1000_i210:
+ opt.arg.r.max = 4;
+ break;
+ case e1000_i211:
+ opt.arg.r.max = 2;
+ break;
+ case e1000_82576:
+#ifndef CONFIG_IGB_VMDQ_NETDEV
+ if (!!adapter->vmdq_pools)
+ opt.arg.r.max = 2;
+ break;
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ default:
+ if (!!adapter->vmdq_pools)
+ opt.arg.r.max = 1;
+ break;
+ }
+
+ if (adapter->int_mode != IGB_INT_MODE_MSIX) {
+ DPRINTK(PROBE, INFO,
+ "RSS is not supported when in MSI/Legacy Interrupt mode, %s\n",
+ opt.err);
+ opt.arg.r.max = 1;
+ }
+
+#ifdef module_param_array
+ if (num_RSS > bd) {
+#endif
+ adapter->rss_queues = RSS[bd];
+ switch (adapter->rss_queues) {
+ case 1:
+ break;
+ default:
+ igb_validate_option(&adapter->rss_queues, &opt,
+ adapter);
+ if (adapter->rss_queues)
+ break;
+ case 0:
+ adapter->rss_queues = min_t(u32, opt.arg.r.max,
+ num_online_cpus());
+ break;
+ }
+#ifdef module_param_array
+ } else {
+ adapter->rss_queues = opt.def;
+ }
+#endif
+ }
+ { /* QueuePairs - Enable Tx/Rx queue pairs for interrupt handling */
+ struct igb_option opt = {
+ .type = enable_option,
+ .name =
+ "QueuePairs - Tx/Rx queue pairs for interrupt handling",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+#ifdef module_param_array
+ if (num_QueuePairs > bd) {
+#endif
+ unsigned int qp = QueuePairs[bd];
+ /*
+ * We must enable queue pairs if the number of queues
+ * exceeds the number of available interrupts. We are
+ * limited to 10, or 3 per unallocated vf. On I210 and
+ * I211 devices, we are limited to 5 interrupts.
+ * However, since I211 only supports 2 queues, we do not
+ * need to check and override the user option.
+ */
+ if (qp == OPTION_DISABLED) {
+ if (adapter->rss_queues > 4)
+ qp = OPTION_ENABLED;
+
+ if (adapter->vmdq_pools > 4)
+ qp = OPTION_ENABLED;
+
+ if (adapter->rss_queues > 1 &&
+ (adapter->vmdq_pools > 3 ||
+ adapter->vfs_allocated_count > 6))
+ qp = OPTION_ENABLED;
+
+ if (hw->mac.type == e1000_i210 &&
+ adapter->rss_queues > 2)
+ qp = OPTION_ENABLED;
+
+ if (qp == OPTION_ENABLED)
+ DPRINTK(PROBE, INFO,
+ "Number of queues exceeds available interrupts, %s\n",
+ opt.err);
+ }
+ igb_validate_option(&qp, &opt, adapter);
+ adapter->flags |= qp ? IGB_FLAG_QUEUE_PAIRS : 0;
+#ifdef module_param_array
+ } else {
+ adapter->flags |= opt.def ? IGB_FLAG_QUEUE_PAIRS : 0;
+ }
+#endif
+ }
+ { /* EEE - Enable EEE for capable adapters */
+
+ if (hw->mac.type >= e1000_i350) {
+ struct igb_option opt = {
+ .type = enable_option,
+ .name = "EEE Support",
+ .err = "defaulting to Enabled",
+ .def = OPTION_ENABLED
+ };
+#ifdef module_param_array
+ if (num_EEE > bd) {
+#endif
+ unsigned int eee = EEE[bd];
+ igb_validate_option(&eee, &opt, adapter);
+ adapter->flags |= eee ? IGB_FLAG_EEE : 0;
+ if (eee)
+ hw->dev_spec._82575.eee_disable = false;
+ else
+ hw->dev_spec._82575.eee_disable = true;
+
+#ifdef module_param_array
+ } else {
+ adapter->flags |= opt.def ? IGB_FLAG_EEE : 0;
+ if (adapter->flags & IGB_FLAG_EEE)
+ hw->dev_spec._82575.eee_disable = false;
+ else
+ hw->dev_spec._82575.eee_disable = true;
+ }
+#endif
+ }
+ }
+ { /* DMAC - Enable DMA Coalescing for capable adapters */
+
+ if (hw->mac.type >= e1000_i350) {
+ struct igb_opt_list list[] = {
+ { IGB_DMAC_DISABLE, "DMAC Disable"},
+ { IGB_DMAC_MIN, "DMAC 250 usec"},
+ { IGB_DMAC_500, "DMAC 500 usec"},
+ { IGB_DMAC_EN_DEFAULT, "DMAC 1000 usec"},
+ { IGB_DMAC_2000, "DMAC 2000 usec"},
+ { IGB_DMAC_3000, "DMAC 3000 usec"},
+ { IGB_DMAC_4000, "DMAC 4000 usec"},
+ { IGB_DMAC_5000, "DMAC 5000 usec"},
+ { IGB_DMAC_6000, "DMAC 6000 usec"},
+ { IGB_DMAC_7000, "DMAC 7000 usec"},
+ { IGB_DMAC_8000, "DMAC 8000 usec"},
+ { IGB_DMAC_9000, "DMAC 9000 usec"},
+ { IGB_DMAC_MAX, "DMAC 10000 usec"}
+ };
+ struct igb_option opt = {
+ .type = list_option,
+ .name = "DMA Coalescing",
+ .err = "using default of "
+ __MODULE_STRING(IGB_DMAC_DISABLE),
+ .def = IGB_DMAC_DISABLE,
+ .arg = { .l = { .nr = 13,
+ .p = list
+ }
+ }
+ };
+#ifdef module_param_array
+ if (num_DMAC > bd) {
+#endif
+ unsigned int dmac = DMAC[bd];
+ if (adapter->rx_itr_setting == IGB_DMAC_DISABLE)
+ dmac = IGB_DMAC_DISABLE;
+ igb_validate_option(&dmac, &opt, adapter);
+ switch (dmac) {
+ case IGB_DMAC_DISABLE:
+ adapter->dmac = dmac;
+ break;
+ case IGB_DMAC_MIN:
+ adapter->dmac = dmac;
+ break;
+ case IGB_DMAC_500:
+ adapter->dmac = dmac;
+ break;
+ case IGB_DMAC_EN_DEFAULT:
+ adapter->dmac = dmac;
+ break;
+ case IGB_DMAC_2000:
+ adapter->dmac = dmac;
+ break;
+ case IGB_DMAC_3000:
+ adapter->dmac = dmac;
+ break;
+ case IGB_DMAC_4000:
+ adapter->dmac = dmac;
+ break;
+ case IGB_DMAC_5000:
+ adapter->dmac = dmac;
+ break;
+ case IGB_DMAC_6000:
+ adapter->dmac = dmac;
+ break;
+ case IGB_DMAC_7000:
+ adapter->dmac = dmac;
+ break;
+ case IGB_DMAC_8000:
+ adapter->dmac = dmac;
+ break;
+ case IGB_DMAC_9000:
+ adapter->dmac = dmac;
+ break;
+ case IGB_DMAC_MAX:
+ adapter->dmac = dmac;
+ break;
+ default:
+ adapter->dmac = opt.def;
+ DPRINTK(PROBE, INFO,
+ "Invalid DMAC setting, resetting DMAC to %d\n",
+ opt.def);
+ }
+#ifdef module_param_array
+ } else
+ adapter->dmac = opt.def;
+#endif
+ }
+ }
+#ifndef IGB_NO_LRO
+ { /* LRO - Enable Large Receive Offload */
+ struct igb_option opt = {
+ .type = enable_option,
+ .name = "LRO - Large Receive Offload",
+ .err = "defaulting to Disabled",
+ .def = OPTION_DISABLED
+ };
+ struct net_device *netdev = adapter->netdev;
+#ifdef module_param_array
+ if (num_LRO > bd) {
+#endif
+ unsigned int lro = LRO[bd];
+ igb_validate_option(&lro, &opt, adapter);
+ netdev->features |= lro ? NETIF_F_LRO : 0;
+#ifdef module_param_array
+ } else if (opt.def == OPTION_ENABLED) {
+ netdev->features |= NETIF_F_LRO;
+ }
+#endif
+ }
+#endif /* IGB_NO_LRO */
+ { /* MDD - Enable Malicious Driver Detection. Only available when
+ SR-IOV is enabled. */
+ struct igb_option opt = {
+ .type = enable_option,
+ .name = "Malicious Driver Detection",
+ .err = "defaulting to 1",
+ .def = OPTION_ENABLED,
+ .arg = { .r = { .min = OPTION_DISABLED,
+ .max = OPTION_ENABLED } }
+ };
+
+#ifdef module_param_array
+ if (num_MDD > bd) {
+#endif
+ adapter->mdd = MDD[bd];
+ igb_validate_option((uint *)&adapter->mdd, &opt,
+ adapter);
+#ifdef module_param_array
+ } else {
+ adapter->mdd = opt.def;
+ }
+#endif
+ }
+}
+
diff --git a/drivers/net/ethernet/intel/igb/igb_procfs.c b/drivers/net/ethernet/intel/igb/igb_procfs.c
new file mode 100644
index 000000000000..9302725dd0e0
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_procfs.c
@@ -0,0 +1,356 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2015 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "igb.h"
+#include "e1000_82575.h"
+#include "e1000_hw.h"
+
+#ifdef IGB_PROCFS
+#ifndef IGB_HWMON
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+
+static struct proc_dir_entry *igb_top_dir;
+
+bool igb_thermal_present(struct igb_adapter *adapter)
+{
+ s32 status;
+ struct e1000_hw *hw;
+
+ if (adapter == NULL)
+ return false;
+ hw = &adapter->hw;
+
+ /*
+ * Only set I2C bit-bang mode if an external thermal sensor is
+ * supported on this device.
+ */
+ if (adapter->ets) {
+ status = e1000_set_i2c_bb(hw);
+ if (status != E1000_SUCCESS)
+ return false;
+ }
+
+ status = hw->mac.ops.init_thermal_sensor_thresh(hw);
+ if (status != E1000_SUCCESS)
+ return false;
+
+ return true;
+}
+
+static int igb_macburn(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct e1000_hw *hw;
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ if (adapter == NULL)
+ return snprintf(page, count, "error: no adapter\n");
+
+ hw = &adapter->hw;
+ if (hw == NULL)
+ return snprintf(page, count, "error: no hw data\n");
+
+ return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
+ (unsigned int)hw->mac.perm_addr[0],
+ (unsigned int)hw->mac.perm_addr[1],
+ (unsigned int)hw->mac.perm_addr[2],
+ (unsigned int)hw->mac.perm_addr[3],
+ (unsigned int)hw->mac.perm_addr[4],
+ (unsigned int)hw->mac.perm_addr[5]);
+}
+
+static int igb_macadmn(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct e1000_hw *hw;
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ if (adapter == NULL)
+ return snprintf(page, count, "error: no adapter\n");
+
+ hw = &adapter->hw;
+ if (hw == NULL)
+ return snprintf(page, count, "error: no hw data\n");
+
+ return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n",
+ (unsigned int)hw->mac.addr[0],
+ (unsigned int)hw->mac.addr[1],
+ (unsigned int)hw->mac.addr[2],
+ (unsigned int)hw->mac.addr[3],
+ (unsigned int)hw->mac.addr[4],
+ (unsigned int)hw->mac.addr[5]);
+}
+
+static int igb_numeports(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct e1000_hw *hw;
+ int ports;
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ if (adapter == NULL)
+ return snprintf(page, count, "error: no adapter\n");
+
+ hw = &adapter->hw;
+ if (hw == NULL)
+ return snprintf(page, count, "error: no hw data\n");
+
+ ports = 4;
+
+ return snprintf(page, count, "%d\n", ports);
+}
+
+static int igb_porttype(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ if (adapter == NULL)
+ return snprintf(page, count, "error: no adapter\n");
+
+ return snprintf(page, count, "%d\n",
+ test_bit(__IGB_DOWN, &adapter->state));
+}
+
+static int igb_therm_location(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct igb_therm_proc_data *therm_data =
+ (struct igb_therm_proc_data *)data;
+
+ if (therm_data == NULL)
+ return snprintf(page, count, "error: no therm_data\n");
+
+ return snprintf(page, count, "%d\n", therm_data->sensor_data->location);
+}
+
+static int igb_therm_maxopthresh(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct igb_therm_proc_data *therm_data =
+ (struct igb_therm_proc_data *)data;
+
+ if (therm_data == NULL)
+ return snprintf(page, count, "error: no therm_data\n");
+
+ return snprintf(page, count, "%d\n",
+ therm_data->sensor_data->max_op_thresh);
+}
+
+static int igb_therm_cautionthresh(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ struct igb_therm_proc_data *therm_data =
+ (struct igb_therm_proc_data *)data;
+
+ if (therm_data == NULL)
+ return snprintf(page, count, "error: no therm_data\n");
+
+ return snprintf(page, count, "%d\n",
+ therm_data->sensor_data->caution_thresh);
+}
+
+static int igb_therm_temp(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ s32 status;
+ struct igb_therm_proc_data *therm_data =
+ (struct igb_therm_proc_data *)data;
+
+ if (therm_data == NULL)
+ return snprintf(page, count, "error: no therm_data\n");
+
+ status = e1000_get_thermal_sensor_data(therm_data->hw);
+ if (status != E1000_SUCCESS)
+ snprintf(page, count, "error: status %d returned\n", status);
+
+ return snprintf(page, count, "%d\n", therm_data->sensor_data->temp);
+}
+
+struct igb_proc_type {
+ char name[32];
+ int (*read)(char*, char**, off_t, int, int*, void*);
+};
+
+struct igb_proc_type igb_proc_entries[] = {
+ {"numeports", &igb_numeports},
+ {"porttype", &igb_porttype},
+ {"macburn", &igb_macburn},
+ {"macadmn", &igb_macadmn},
+ {"", NULL}
+};
+
+struct igb_proc_type igb_internal_entries[] = {
+ {"location", &igb_therm_location},
+ {"temp", &igb_therm_temp},
+ {"cautionthresh", &igb_therm_cautionthresh},
+ {"maxopthresh", &igb_therm_maxopthresh},
+ {"", NULL}
+};
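
Both tables above are sentinel-terminated: the loops in igb_del_proc_entries() and igb_procfs_init() walk entries until they hit the one whose .read pointer is NULL. A standalone sketch of that iteration pattern (dummy callback, no procfs involved):

#include <stdio.h>

struct proc_type {
	char name[32];
	int (*read)(void);
};

static int dummy_read(void) { return 0; }

/* same shape as igb_proc_entries[]: the {"", NULL} entry stops the walk */
static struct proc_type entries[] = {
	{ "numeports", dummy_read },
	{ "porttype",  dummy_read },
	{ "",          NULL       },
};

int main(void)
{
	for (int i = 0; entries[i].read != NULL; i++)
		printf("would register '%s'\n", entries[i].name);
	return 0;
}
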
+
+void igb_del_proc_entries(struct igb_adapter *adapter)
+{
+ int index, i;
+ char buf[16]; /* much larger than the sensor number will ever be */
+
+ if (igb_top_dir == NULL)
+ return;
+
+ for (i = 0; i < E1000_MAX_SENSORS; i++) {
+ if (adapter->therm_dir[i] == NULL)
+ continue;
+
+ for (index = 0; ; index++) {
+ if (igb_internal_entries[index].read == NULL)
+ break;
+
+ remove_proc_entry(igb_internal_entries[index].name,
+ adapter->therm_dir[i]);
+ }
+ snprintf(buf, sizeof(buf), "sensor_%d", i);
+ remove_proc_entry(buf, adapter->info_dir);
+ }
+
+ if (adapter->info_dir != NULL) {
+ for (index = 0; ; index++) {
+ if (igb_proc_entries[index].read == NULL)
+ break;
+ remove_proc_entry(igb_proc_entries[index].name,
+ adapter->info_dir);
+ }
+ remove_proc_entry("info", adapter->eth_dir);
+ }
+
+ if (adapter->eth_dir != NULL)
+ remove_proc_entry(pci_name(adapter->pdev), igb_top_dir);
+}
+
+/* called from igb_main.c */
+void igb_procfs_exit(struct igb_adapter *adapter)
+{
+ igb_del_proc_entries(adapter);
+}
+
+int igb_procfs_topdir_init(void)
+{
+ igb_top_dir = proc_mkdir("driver/igb", NULL);
+ if (igb_top_dir == NULL)
+ return (-ENOMEM);
+
+ return 0;
+}
+
+void igb_procfs_topdir_exit(void)
+{
+ remove_proc_entry("driver/igb", NULL);
+}
+
+/* called from igb_main.c */
+int igb_procfs_init(struct igb_adapter *adapter)
+{
+ int rc = 0;
+ int i;
+ int index;
+ char buf[16]; /* much larger than the sensor number will ever be */
+
+ adapter->eth_dir = NULL;
+ adapter->info_dir = NULL;
+ for (i = 0; i < E1000_MAX_SENSORS; i++)
+ adapter->therm_dir[i] = NULL;
+
+ if (igb_top_dir == NULL) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), igb_top_dir);
+ if (adapter->eth_dir == NULL) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ adapter->info_dir = proc_mkdir("info", adapter->eth_dir);
+ if (adapter->info_dir == NULL) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ for (index = 0; ; index++) {
+ if (igb_proc_entries[index].read == NULL)
+ break;
+ if (!(create_proc_read_entry(igb_proc_entries[index].name,
+ 0444,
+ adapter->info_dir,
+ igb_proc_entries[index].read,
+ adapter))) {
+
+ rc = -ENOMEM;
+ goto fail;
+ }
+ }
+ if (igb_thermal_present(adapter) == false)
+ goto exit;
+
+ for (i = 0; i < E1000_MAX_SENSORS; i++) {
+ if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
+ continue;
+
+ snprintf(buf, sizeof(buf), "sensor_%d", i);
+ adapter->therm_dir[i] = proc_mkdir(buf, adapter->info_dir);
+ if (adapter->therm_dir[i] == NULL) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ for (index = 0; ; index++) {
+ if (igb_internal_entries[index].read == NULL)
+ break;
+ /*
+ * therm_data struct contains the pointers the read
+ * function will need
+ */
+ adapter->therm_data[i].hw = &adapter->hw;
+ adapter->therm_data[i].sensor_data =
+ &adapter->hw.mac.thermal_sensor_data.sensor[i];
+
+ if (!(create_proc_read_entry(
+ igb_internal_entries[index].name,
+ 0444,
+ adapter->therm_dir[i],
+ igb_internal_entries[index].read,
+ &adapter->therm_data[i]))) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ }
+ }
+ goto exit;
+
+fail:
+ igb_del_proc_entries(adapter);
+exit:
+ return rc;
+}
+
+#endif /* !IGB_HWMON */
+#endif /* IGB_PROCFS */
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 7e8c477b0ab9..744fa654b77c 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -1,32 +1,46 @@
-/* PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
- *
- * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2015 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/******************************************************************************
+ Copyright(c) 2011 Richard Cochran <richardcochran@gmail.com> for some of the
+ 82576 and 82580 code
+******************************************************************************/
+
+#include "igb.h"
+
+#ifdef HAVE_PTP_1588_CLOCK
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/ptp_classify.h>
-
-#include "igb.h"
+#include <linux/clocksource.h>
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
-/* The 82580 timesync updates the system timer every 8ns by 8ns,
+/*
+ * The 82580 timesync updates the system timer every 8ns by 8ns,
* and this update value cannot be reprogrammed.
*
* Neither the 82576 nor the 82580 offer registers wide enough to hold
@@ -75,7 +89,10 @@
#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
#define IGB_NBITS_82580 40
-/* SYSTIM read access for the 82576 */
+/*
+ * SYSTIM read access for the 82576
+ */
+
static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
{
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
@@ -83,8 +100,8 @@ static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
u64 val;
u32 lo, hi;
- lo = rd32(E1000_SYSTIML);
- hi = rd32(E1000_SYSTIMH);
+ lo = E1000_READ_REG(hw, E1000_SYSTIML);
+ hi = E1000_READ_REG(hw, E1000_SYSTIMH);
val = ((u64) hi) << 32;
val |= lo;
@@ -92,21 +109,24 @@ static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
return val;
}
-/* SYSTIM read access for the 82580 */
+/*
+ * SYSTIM read access for the 82580
+ */
+
static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
{
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
struct e1000_hw *hw = &igb->hw;
u64 val;
- u32 lo, hi, jk;
+ u32 lo, hi;
/* The timestamp latches on lowest register read. For the 82580
* the lowest register is SYSTIMR instead of SYSTIML. However we only
* need to provide nanosecond resolution, so we just ignore it.
*/
- jk = rd32(E1000_SYSTIMR);
- lo = rd32(E1000_SYSTIML);
- hi = rd32(E1000_SYSTIMH);
+ E1000_READ_REG(hw, E1000_SYSTIMR);
+ lo = E1000_READ_REG(hw, E1000_SYSTIML);
+ hi = E1000_READ_REG(hw, E1000_SYSTIMH);
val = ((u64) hi) << 32;
val |= lo;
@@ -114,19 +134,22 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
return val;
}
-/* SYSTIM read access for I210/I211 */
+/*
+ * SYSTIM read access for I210/I211
+ */
+
static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
{
struct e1000_hw *hw = &adapter->hw;
- u32 sec, nsec, jk;
+ u32 sec, nsec;
/* The timestamp latches on lowest register read. For I210/I211, the
* lowest register is SYSTIMR. Since we only need to provide nanosecond
* resolution, we can ignore it.
*/
- jk = rd32(E1000_SYSTIMR);
- nsec = rd32(E1000_SYSTIML);
- sec = rd32(E1000_SYSTIMH);
+ E1000_READ_REG(hw, E1000_SYSTIMR);
+ nsec = E1000_READ_REG(hw, E1000_SYSTIML);
+ sec = E1000_READ_REG(hw, E1000_SYSTIMH);
ts->tv_sec = sec;
ts->tv_nsec = nsec;
@@ -137,11 +160,12 @@ static void igb_ptp_write_i210(struct igb_adapter *adapter,
{
struct e1000_hw *hw = &adapter->hw;
- /* Writing the SYSTIMR register is not necessary as it only provides
+ /*
+ * Writing the SYSTIMR register is not necessary as it only provides
* sub-nanosecond resolution.
*/
- wr32(E1000_SYSTIML, ts->tv_nsec);
- wr32(E1000_SYSTIMH, ts->tv_sec);
+ E1000_WRITE_REG(hw, E1000_SYSTIML, ts->tv_nsec);
+ E1000_WRITE_REG(hw, E1000_SYSTIMH, ts->tv_sec);
}
/**
@@ -171,8 +195,8 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
switch (adapter->hw.mac.type) {
case e1000_82576:
case e1000_82580:
- case e1000_i354:
case e1000_i350:
+ case e1000_i354:
spin_lock_irqsave(&adapter->tmreg_lock, flags);
ns = timecounter_cyc2time(&adapter->tc, systim);
@@ -194,7 +218,10 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
}
}
-/* PTP clock operations */
+/*
+ * PTP clock operations
+ */
+
static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
@@ -219,7 +246,8 @@ static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
else
incvalue += rate;
- wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK));
+ E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576
+ | (incvalue & INCVALUE_82576_MASK));
return 0;
}
@@ -241,11 +269,24 @@ static int igb_ptp_adjfreq_82580(struct ptp_clock_info *ptp, s32 ppb)
rate <<= 26;
rate = div_u64(rate, 1953125);
+ /* At 2.5G speeds, the TIMINCA register on I354 updates the clock 2.5x
+ * as quickly. Account for this by dividing the adjustment by 2.5.
+ */
+ if (hw->mac.type == e1000_i354) {
+ u32 status = E1000_READ_REG(hw, E1000_STATUS);
+
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ rate <<= 1;
+ rate = div_u64(rate, 5);
+ }
+ }
+
inca = rate & INCVALUE_MASK;
if (neg_adj)
inca |= ISGN;
- wr32(E1000_TIMINCA, inca);
+ E1000_WRITE_REG(hw, E1000_TIMINCA, inca);
return 0;
}
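
The scaling in igb_ptp_adjfreq_82580() is pure fixed-point arithmetic: since 1953125 = 5^9 and 10^9 = 2^9 * 5^9, (ppb << 26) / 1953125 equals ppb * 2^35 / 10^9, i.e. the fractional adjustment expressed in 2^-35 steps; how the hardware interprets that field is the datasheet's business and is not shown here. The i354 2.5G special case then divides the result by 2.5. A standalone check of the numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t ppb = 1000;                       /* requested +1000 ppb */
	uint64_t rate = (uint64_t)ppb << 26;

	rate /= 1953125;                          /* == ppb * 2^35 / 10^9 */
	printf("TIMINCA adjustment      = %llu\n",
	       (unsigned long long)rate);          /* 34359 */

	/* i354 running a 2.5Gb SKU: clock ticks 2.5x faster, so scale down */
	uint64_t rate_2p5 = (rate << 1) / 5;
	printf("2.5G TIMINCA adjustment = %llu\n",
	       (unsigned long long)rate_2p5);      /* 13743 */
	return 0;
}
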
@@ -286,14 +327,13 @@ static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
- struct timespec *ts)
+static int igb_ptp_gettime64_82576(struct ptp_clock_info *ptp,
+ struct timespec64 *ts64)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
unsigned long flags;
u64 ns;
- u32 remainder;
spin_lock_irqsave(&igb->tmreg_lock, flags);
@@ -301,28 +341,99 @@ static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
spin_unlock_irqrestore(&igb->tmreg_lock, flags);
- ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
- ts->tv_nsec = remainder;
+ *ts64 = ns_to_timespec64(ns);
return 0;
}
-static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
- struct timespec *ts)
+static int igb_ptp_gettime64_i210(struct ptp_clock_info *ptp,
+ struct timespec64 *ts64)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ struct timespec ts;
+ unsigned long flags;
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ igb_ptp_read_i210(igb, &ts);
+ *ts64 = timespec_to_timespec64(ts);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ return 0;
+}
+
+#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
+static int igb_ptp_settime64_82576(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts64)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
unsigned long flags;
+ u64 ns;
+
+ ns = timespec64_to_ns(ts64);
spin_lock_irqsave(&igb->tmreg_lock, flags);
- igb_ptp_read_i210(igb, ts);
+ timecounter_init(&igb->tc, &igb->cc, ns);
spin_unlock_irqrestore(&igb->tmreg_lock, flags);
return 0;
}
+#endif
+static int igb_ptp_settime64_i210(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts64)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ struct timespec ts;
+ unsigned long flags;
+
+ ts = timespec64_to_timespec(*ts64);
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ igb_ptp_write_i210(igb, &ts);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ return 0;
+}
+
+#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64
+static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
+ struct timespec *ts)
+{
+ struct timespec64 ts64;
+ int err;
+
+ err = igb_ptp_gettime64_82576(ptp, &ts64);
+ if (err)
+ return err;
+
+ *ts = timespec64_to_timespec(ts64);
+
+ return 0;
+}
+
+static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
+ struct timespec *ts)
+{
+ struct timespec64 ts64;
+ int err;
+
+ err = igb_ptp_gettime64_i210(ptp, &ts64);
+ if (err)
+ return err;
+
+ *ts = timespec64_to_timespec(ts64);
+
+ return 0;
+}
+
static int igb_ptp_settime_82576(struct ptp_clock_info *ptp,
const struct timespec *ts)
{
@@ -359,6 +470,7 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
return 0;
}
+#endif
static int igb_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
@@ -371,7 +483,7 @@ static int igb_ptp_enable(struct ptp_clock_info *ptp,
*
* This work function polls the TSYNCTXCTL valid bit to determine when a
* timestamp has been taken for the current stored skb.
- **/
+ */
void igb_ptp_tx_work(struct work_struct *work)
{
struct igb_adapter *adapter = container_of(work, struct igb_adapter,
@@ -386,12 +498,13 @@ void igb_ptp_tx_work(struct work_struct *work)
IGB_PTP_TX_TIMEOUT)) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
adapter->tx_hwtstamp_timeouts++;
- dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
+ dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
return;
}
- tsynctxctl = rd32(E1000_TSYNCTXCTL);
+ tsynctxctl = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
if (tsynctxctl & E1000_TSYNCTXCTL_VALID)
igb_ptp_tx_hwtstamp(adapter);
else
@@ -399,15 +512,16 @@ void igb_ptp_tx_work(struct work_struct *work)
schedule_work(&adapter->ptp_tx_work);
}
-static void igb_ptp_overflow_check(struct work_struct *work)
+static void igb_ptp_overflow_check_82576(struct work_struct *work)
{
struct igb_adapter *igb =
container_of(work, struct igb_adapter, ptp_overflow_work.work);
- struct timespec ts;
+ struct timespec64 ts64;
- igb->ptp_caps.gettime(&igb->ptp_caps, &ts);
+ igb_ptp_gettime64_82576(&igb->ptp_caps, &ts64);
- pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+ pr_debug("igb overflow check at %lld.%09lu\n",
+ (long long)ts64.tv_sec, ts64.tv_nsec);
schedule_delayed_work(&igb->ptp_overflow_work,
IGB_SYSTIM_OVERFLOW_PERIOD);
@@ -421,14 +535,12 @@ static void igb_ptp_overflow_check(struct work_struct *work)
* dropped an Rx packet that was timestamped when the ring is full. The
* particular error is rare but leaves the device in a state unable to timestamp
* any future packets.
- **/
+ */
void igb_ptp_rx_hang(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- struct igb_ring *rx_ring;
- u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL);
+ u32 tsyncrxctl = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
unsigned long rx_event;
- int n;
if (hw->mac.type != e1000_82576)
return;
@@ -443,18 +555,15 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
/* Determine the most recent watchdog or rx_timestamp event */
rx_event = adapter->last_rx_ptp_check;
- for (n = 0; n < adapter->num_rx_queues; n++) {
- rx_ring = adapter->rx_ring[n];
- if (time_after(rx_ring->last_rx_timestamp, rx_event))
- rx_event = rx_ring->last_rx_timestamp;
- }
+ if (time_after(adapter->last_rx_timestamp, rx_event))
+ rx_event = adapter->last_rx_timestamp;
/* Only need to read the high RXSTMP register to clear the lock */
if (time_is_before_jiffies(rx_event + 5 * HZ)) {
- rd32(E1000_RXSTMPH);
+ E1000_READ_REG(hw, E1000_RXSTMPH);
adapter->last_rx_ptp_check = jiffies;
adapter->rx_hwtstamp_cleared++;
- dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang");
+ dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n");
}
}
@@ -465,20 +574,21 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
* If we were asked to do hardware stamping and such a time stamp is
* available, then it must have been for this skb here because we
* allow only one such packet into the queue.
- **/
+ */
void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval;
- regval = rd32(E1000_TXSTMPL);
- regval |= (u64)rd32(E1000_TXSTMPH) << 32;
+ regval = E1000_READ_REG(hw, E1000_TXSTMPL);
+ regval |= (u64)E1000_READ_REG(hw, E1000_TXSTMPH) << 32;
igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
}
/**
@@ -490,14 +600,15 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
* This function is meant to retrieve a timestamp from the first buffer of an
* incoming frame. The value is stored in little endian format starting on
* byte 8.
- **/
+ */
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
unsigned char *va,
struct sk_buff *skb)
{
__le64 *regval = (__le64 *)va;
- /* The timestamp is recorded in little endian format.
+ /*
+ * The timestamp is recorded in little endian format.
* DWORD: 0 1 2 3
* Field: Reserved Reserved SYSTIML SYSTIMH
*/
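As the layout comment says, the timestamp occupies DWORDs 2 and 3 of the packet prefix, i.e. bytes 8..15 in little-endian order. A standalone sketch of pulling such a value out of a buffer independent of host endianness (illustrative only, not the driver code):

#include <stdint.h>
#include <stdio.h>

static uint64_t pktstamp_from_buffer(const unsigned char *va)
{
	uint64_t le = 0;
	int i;

	/* assemble byte by byte so host endianness does not matter */
	for (i = 0; i < 8; i++)
		le |= (uint64_t)va[8 + i] << (8 * i);
	return le;
}

int main(void)
{
	unsigned char buf[16] = { 0 };

	/* SYSTIML = 0x11223344, SYSTIMH = 0x55667788, little endian */
	buf[8]  = 0x44; buf[9]  = 0x33; buf[10] = 0x22; buf[11] = 0x11;
	buf[12] = 0x88; buf[13] = 0x77; buf[14] = 0x66; buf[15] = 0x55;

	/* prints 0x5566778811223344 */
	printf("0x%016llx\n", (unsigned long long)pktstamp_from_buffer(buf));
	return 0;
}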
@@ -512,7 +623,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
*
* This function is meant to retrieve a timestamp from the internal registers
* of the adapter and store it in the skb.
- **/
+ */
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct sk_buff *skb)
{
@@ -520,7 +631,8 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct e1000_hw *hw = &adapter->hw;
u64 regval;
- /* If this bit is set, then the RX registers contain the time stamp. No
+ /*
+ * If this bit is set, then the RX registers contain the time stamp. No
* other packet will be time stamped until we read these registers, so
* read the registers to make them available again. Because only one
* packet can be time stamped at a time, we know that the register
@@ -530,20 +642,42 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
* If nothing went wrong, then it should have a shared tx_flags that we
* can turn into a skb_shared_hwtstamps.
*/
- if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ if (!(E1000_READ_REG(hw, E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
return;
- regval = rd32(E1000_RXSTMPL);
- regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+ regval = E1000_READ_REG(hw, E1000_RXSTMPL);
+ regval |= (u64)E1000_READ_REG(hw, E1000_RXSTMPH) << 32;
igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+
+ /* Update the last_rx_timestamp timer in order to enable watchdog check
+ * for error case of latched timestamp on a dropped packet.
+ */
+ adapter->last_rx_timestamp = jiffies;
}
/**
- * igb_ptp_hwtstamp_ioctl - control hardware time stamping
+ * igb_ptp_get_ts_config - get hardware time stamping config
* @netdev:
* @ifreq:
- * @cmd:
+ *
+ * Get the hwtstamp_config settings to return to the user. Rather than attempt
+ * to deconstruct the settings from the registers, just return a shadow copy
+ * of the last known settings.
+ **/
+int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config *config = &adapter->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * igb_ptp_set_timestamp_mode - setup hardware for timestamping
+ * @adapter: networking device structure
+ * @config: hwtstamp configuration
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -556,13 +690,12 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
* type has to be specified. Matching the kind of event packet is
* not supported, with the exception of "all V2 events regardless of
* level 2 or 4".
- **/
-int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+ *
+ */
+static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
+ struct hwtstamp_config *config)
{
- struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct hwtstamp_config config;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
u32 tsync_rx_cfg = 0;
@@ -570,14 +703,11 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
bool is_l2 = false;
u32 regval;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
/* reserved for future extensions */
- if (config.flags)
+ if (config->flags)
return -EINVAL;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
case HWTSTAMP_TX_ON:
@@ -586,7 +716,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
break;
@@ -610,23 +740,24 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
is_l2 = true;
is_l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_ALL:
- /* 82576 cannot timestamp all packets, which it needs to do to
+ /*
+ * 82576 cannot timestamp all packets, which it needs to do to
* support both V1 Sync and Delay_Req messages
*/
if (hw->mac.type != e1000_82576) {
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
break;
}
/* fall through */
default:
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -636,76 +767,103 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
return 0;
}
- /* Per-packet timestamping only works if all packets are
+ /*
+ * Per-packet timestamping only works if all packets are
* timestamped, so enable timestamping in all packets as
- * long as one Rx filter was configured.
+ * long as one rx filter was configured.
*/
if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
is_l2 = true;
is_l4 = true;
if ((hw->mac.type == e1000_i210) ||
(hw->mac.type == e1000_i211)) {
- regval = rd32(E1000_RXPBS);
+ regval = E1000_READ_REG(hw, E1000_RXPBS);
regval |= E1000_RXPBS_CFG_TS_EN;
- wr32(E1000_RXPBS, regval);
+ E1000_WRITE_REG(hw, E1000_RXPBS, regval);
}
}
/* enable/disable TX */
- regval = rd32(E1000_TSYNCTXCTL);
+ regval = E1000_READ_REG(hw, E1000_TSYNCTXCTL);
regval &= ~E1000_TSYNCTXCTL_ENABLED;
regval |= tsync_tx_ctl;
- wr32(E1000_TSYNCTXCTL, regval);
+ E1000_WRITE_REG(hw, E1000_TSYNCTXCTL, regval);
/* enable/disable RX */
- regval = rd32(E1000_TSYNCRXCTL);
+ regval = E1000_READ_REG(hw, E1000_TSYNCRXCTL);
regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
regval |= tsync_rx_ctl;
- wr32(E1000_TSYNCRXCTL, regval);
+ E1000_WRITE_REG(hw, E1000_TSYNCRXCTL, regval);
/* define which PTP packets are time stamped */
- wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
+ E1000_WRITE_REG(hw, E1000_TSYNCRXCFG, tsync_rx_cfg);
/* define ethertype filter for timestamped packets */
if (is_l2)
- wr32(E1000_ETQF(3),
+ E1000_WRITE_REG(hw, E1000_ETQF(3),
(E1000_ETQF_FILTER_ENABLE | /* enable filter */
E1000_ETQF_1588 | /* enable timestamping */
ETH_P_1588)); /* 1588 eth protocol type */
else
- wr32(E1000_ETQF(3), 0);
+ E1000_WRITE_REG(hw, E1000_ETQF(3), 0);
/* L4 Queue Filter[3]: filter by destination port and protocol */
if (is_l4) {
u32 ftqf = (IPPROTO_UDP /* UDP */
- | E1000_FTQF_VF_BP /* VF not compared */
- | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
- | E1000_FTQF_MASK); /* mask all inputs */
+ | E1000_FTQF_VF_BP /* VF not compared */
+ | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamp */
+ | E1000_FTQF_MASK); /* mask all inputs */
ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
- wr32(E1000_IMIR(3), htons(PTP_EV_PORT));
- wr32(E1000_IMIREXT(3),
+ E1000_WRITE_REG(hw, E1000_IMIR(3), htons(PTP_EV_PORT));
+ E1000_WRITE_REG(hw, E1000_IMIREXT(3),
(E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
if (hw->mac.type == e1000_82576) {
/* enable source port check */
- wr32(E1000_SPQF(3), htons(PTP_EV_PORT));
+ E1000_WRITE_REG(hw, E1000_SPQF(3), htons(PTP_EV_PORT));
ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
}
- wr32(E1000_FTQF(3), ftqf);
+ E1000_WRITE_REG(hw, E1000_FTQF(3), ftqf);
} else {
- wr32(E1000_FTQF(3), E1000_FTQF_MASK);
+ E1000_WRITE_REG(hw, E1000_FTQF(3), E1000_FTQF_MASK);
}
- wrfl();
+ E1000_WRITE_FLUSH(hw);
/* clear TX/RX time stamp registers, just to be sure */
- regval = rd32(E1000_TXSTMPL);
- regval = rd32(E1000_TXSTMPH);
- regval = rd32(E1000_RXSTMPL);
- regval = rd32(E1000_RXSTMPH);
+ regval = E1000_READ_REG(hw, E1000_TXSTMPL);
+ regval = E1000_READ_REG(hw, E1000_TXSTMPH);
+ regval = E1000_READ_REG(hw, E1000_RXSTMPL);
+ regval = E1000_READ_REG(hw, E1000_RXSTMPH);
+
+ return 0;
+}
+
+/**
+ * igb_ptp_set_ts_config - set hardware time stamping config
+ * @netdev: network interface device structure
+ * @ifr: interface request data containing the user hwtstamp_config
+ *
+ * Apply the requested timestamp mode and save the settings so they can
+ * later be returned by igb_ptp_get_ts_config().
+ *
+ **/
+int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = igb_ptp_set_timestamp_mode(adapter, &config);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ memcpy(&adapter->tstamp_config, &config,
+ sizeof(adapter->tstamp_config));
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
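For context, a user-space sketch of driving the two paths above through the standard hwtstamp ioctls. "eth0" is a placeholder interface name, SIOCGHWTSTAMP needs a kernel that provides it, and the driver is free to rewrite rx_filter (for example to HWTSTAMP_FILTER_ALL on 82580 and later, as the code above does):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)	/* -> igb_ptp_set_ts_config() */
		perror("SIOCSHWTSTAMP");

	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) < 0)	/* -> igb_ptp_get_ts_config() */
		perror("SIOCGHWTSTAMP");
	else
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}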
@@ -725,19 +883,25 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
+#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
+ adapter->ptp_caps.gettime64 = igb_ptp_gettime64_82576;
+ adapter->ptp_caps.settime64 = igb_ptp_settime64_82576;
+#else
adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
adapter->ptp_caps.settime = igb_ptp_settime_82576;
+#endif
adapter->ptp_caps.enable = igb_ptp_enable;
adapter->cc.read = igb_ptp_read_82576;
adapter->cc.mask = CLOCKSOURCE_MASK(64);
adapter->cc.mult = 1;
adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
/* Dial the nominal frequency. */
- wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+ E1000_WRITE_REG(hw, E1000_TIMINCA,
+ INCPERIOD_82576 | INCVALUE_82576);
break;
case e1000_82580:
- case e1000_i354:
case e1000_i350:
+ case e1000_i354:
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 62499999;
@@ -745,15 +909,20 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
+#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
+ adapter->ptp_caps.gettime64 = igb_ptp_gettime64_82576;
+ adapter->ptp_caps.settime64 = igb_ptp_settime64_82576;
+#else
adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
adapter->ptp_caps.settime = igb_ptp_settime_82576;
+#endif
adapter->ptp_caps.enable = igb_ptp_enable;
adapter->cc.read = igb_ptp_read_82580;
adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
adapter->cc.mult = 1;
adapter->cc.shift = 0;
/* Enable the timer functions by clearing bit 31. */
- wr32(E1000_TSAUXC, 0x0);
+ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
break;
case e1000_i210:
case e1000_i211:
@@ -764,33 +933,38 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
+#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64
+ adapter->ptp_caps.gettime64 = igb_ptp_gettime64_i210;
+ adapter->ptp_caps.settime64 = igb_ptp_settime64_i210;
+#else
adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
adapter->ptp_caps.settime = igb_ptp_settime_i210;
+#endif
adapter->ptp_caps.enable = igb_ptp_enable;
/* Enable the timer functions by clearing bit 31. */
- wr32(E1000_TSAUXC, 0x0);
+ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
break;
default:
adapter->ptp_clock = NULL;
return;
}
- wrfl();
+ E1000_WRITE_FLUSH(hw);
spin_lock_init(&adapter->tmreg_lock);
INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
/* Initialize the clock and overflow work for devices that need it. */
if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
- struct timespec ts = ktime_to_timespec(ktime_get_real());
+ struct timespec64 ts = ktime_to_timespec64(ktime_get_real());
- igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
+ igb_ptp_settime64_i210(&adapter->ptp_caps, &ts);
} else {
timecounter_init(&adapter->tc, &adapter->cc,
ktime_to_ns(ktime_get_real()));
INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
- igb_ptp_overflow_check);
+ igb_ptp_overflow_check_82576);
schedule_delayed_work(&adapter->ptp_overflow_work,
IGB_SYSTIM_OVERFLOW_PERIOD);
@@ -798,10 +972,13 @@ void igb_ptp_init(struct igb_adapter *adapter)
/* Initialize the time sync interrupts for devices that support it. */
if (hw->mac.type >= e1000_82580) {
- wr32(E1000_TSIM, E1000_TSIM_TXTS);
- wr32(E1000_IMS, E1000_IMS_TS);
+ E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS);
+ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS);
}
+ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
@@ -825,8 +1002,8 @@ void igb_ptp_stop(struct igb_adapter *adapter)
switch (adapter->hw.mac.type) {
case e1000_82576:
case e1000_82580:
- case e1000_i354:
case e1000_i350:
+ case e1000_i354:
cancel_delayed_work_sync(&adapter->ptp_overflow_work);
break;
case e1000_i210:
@@ -841,6 +1018,7 @@ void igb_ptp_stop(struct igb_adapter *adapter)
if (adapter->ptp_tx_skb) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
+ clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
}
if (adapter->ptp_clock) {
@@ -864,20 +1042,24 @@ void igb_ptp_reset(struct igb_adapter *adapter)
if (!(adapter->flags & IGB_FLAG_PTP))
return;
+ /* reset the tstamp_config */
+ igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
+
switch (adapter->hw.mac.type) {
case e1000_82576:
/* Dial the nominal frequency. */
- wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+ E1000_WRITE_REG(hw, E1000_TIMINCA, INCPERIOD_82576 |
+ INCVALUE_82576);
break;
case e1000_82580:
- case e1000_i354:
case e1000_i350:
+ case e1000_i354:
case e1000_i210:
case e1000_i211:
/* Enable the timer functions and interrupts. */
- wr32(E1000_TSAUXC, 0x0);
- wr32(E1000_TSIM, E1000_TSIM_TXTS);
- wr32(E1000_IMS, E1000_IMS_TS);
+ E1000_WRITE_REG(hw, E1000_TSAUXC, 0x0);
+ E1000_WRITE_REG(hw, E1000_TSIM, E1000_TSIM_TXTS);
+ E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_TS);
break;
default:
/* No work to do. */
@@ -886,11 +1068,12 @@ void igb_ptp_reset(struct igb_adapter *adapter)
/* Re-initialize the timer. */
if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
- struct timespec ts = ktime_to_timespec(ktime_get_real());
+ struct timespec64 ts64 = ktime_to_timespec64(ktime_get_real());
- igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
+ igb_ptp_settime64_i210(&adapter->ptp_caps, &ts64);
} else {
timecounter_init(&adapter->tc, &adapter->cc,
ktime_to_ns(ktime_get_real()));
}
}
+#endif /* HAVE_PTP_1588_CLOCK */
diff --git a/drivers/net/ethernet/intel/igb/igb_regtest.h b/drivers/net/ethernet/intel/igb/igb_regtest.h
new file mode 100644
index 000000000000..d1c192d80ef7
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_regtest.h
@@ -0,0 +1,256 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2015 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool register test data */
+struct igb_reg_test {
+ u16 reg;
+ u16 reg_offset;
+ u16 array_len;
+ u16 test_type;
+ u32 mask;
+ u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x100 bytes apart, or in contiguous tables. We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
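These tables are consumed by the ethtool register self-test elsewhere in the driver (not part of this file). A rough sketch of how one PATTERN_TEST row might be exercised, with reg_read()/reg_write() standing in for the real MMIO accessors and a placeholder register offset:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[0x10000 / 4];

static void reg_write(uint32_t reg, uint32_t val) { fake_regs[reg / 4] = val; }
static uint32_t reg_read(uint32_t reg) { return fake_regs[reg / 4]; }

/* write each pattern (masked by 'write'), read back, compare under 'mask' */
static int pattern_test(uint32_t reg, uint32_t mask, uint32_t write)
{
	static const uint32_t patterns[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
	};
	unsigned int i;

	for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
		uint32_t pat = patterns[i] & write;
		uint32_t val;

		reg_write(reg, pat);
		val = reg_read(reg) & mask;
		if (val != (pat & mask)) {
			printf("reg 0x%x: wrote 0x%08x, read back 0x%08x\n",
			       reg, pat & mask, val);
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	/* placeholder offset; mask/write as in the FCTTV rows below */
	return pattern_test(0x0100, 0x0000FFFF, 0x0000FFFF);
}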
+/* i210 reg test */
+static struct igb_reg_test reg_test_i210[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* RDH is read-only for i210, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0x900FFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+/* i350 reg test */
+static struct igb_reg_test reg_test_i350[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ /* VET is readonly on i350 */
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* RDH is read-only for i350, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 16, TABLE64_TEST_HI,
+ 0xC3FFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+/* 82580 reg test */
+static struct igb_reg_test reg_test_82580[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ /* RDH is read-only for 82580, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0x83FFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 8, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 8, TABLE64_TEST_HI,
+ 0x83FFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+/* 82576 reg test */
+static struct igb_reg_test reg_test_82576[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ /* Enable all queues before testing. */
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
+ /* RDH is read-only for 82576, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0x83FFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 8, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA2, 0, 8, TABLE64_TEST_HI,
+ 0x83FFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+/* 82575 register test */
+static struct igb_reg_test reg_test_82575[] = {
+ { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+ { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80,
+ 0xFFFFFFFF },
+ { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF,
+ 0xFFFFFFFF },
+ { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ /* Enable all four RX queues before testing. */
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
+ /* RDH is read-only for 82575, only test RDT. */
+ { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
+ { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+ { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+ { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80,
+ 0xFFFFFFFF },
+ { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF,
+ 0xFFFFFFFF },
+ { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80,
+ 0x000FFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
+ { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+ { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_LO,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RA, 0, 16, TABLE64_TEST_HI,
+ 0x800FFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST,
+ 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0, 0, 0, 0 }
+};
+
+
diff --git a/drivers/net/ethernet/intel/igb/igb_vmdq.c b/drivers/net/ethernet/intel/igb/igb_vmdq.c
new file mode 100644
index 000000000000..fdc0bcbbb6ec
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_vmdq.c
@@ -0,0 +1,433 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2015 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#include <linux/tcp.h>
+
+#include "igb.h"
+#include "igb_vmdq.h"
+#include <linux/if_vlan.h>
+
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+int igb_vmdq_open(struct net_device *dev)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ struct net_device *main_netdev = adapter->netdev;
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ DPRINTK(DRV, WARNING,
+ "Open %s before opening this device.\n",
+ main_netdev->name);
+ return -EAGAIN;
+ }
+ netif_carrier_off(dev);
+ vadapter->tx_ring->vmdq_netdev = dev;
+ vadapter->rx_ring->vmdq_netdev = dev;
+ if (is_valid_ether_addr(dev->dev_addr)) {
+ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
+ igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
+ }
+ netif_carrier_on(dev);
+ return 0;
+}
+
+int igb_vmdq_close(struct net_device *dev)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ netif_carrier_off(dev);
+ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
+
+ vadapter->tx_ring->vmdq_netdev = NULL;
+ vadapter->rx_ring->vmdq_netdev = NULL;
+ return 0;
+}
+
+netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+
+ return igb_xmit_frame_ring(skb, vadapter->tx_ring);
+}
+
+struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ vadapter->net_stats.rx_packets +=
+ E1000_READ_REG(hw, E1000_PFVFGPRC(hw_queue));
+ E1000_WRITE_REG(hw, E1000_PFVFGPRC(hw_queue), 0);
+ vadapter->net_stats.tx_packets +=
+ E1000_READ_REG(hw, E1000_PFVFGPTC(hw_queue));
+ E1000_WRITE_REG(hw, E1000_PFVFGPTC(hw_queue), 0);
+ vadapter->net_stats.rx_bytes +=
+ E1000_READ_REG(hw, E1000_PFVFGORC(hw_queue));
+ E1000_WRITE_REG(hw, E1000_PFVFGORC(hw_queue), 0);
+ vadapter->net_stats.tx_bytes +=
+ E1000_READ_REG(hw, E1000_PFVFGOTC(hw_queue));
+ E1000_WRITE_REG(hw, E1000_PFVFGOTC(hw_queue), 0);
+ vadapter->net_stats.multicast +=
+ E1000_READ_REG(hw, E1000_PFVFMPRC(hw_queue));
+ E1000_WRITE_REG(hw, E1000_PFVFMPRC(hw_queue), 0);
+ /* only return the current stats */
+ return &vadapter->net_stats;
+}
+
+/**
+ * igb_write_vm_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
+ **/
+static int igb_write_vm_addr_list(struct net_device *netdev)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ int count = 0;
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ /* return ENOMEM indicating insufficient memory for addresses */
+ if (netdev_uc_count(netdev) > igb_available_rars(adapter))
+ return -ENOMEM;
+
+ if (!netdev_uc_empty(netdev)) {
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+ struct netdev_hw_addr *ha;
+#else
+ struct dev_mc_list *ha;
+#endif
+ netdev_for_each_uc_addr(ha, netdev) {
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+ igb_del_mac_filter(adapter, ha->addr, hw_queue);
+ igb_add_mac_filter(adapter, ha->addr, hw_queue);
+#else
+ igb_del_mac_filter(adapter, ha->da_addr, hw_queue);
+ igb_add_mac_filter(adapter, ha->da_addr, hw_queue);
+#endif
+ count++;
+ }
+ }
+ return count;
+}
+
+
+#define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous mode */
+void igb_vmdq_set_rx_mode(struct net_device *dev)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vmolr, rctl;
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ /* Check for Promiscuous and All Multicast modes */
+ vmolr = E1000_READ_REG(hw, E1000_VMOLR(hw_queue));
+
+ /* clear the affected bits */
+ vmolr &= ~(E1000_VMOLR_UPE | E1000_VMOLR_MPME |
+ E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE);
+
+ if (dev->flags & IFF_PROMISC) {
+ vmolr |= E1000_VMOLR_UPE;
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl |= E1000_RCTL_UPE;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ } else {
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
+ rctl &= ~E1000_RCTL_UPE;
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+ if (dev->flags & IFF_ALLMULTI) {
+ vmolr |= E1000_VMOLR_MPME;
+ } else {
+ /*
+ * Write addresses to the MTA; if the attempt fails,
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ if (igb_write_mc_addr_list(adapter->netdev) != 0)
+ vmolr |= E1000_VMOLR_ROMPE;
+ }
+#ifdef HAVE_SET_RX_MODE
+ /*
+ * Write addresses to available RAR registers; if there is not
+ * sufficient space to store all the addresses, then enable
+ * unicast promiscuous mode
+ */
+ if (igb_write_vm_addr_list(dev) < 0)
+ vmolr |= E1000_VMOLR_UPE;
+#endif
+ }
+ E1000_WRITE_REG(hw, E1000_VMOLR(hw_queue), vmolr);
+
+ return;
+}
+
+int igb_vmdq_set_mac(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ return igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
+}
+
+int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+
+ if (adapter->netdev->mtu < new_mtu) {
+ DPRINTK(PROBE, INFO,
+ "Set MTU on %s to >= %d before changing MTU on %s\n",
+ adapter->netdev->name, new_mtu, dev->name);
+ return -EINVAL;
+ }
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+void igb_vmdq_tx_timeout(struct net_device *dev)
+{
+ return;
+}
+
+void igb_vmdq_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ vadapter->vlgrp = grp;
+
+ igb_enable_vlan_tags(adapter);
+ E1000_WRITE_REG(hw, E1000_VMVIR(hw_queue), 0);
+
+ return;
+}
+void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+ struct net_device *v_netdev;
+#endif
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ /* attempt to add filter to vlvf array */
+ igb_vlvf_set(adapter, vid, TRUE, hw_queue);
+
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+
+ /* Copy feature flags from netdev to the vlan netdev for this vid.
+ * This allows things like TSO to bubble down to our vlan device.
+ */
+ v_netdev = vlan_group_get_device(vadapter->vlgrp, vid);
+ v_netdev->features |= adapter->netdev->features;
+ vlan_group_set_device(vadapter->vlgrp, vid, v_netdev);
+#endif
+
+ return;
+}
+void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ int hw_queue = vadapter->rx_ring->queue_index +
+ adapter->vfs_allocated_count;
+
+ vlan_group_set_device(vadapter->vlgrp, vid, NULL);
+ /* remove vlan from VLVF table array */
+ igb_vlvf_set(adapter, vid, FALSE, hw_queue);
+
+
+ return;
+}
+
+static int igb_vmdq_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 status;
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
+ ecmd->advertising = ADVERTISED_TP;
+
+ if (hw->mac.autoneg == 1) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ /* the e1000 autoneg seems to match ethtool nicely */
+ ecmd->advertising |= hw->phy.autoneg_advertised;
+ }
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = hw->phy.addr;
+ } else {
+ ecmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
+
+ ecmd->advertising = (ADVERTISED_1000baseT_Full |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg);
+
+ ecmd->port = PORT_FIBRE;
+ }
+
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ status = E1000_READ_REG(hw, E1000_STATUS);
+
+ if (status & E1000_STATUS_LU) {
+
+ if ((status & E1000_STATUS_SPEED_1000) ||
+ hw->phy.media_type != e1000_media_type_copper)
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ else if (status & E1000_STATUS_SPEED_100)
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
+ else
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
+
+ if ((status & E1000_STATUS_FD) ||
+ hw->phy.media_type != e1000_media_type_copper)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ } else {
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ return 0;
+}
+
+
+static u32 igb_vmdq_get_msglevel(struct net_device *netdev)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ return adapter->msg_enable;
+}
+
+static void igb_vmdq_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+ struct net_device *main_netdev = adapter->netdev;
+
+ strncpy(drvinfo->driver, igb_driver_name, 32);
+ strncpy(drvinfo->version, igb_driver_version, 32);
+
+ strncpy(drvinfo->fw_version, "N/A", 4);
+ snprintf(drvinfo->bus_info, 32, "%s VMDQ %d", main_netdev->name,
+ vadapter->rx_ring->queue_index);
+ drvinfo->n_stats = 0;
+ drvinfo->testinfo_len = 0;
+ drvinfo->regdump_len = 0;
+}
+
+static void igb_vmdq_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
+
+ struct igb_ring *tx_ring = vadapter->tx_ring;
+ struct igb_ring *rx_ring = vadapter->rx_ring;
+
+ ring->rx_max_pending = IGB_MAX_RXD;
+ ring->tx_max_pending = IGB_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+static u32 igb_vmdq_get_rx_csum(struct net_device *netdev)
+{
+ struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
+ struct igb_adapter *adapter = vadapter->real_adapter;
+
+ return test_bit(IGB_RING_FLAG_RX_CSUM, &adapter->rx_ring[0]->flags);
+}
+
+
+static struct ethtool_ops igb_vmdq_ethtool_ops = {
+ .get_settings = igb_vmdq_get_settings,
+ .get_drvinfo = igb_vmdq_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = igb_vmdq_get_ringparam,
+ .get_rx_csum = igb_vmdq_get_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_msglevel = igb_vmdq_get_msglevel,
+#ifdef NETIF_F_TSO
+ .get_tso = ethtool_op_get_tso,
+#endif
+#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
+ .get_perm_addr = ethtool_op_get_perm_addr,
+#endif
+};
+
+void igb_vmdq_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &igb_vmdq_ethtool_ops);
+}
+
+
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+
diff --git a/drivers/net/ethernet/intel/igb/igb_vmdq.h b/drivers/net/ethernet/intel/igb/igb_vmdq.h
new file mode 100644
index 000000000000..1411ac7b8989
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_vmdq.h
@@ -0,0 +1,43 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2015 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IGB_VMDQ_H_
+#define _IGB_VMDQ_H_
+
+#ifdef CONFIG_IGB_VMDQ_NETDEV
+int igb_vmdq_open(struct net_device *dev);
+int igb_vmdq_close(struct net_device *dev);
+netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev);
+struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev);
+void igb_vmdq_set_rx_mode(struct net_device *dev);
+int igb_vmdq_set_mac(struct net_device *dev, void *addr);
+int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu);
+void igb_vmdq_tx_timeout(struct net_device *dev);
+void igb_vmdq_vlan_rx_register(struct net_device *dev,
+ struct vlan_group *grp);
+void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+void igb_vmdq_set_ethtool_ops(struct net_device *netdev);
+#endif /* CONFIG_IGB_VMDQ_NETDEV */
+#endif /* _IGB_VMDQ_H_ */
diff --git a/drivers/net/ethernet/intel/igb/kcompat.c b/drivers/net/ethernet/intel/igb/kcompat.c
new file mode 100644
index 000000000000..82991b5d66ee
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/kcompat.c
@@ -0,0 +1,2082 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2015 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "igb.h"
+#include "kcompat.h"
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
+/* From lib/vsprintf.c */
+#include <asm/div64.h>
+
+static int skip_atoi(const char **s)
+{
+ int i=0;
+
+ while (isdigit(**s))
+ i = i*10 + *((*s)++) - '0';
+ return i;
+}
+
+#define _kc_ZEROPAD 1 /* pad with zero */
+#define _kc_SIGN 2 /* unsigned/signed long */
+#define _kc_PLUS 4 /* show plus */
+#define _kc_SPACE 8 /* space if plus */
+#define _kc_LEFT 16 /* left justified */
+#define _kc_SPECIAL 32 /* 0x */
+#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
+
+static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type)
+{
+ char c,sign,tmp[66];
+ const char *digits;
+ const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+ const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+ int i;
+
+ digits = (type & _kc_LARGE) ? large_digits : small_digits;
+ if (type & _kc_LEFT)
+ type &= ~_kc_ZEROPAD;
+ if (base < 2 || base > 36)
+ return 0;
+ c = (type & _kc_ZEROPAD) ? '0' : ' ';
+ sign = 0;
+ if (type & _kc_SIGN) {
+ if (num < 0) {
+ sign = '-';
+ num = -num;
+ size--;
+ } else if (type & _kc_PLUS) {
+ sign = '+';
+ size--;
+ } else if (type & _kc_SPACE) {
+ sign = ' ';
+ size--;
+ }
+ }
+ if (type & _kc_SPECIAL) {
+ if (base == 16)
+ size -= 2;
+ else if (base == 8)
+ size--;
+ }
+ i = 0;
+ if (num == 0)
+ tmp[i++]='0';
+ else while (num != 0)
+ tmp[i++] = digits[do_div(num,base)];
+ if (i > precision)
+ precision = i;
+ size -= precision;
+ if (!(type&(_kc_ZEROPAD+_kc_LEFT))) {
+ while(size-->0) {
+ if (buf <= end)
+ *buf = ' ';
+ ++buf;
+ }
+ }
+ if (sign) {
+ if (buf <= end)
+ *buf = sign;
+ ++buf;
+ }
+ if (type & _kc_SPECIAL) {
+ if (base==8) {
+ if (buf <= end)
+ *buf = '0';
+ ++buf;
+ } else if (base==16) {
+ if (buf <= end)
+ *buf = '0';
+ ++buf;
+ if (buf <= end)
+ *buf = digits[33];
+ ++buf;
+ }
+ }
+ if (!(type & _kc_LEFT)) {
+ while (size-- > 0) {
+ if (buf <= end)
+ *buf = c;
+ ++buf;
+ }
+ }
+ while (i < precision--) {
+ if (buf <= end)
+ *buf = '0';
+ ++buf;
+ }
+ while (i-- > 0) {
+ if (buf <= end)
+ *buf = tmp[i];
+ ++buf;
+ }
+ while (size-- > 0) {
+ if (buf <= end)
+ *buf = ' ';
+ ++buf;
+ }
+ return buf;
+}
+
+int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+ int len;
+ unsigned long long num;
+ int i, base;
+ char *str, *end, c;
+ const char *s;
+
+ int flags; /* flags to number() */
+
+ int field_width; /* width of output field */
+ int precision; /* min. # of digits for integers; max
+ number of chars from string */
+ int qualifier; /* 'h', 'l', or 'L' for integer fields */
+ /* 'z' support added 23/7/1999 S.H. */
+ /* 'z' changed to 'Z' --davidm 1/25/99 */
+
+ str = buf;
+ end = buf + size - 1;
+
+ if (end < buf - 1) {
+ end = ((void *) -1);
+ size = end - buf + 1;
+ }
+
+ for (; *fmt ; ++fmt) {
+ if (*fmt != '%') {
+ if (str <= end)
+ *str = *fmt;
+ ++str;
+ continue;
+ }
+
+ /* process flags */
+ flags = 0;
+ repeat:
+ ++fmt; /* this also skips first '%' */
+ switch (*fmt) {
+ case '-': flags |= _kc_LEFT; goto repeat;
+ case '+': flags |= _kc_PLUS; goto repeat;
+ case ' ': flags |= _kc_SPACE; goto repeat;
+ case '#': flags |= _kc_SPECIAL; goto repeat;
+ case '0': flags |= _kc_ZEROPAD; goto repeat;
+ }
+
+ /* get field width */
+ field_width = -1;
+ if (isdigit(*fmt))
+ field_width = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ field_width = va_arg(args, int);
+ if (field_width < 0) {
+ field_width = -field_width;
+ flags |= _kc_LEFT;
+ }
+ }
+
+ /* get the precision */
+ precision = -1;
+ if (*fmt == '.') {
+ ++fmt;
+ if (isdigit(*fmt))
+ precision = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ precision = va_arg(args, int);
+ }
+ if (precision < 0)
+ precision = 0;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
+ qualifier = *fmt;
+ ++fmt;
+ }
+
+ /* default base */
+ base = 10;
+
+ switch (*fmt) {
+ case 'c':
+ if (!(flags & _kc_LEFT)) {
+ while (--field_width > 0) {
+ if (str <= end)
+ *str = ' ';
+ ++str;
+ }
+ }
+ c = (unsigned char) va_arg(args, int);
+ if (str <= end)
+ *str = c;
+ ++str;
+ while (--field_width > 0) {
+ if (str <= end)
+ *str = ' ';
+ ++str;
+ }
+ continue;
+
+ case 's':
+ s = va_arg(args, char *);
+ if (!s)
+ s = "<NULL>";
+
+ len = strnlen(s, precision);
+
+ if (!(flags & _kc_LEFT)) {
+ while (len < field_width--) {
+ if (str <= end)
+ *str = ' ';
+ ++str;
+ }
+ }
+ for (i = 0; i < len; ++i) {
+ if (str <= end)
+ *str = *s;
+ ++str; ++s;
+ }
+ while (len < field_width--) {
+ if (str <= end)
+ *str = ' ';
+ ++str;
+ }
+ continue;
+
+ case 'p':
+ if (field_width == -1) {
+ field_width = 2*sizeof(void *);
+ flags |= _kc_ZEROPAD;
+ }
+ str = number(str, end,
+ (unsigned long) va_arg(args, void *),
+ 16, field_width, precision, flags);
+ continue;
+
+
+ case 'n':
+ /* FIXME:
+ * What does C99 say about the overflow case here? */
+ if (qualifier == 'l') {
+ long * ip = va_arg(args, long *);
+ *ip = (str - buf);
+ } else if (qualifier == 'Z') {
+ size_t * ip = va_arg(args, size_t *);
+ *ip = (str - buf);
+ } else {
+ int * ip = va_arg(args, int *);
+ *ip = (str - buf);
+ }
+ continue;
+
+ case '%':
+ if (str <= end)
+ *str = '%';
+ ++str;
+ continue;
+
+ /* integer number formats - set up the flags and "break" */
+ case 'o':
+ base = 8;
+ break;
+
+ case 'X':
+ flags |= _kc_LARGE;
+ case 'x':
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+ flags |= _kc_SIGN;
+ case 'u':
+ break;
+
+ default:
+ if (str <= end)
+ *str = '%';
+ ++str;
+ if (*fmt) {
+ if (str <= end)
+ *str = *fmt;
+ ++str;
+ } else {
+ --fmt;
+ }
+ continue;
+ }
+ if (qualifier == 'L')
+ num = va_arg(args, long long);
+ else if (qualifier == 'l') {
+ num = va_arg(args, unsigned long);
+ if (flags & _kc_SIGN)
+ num = (signed long) num;
+ } else if (qualifier == 'Z') {
+ num = va_arg(args, size_t);
+ } else if (qualifier == 'h') {
+ num = (unsigned short) va_arg(args, int);
+ if (flags & _kc_SIGN)
+ num = (signed short) num;
+ } else {
+ num = va_arg(args, unsigned int);
+ if (flags & _kc_SIGN)
+ num = (signed int) num;
+ }
+ str = number(str, end, num, base,
+ field_width, precision, flags);
+ }
+ if (str <= end)
+ *str = '\0';
+ else if (size > 0)
+ /* don't write out a null byte if the buf size is zero */
+ *end = '\0';
+ /* the trailing null byte doesn't count towards the total
+ * ++str;
+ */
+ return str-buf;
+}
+
+int _kc_snprintf(char * buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = _kc_vsnprintf(buf,size,fmt,args);
+ va_end(args);
+ return i;
+}
+#endif /* < 2.4.8 */
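A minimal usage sketch of the fallback above; its calling convention mirrors the modern snprintf(), so plain snprintf() stands in here on current systems:

#include <stdio.h>

int main(void)
{
	char name[16];
	int n = snprintf(name, sizeof(name), "igb-queue-%d", 3);

	/* prints: "igb-queue-3" (11 chars) */
	printf("\"%s\" (%d chars)\n", name, n);
	return 0;
}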
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#if defined(CONFIG_HIGHMEM)
+
+#ifndef PCI_DRAM_OFFSET
+#define PCI_DRAM_OFFSET 0
+#endif
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+ size_t size, int direction)
+{
+ return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
+ PCI_DRAM_OFFSET);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+ size_t size, int direction)
+{
+ return pci_map_single(dev, (void *)page_address(page) + offset, size,
+ direction);
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+void
+_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
+ int direction)
+{
+ return pci_unmap_single(dev, dma_addr, size, direction);
+}
+
+#endif /* 2.4.13 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+int
+_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
+{
+ if (!pci_dma_supported(dev, mask))
+ return -EIO;
+ dev->dma_mask = mask;
+ return 0;
+}
+
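+/* Claim every implemented I/O and memory BAR for this device under res_name;
+ * on failure, release anything already claimed and return -EBUSY.
+ */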
+int
+_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ if (pci_resource_len(dev, i) == 0)
+ continue;
+
+ if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
+ if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+ pci_release_regions(dev);
+ return -EBUSY;
+ }
+ } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
+ if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+ pci_release_regions(dev);
+ return -EBUSY;
+ }
+ }
+ }
+ return 0;
+}
+
+void
+_kc_pci_release_regions(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ if (pci_resource_len(dev, i) == 0)
+ continue;
+
+ if (pci_resource_flags(dev, i) & IORESOURCE_IO)
+ release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+
+ else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
+ release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+ }
+}
+
+/**************************************/
+/* NETWORK DRIVER API */
+
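+/* alloc_etherdev() backport: allocate the net_device and its private area in
+ * one zeroed block, align the private data to a 32-byte boundary and set the
+ * device up as Ethernet.
+ */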
+struct net_device *
+_kc_alloc_etherdev(int sizeof_priv)
+{
+ struct net_device *dev;
+ int alloc_size;
+
+ alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
+ dev = kzalloc(alloc_size, GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ if (sizeof_priv)
+ dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
+ dev->name[0] = '\0';
+ ether_setup(dev);
+
+ return dev;
+}
+
+int
+_kc_is_valid_ether_addr(u8 *addr)
+{
+ const char zaddr[6] = { 0, };
+
+ return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
+}
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+int
+_kc_pci_set_power_state(struct pci_dev *dev, int state)
+{
+ return 0;
+}
+
+int
+_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
+{
+ return 0;
+}
+
+#endif /* 2.4.6 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
+ int off, int size)
+{
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ frag->page = page;
+ frag->page_offset = off;
+ frag->size = size;
+ skb_shinfo(skb)->nr_frags = i + 1;
+}
+
+/*
+ * Original Copyright:
+ * find_next_bit.c: fallback find next bit implementation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + ffs(tmp);
+}
+
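+/* strlcpy() backport: copy at most size - 1 bytes, always NUL-terminate when
+ * size is non-zero, and return strlen(src) so callers can detect truncation.
+ */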
+size_t _kc_strlcpy(char *dest, const char *src, size_t size)
+{
+ size_t ret = strlen(src);
+
+ if (size) {
+ size_t len = (ret >= size) ? size - 1 : ret;
+ memcpy(dest, src, len);
+ dest[len] = '\0';
+ }
+ return ret;
+}
+
+#ifndef do_div
+#if BITS_PER_LONG == 32
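+/* 64-by-32 division helper used by do_div() on 32-bit builds: plain
+ * shift-and-subtract long division, storing the quotient in *n and returning
+ * the remainder.
+ */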
+uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base)
+{
+ uint64_t rem = *n;
+ uint64_t b = base;
+ uint64_t res, d = 1;
+ uint32_t high = rem >> 32;
+
+ /* Reduce the thing a bit first */
+ res = 0;
+ if (high >= base) {
+ high /= base;
+ res = (uint64_t) high << 32;
+ rem -= (uint64_t) (high*base) << 32;
+ }
+
+ while ((int64_t)b > 0 && b < rem) {
+ b = b+b;
+ d = d+d;
+ }
+
+ do {
+ if (rem >= b) {
+ rem -= b;
+ res += d;
+ }
+ b >>= 1;
+ d >>= 1;
+ } while (d);
+
+ *n = res;
+ return rem;
+}
+#endif /* BITS_PER_LONG == 32 */
+#endif /* do_div */
+#endif /* 2.6.0 => 2.4.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
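+/* scnprintf() backport: unlike vsnprintf(), return the number of characters
+ * actually written to buf (at most size - 1) instead of the would-be length.
+ */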
+int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...)
+{
+ va_list args;
+ int i;
+
+ va_start(args, fmt);
+ i = vsnprintf(buf, size, fmt, args);
+ va_end(args);
+ return (i >= size) ? (size - 1) : i;
+}
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1};
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
+char *_kc_kstrdup(const char *s, unsigned int gfp)
+{
+ size_t len;
+ char *buf;
+
+ if (!s)
+ return NULL;
+
+ len = strlen(s) + 1;
+ buf = kmalloc(len, gfp);
+ if (buf)
+ memcpy(buf, s, len);
+ return buf;
+}
+#endif /* < 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+void *_kc_kzalloc(size_t size, int flags)
+{
+ void *ret = kmalloc(size, flags);
+ if (ret)
+ memset(ret, 0, size);
+ return ret;
+}
+#endif /* <= 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
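+/* skb_pad() backport: zero-pad the frame to the requested length, expanding
+ * (and if necessary linearizing) the skb first; the skb is freed on failure.
+ */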
+int _kc_skb_pad(struct sk_buff *skb, int pad)
+{
+ int ntail;
+
+ /* If the skbuff is non linear tailroom is always zero.. */
+ if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
+ memset(skb->data+skb->len, 0, pad);
+ return 0;
+ }
+
+ ntail = skb->data_len + pad - (skb->end - skb->tail);
+ if (likely(skb_cloned(skb) || ntail > 0)) {
+ if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC))
+ goto free_skb;
+ }
+
+#ifdef MAX_SKB_FRAGS
+ if (skb_is_nonlinear(skb) &&
+ !__pskb_pull_tail(skb, skb->data_len))
+ goto free_skb;
+
+#endif
+ memset(skb->data + skb->len, 0, pad);
+ return 0;
+
+free_skb:
+ kfree_skb(skb);
+ return -ENOMEM;
+}
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
+int _kc_pci_save_state(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int size = PCI_CONFIG_SPACE_LEN, i;
+ u16 pcie_cap_offset, pcie_link_status;
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+ /* no ->dev for 2.4 kernels */
+ WARN_ON(pdev->dev.driver_data == NULL);
+#endif
+ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (pcie_cap_offset) {
+ if (!pci_read_config_word(pdev,
+ pcie_cap_offset + PCIE_LINK_STATUS,
+ &pcie_link_status))
+ size = PCIE_CONFIG_SPACE_LEN;
+ }
+ pci_config_space_ich8lan();
+#ifdef HAVE_PCI_ERS
+ if (adapter->config_space == NULL)
+#else
+ WARN_ON(adapter->config_space != NULL);
+#endif
+ adapter->config_space = kmalloc(size, GFP_KERNEL);
+ if (!adapter->config_space) {
+ printk(KERN_ERR "Out of memory in pci_save_state\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < (size / 4); i++)
+ pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
+ return 0;
+}
+
+void _kc_pci_restore_state(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int size = PCI_CONFIG_SPACE_LEN, i;
+ u16 pcie_cap_offset;
+ u16 pcie_link_status;
+
+ if (adapter->config_space != NULL) {
+ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (pcie_cap_offset &&
+ !pci_read_config_word(pdev,
+ pcie_cap_offset + PCIE_LINK_STATUS,
+ &pcie_link_status))
+ size = PCIE_CONFIG_SPACE_LEN;
+
+ pci_config_space_ich8lan();
+ for (i = 0; i < (size / 4); i++)
+ pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
+#ifndef HAVE_PCI_ERS
+ kfree(adapter->config_space);
+ adapter->config_space = NULL;
+#endif
+ }
+}
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+void _kc_free_netdev(struct net_device *netdev)
+{
+ struct adapter_struct *adapter = netdev_priv(netdev);
+
+ kfree(adapter->config_space);
+#ifdef CONFIG_SYSFS
+ if (netdev->reg_state == NETREG_UNINITIALIZED) {
+ kfree((char *)netdev - netdev->padded);
+ } else {
+ BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
+ netdev->reg_state = NETREG_RELEASED;
+ class_device_put(&netdev->class_dev);
+ }
+#else
+ kfree((char *)netdev - netdev->padded);
+#endif
+}
+#endif
+
+void *_kc_kmemdup(const void *src, size_t len, unsigned gfp)
+{
+ void *p;
+
+ p = kzalloc(len, gfp);
+ if (p)
+ memcpy(p, src, len);
+ return p;
+}
+#endif /* < 2.6.19 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev)
+{
+ return ((struct adapter_struct *)netdev_priv(netdev))->pdev;
+}
+#endif /* < 2.6.21 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+/* hexdump code taken from lib/hexdump.c */
+static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
+ int groupsize, unsigned char *linebuf,
+ size_t linebuflen, bool ascii)
+{
+ const u8 *ptr = buf;
+ u8 ch;
+ int j, lx = 0;
+ int ascii_column;
+
+ if (rowsize != 16 && rowsize != 32)
+ rowsize = 16;
+
+ if (!len)
+ goto nil;
+ if (len > rowsize) /* limit to one line at a time */
+ len = rowsize;
+ if ((len % groupsize) != 0) /* no mixed size output */
+ groupsize = 1;
+
+ switch (groupsize) {
+ case 8: {
+ const u64 *ptr8 = buf;
+ int ngroups = len / groupsize;
+
+ for (j = 0; j < ngroups; j++)
+ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+ "%s%16.16llx", j ? " " : "",
+ (unsigned long long)*(ptr8 + j));
+ ascii_column = 17 * ngroups + 2;
+ break;
+ }
+
+ case 4: {
+ const u32 *ptr4 = buf;
+ int ngroups = len / groupsize;
+
+ for (j = 0; j < ngroups; j++)
+ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+ "%s%8.8x", j ? " " : "", *(ptr4 + j));
+ ascii_column = 9 * ngroups + 2;
+ break;
+ }
+
+ case 2: {
+ const u16 *ptr2 = buf;
+ int ngroups = len / groupsize;
+
+ for (j = 0; j < ngroups; j++)
+ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
+ "%s%4.4x", j ? " " : "", *(ptr2 + j));
+ ascii_column = 5 * ngroups + 2;
+ break;
+ }
+
+ default:
+ for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
+ ch = ptr[j];
+ linebuf[lx++] = hex_asc(ch >> 4);
+ linebuf[lx++] = hex_asc(ch & 0x0f);
+ linebuf[lx++] = ' ';
+ }
+ if (j)
+ lx--;
+
+ ascii_column = 3 * rowsize + 2;
+ break;
+ }
+ if (!ascii)
+ goto nil;
+
+ while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
+ linebuf[lx++] = ' ';
+ for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
+ linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
+ : '.';
+nil:
+ linebuf[lx++] = '\0';
+}
+
+void _kc_print_hex_dump(const char *level,
+ const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+ const u8 *ptr = buf;
+ int i, linelen, remaining = len;
+ unsigned char linebuf[200];
+
+ if (rowsize != 16 && rowsize != 32)
+ rowsize = 16;
+
+ for (i = 0; i < len; i += rowsize) {
+ linelen = min(remaining, rowsize);
+ remaining -= rowsize;
+ _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
+ linebuf, sizeof(linebuf), ascii);
+
+ switch (prefix_type) {
+ case DUMP_PREFIX_ADDRESS:
+ printk("%s%s%*p: %s\n", level, prefix_str,
+ (int)(2 * sizeof(void *)), ptr + i, linebuf);
+ break;
+ case DUMP_PREFIX_OFFSET:
+ printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
+ break;
+ default:
+ printk("%s%s%s\n", level, prefix_str, linebuf);
+ break;
+ }
+ }
+}
+
+#ifdef HAVE_I2C_SUPPORT
+struct i2c_client *
+_kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
+{
+ struct i2c_client *client;
+ int status;
+
+ client = kzalloc(sizeof *client, GFP_KERNEL);
+ if (!client)
+ return NULL;
+
+ client->adapter = adap;
+
+ client->dev.platform_data = info->platform_data;
+
+ client->flags = info->flags;
+ client->addr = info->addr;
+
+ strlcpy(client->name, info->type, sizeof(client->name));
+
+ /* Check for address business */
+ status = i2c_check_addr(adap, client->addr);
+ if (status)
+ goto out_err;
+
+ client->dev.parent = &client->adapter->dev;
+ client->dev.bus = &i2c_bus_type;
+
+ status = i2c_attach_client(client);
+ if (status)
+ goto out_err;
+
+ dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n",
+ client->name, dev_name(&client->dev));
+
+ return client;
+
+out_err:
+ dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x "
+ "(%d)\n", client->name, client->addr, status);
+ kfree(client);
+ return NULL;
+}
+#endif /* HAVE_I2C_SUPPORT */
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifdef NAPI
+struct net_device *napi_to_poll_dev(const struct napi_struct *napi)
+{
+ struct adapter_q_vector *q_vector = container_of(napi,
+ struct adapter_q_vector,
+ napi);
+ return &q_vector->poll_dev;
+}
+
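+/* Adapter for the pre-NAPI net_device->poll() interface: fetch the
+ * napi_struct stashed in the fake netdev's priv pointer, run its poll handler
+ * and account the consumed budget/quota.
+ */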
+int __kc_adapter_clean(struct net_device *netdev, int *budget)
+{
+ int work_done;
+ int work_to_do = min(*budget, netdev->quota);
+ /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
+ struct napi_struct *napi = netdev->priv;
+ work_done = napi->poll(napi, work_to_do);
+ *budget -= work_done;
+ netdev->quota -= work_done;
+ return (work_done >= work_to_do) ? 1 : 0;
+}
+#endif /* NAPI */
+#endif /* < 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
+void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
+{
+ struct pci_dev *parent = pdev->bus->self;
+ u16 link_state;
+ int pos;
+
+ if (!parent)
+ return;
+
+ pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ if (pos) {
+ pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
+ link_state &= ~state;
+ pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
+ }
+}
+#endif /* < 2.6.26 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+#ifdef HAVE_TX_MQ
+void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
+{
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int i;
+
+ netif_stop_queue(netdev);
+ if (netif_is_multiqueue(netdev))
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ netif_stop_subqueue(netdev, i);
+}
+void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
+{
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int i;
+
+ netif_wake_queue(netdev);
+ if (netif_is_multiqueue(netdev))
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ netif_wake_subqueue(netdev, i);
+}
+void _kc_netif_tx_start_all_queues(struct net_device *netdev)
+{
+ struct adapter_struct *adapter = netdev_priv(netdev);
+ int i;
+
+ netif_start_queue(netdev);
+ if (netif_is_multiqueue(netdev))
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ netif_start_subqueue(netdev, i);
+}
+#endif /* HAVE_TX_MQ */
+
+void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...)
+{
+ va_list args;
+
+ printk(KERN_WARNING "------------[ cut here ]------------\n");
+ printk(KERN_WARNING "WARNING: at %s:%d \n", file, line);
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+
+ dump_stack();
+}
+#endif /* < 2.6.27 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
+
+int
+_kc_pci_prepare_to_sleep(struct pci_dev *dev)
+{
+ pci_power_t target_state;
+ int error;
+
+ target_state = pci_choose_state(dev, PMSG_SUSPEND);
+
+ pci_enable_wake(dev, target_state, true);
+
+ error = pci_set_power_state(dev, target_state);
+
+ if (error)
+ pci_enable_wake(dev, target_state, false);
+
+ return error;
+}
+
+int
+_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
+{
+ int err;
+
+ err = pci_enable_wake(dev, PCI_D3cold, enable);
+ if (err)
+ goto out;
+
+ err = pci_enable_wake(dev, PCI_D3hot, enable);
+
+out:
+ return err;
+}
+#endif /* < 2.6.28 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
+static void __kc_pci_set_master(struct pci_dev *pdev, bool enable)
+{
+ u16 old_cmd, cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &old_cmd);
+ if (enable)
+ cmd = old_cmd | PCI_COMMAND_MASTER;
+ else
+ cmd = old_cmd & ~PCI_COMMAND_MASTER;
+ if (cmd != old_cmd) {
+ dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n",
+ enable ? "enabling" : "disabling");
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) )
+ pdev->is_busmaster = enable;
+#endif
+}
+
+void _kc_pci_clear_master(struct pci_dev *dev)
+{
+ __kc_pci_set_master(dev, false);
+}
+#endif /* < 2.6.29 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev)
+{
+ int num_vf = 0;
+#ifdef CONFIG_PCI_IOV
+ struct pci_dev *vfdev;
+
+ /* loop through all ethernet devices starting at PF dev */
+ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL);
+ while (vfdev) {
+ if (vfdev->is_virtfn && vfdev->physfn == dev)
+ num_vf++;
+
+ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev);
+ }
+
+#endif
+ return num_vf;
+}
+#endif /* RHEL_RELEASE_CODE */
+#endif /* < 2.6.34 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
+#ifdef HAVE_TX_MQ
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+{
+ unsigned int real_num = dev->real_num_tx_queues;
+ struct Qdisc *qdisc;
+ int i;
+
+ if (txq < 1 || txq > dev->num_tx_queues)
+ return -EINVAL;
+
+ else if (txq > real_num)
+ dev->real_num_tx_queues = txq;
+ else if (txq < real_num) {
+ dev->real_num_tx_queues = txq;
+ for (i = txq; i < dev->num_tx_queues; i++) {
+ qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ if (qdisc) {
+ spin_lock_bh(qdisc_lock(qdisc));
+ qdisc_reset(qdisc);
+ spin_unlock_bh(qdisc_lock(qdisc));
+ }
+ }
+ }
+
+ return 0;
+}
+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
+#endif /* HAVE_TX_MQ */
+
+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+ const void __user *from, size_t count)
+{
+ loff_t pos = *ppos;
+ size_t res;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= available || !count)
+ return 0;
+ if (count > available - pos)
+ count = available - pos;
+ res = copy_from_user(to + pos, from, count);
+ if (res == count)
+ return -EFAULT;
+ count -= res;
+ *ppos = pos + count;
+ return count;
+}
+
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
+static const u32 _kc_flags_dup_features =
+ (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
+
+u32 _kc_ethtool_op_get_flags(struct net_device *dev)
+{
+ return dev->features & _kc_flags_dup_features;
+}
+
+int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
+{
+ if (data & ~supported)
+ return -EINVAL;
+
+ dev->features = ((dev->features & ~_kc_flags_dup_features) |
+ (data & _kc_flags_dup_features));
+ return 0;
+}
+#endif /* < 2.6.36 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
+
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
+#endif /* < 2.6.39 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
+void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
+ int off, int size, unsigned int truesize)
+{
+ skb_fill_page_desc(skb, i, page, off, size);
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += truesize;
+}
+
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+int _kc_simple_open(struct inode *inode, struct file *file)
+{
+ if (inode->i_private)
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+#endif /* SLE_VERSION < 11,3,0 */
+
+#endif /* < 3.4.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
+static inline int __kc_pcie_cap_version(struct pci_dev *dev)
+{
+ int pos;
+ u16 reg16;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return 0;
+ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
+ return reg16 & PCI_EXP_FLAGS_VERS;
+}
+
+static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev)
+{
+ return true;
+}
+
+static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+
+ return __kc_pcie_cap_version(dev) > 1 ||
+ type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_ENDPOINT ||
+ type == PCI_EXP_TYPE_LEG_END;
+}
+
+static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+ int pos;
+ u16 pcie_flags_reg;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return false;
+ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg);
+
+ return __kc_pcie_cap_version(dev) > 1 ||
+ type == PCI_EXP_TYPE_ROOT_PORT ||
+ (type == PCI_EXP_TYPE_DOWNSTREAM &&
+ pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
+}
+
+static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+
+ return __kc_pcie_cap_version(dev) > 1 ||
+ type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_RC_EC;
+}
+
+static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
+{
+ if (!pci_is_pcie(dev))
+ return false;
+
+ switch (pos) {
+ case PCI_EXP_FLAGS_TYPE:
+ return true;
+ case PCI_EXP_DEVCAP:
+ case PCI_EXP_DEVCTL:
+ case PCI_EXP_DEVSTA:
+ return __kc_pcie_cap_has_devctl(dev);
+ case PCI_EXP_LNKCAP:
+ case PCI_EXP_LNKCTL:
+ case PCI_EXP_LNKSTA:
+ return __kc_pcie_cap_has_lnkctl(dev);
+ case PCI_EXP_SLTCAP:
+ case PCI_EXP_SLTCTL:
+ case PCI_EXP_SLTSTA:
+ return __kc_pcie_cap_has_sltctl(dev);
+ case PCI_EXP_RTCTL:
+ case PCI_EXP_RTCAP:
+ case PCI_EXP_RTSTA:
+ return __kc_pcie_cap_has_rtctl(dev);
+ case PCI_EXP_DEVCAP2:
+ case PCI_EXP_DEVCTL2:
+ case PCI_EXP_LNKCAP2:
+ case PCI_EXP_LNKCTL2:
+ case PCI_EXP_LNKSTA2:
+ return __kc_pcie_cap_version(dev) > 1;
+ default:
+ return false;
+ }
+}
+
+/*
+ * Note that these accessor functions are only for the "PCI Express
+ * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
+ * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
+ */
+int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
+{
+ int ret;
+
+ *val = 0;
+ if (pos & 1)
+ return -EINVAL;
+
+ if (__kc_pcie_capability_reg_implemented(dev, pos)) {
+ ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
+ /*
+ * Reset *val to 0 if pci_read_config_word() fails; it may
+ * have been written as 0xFFFF if a hardware error happens
+ * during pci_read_config_word().
+ */
+ if (ret)
+ *val = 0;
+ return ret;
+ }
+
+ /*
+ * For Functions that do not implement the Slot Capabilities,
+ * Slot Status, and Slot Control registers, these spaces must
+ * be hardwired to 0b, with the exception of the Presence Detect
+ * State bit in the Slot Status register of Downstream Ports,
+ * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
+ */
+ if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+ *val = PCI_EXP_SLTSTA_PDS;
+ }
+
+ return 0;
+}
+
+int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
+{
+ if (pos & 1)
+ return -EINVAL;
+
+ if (!__kc_pcie_capability_reg_implemented(dev, pos))
+ return 0;
+
+ return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
+}
+
+int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+ u16 clear, u16 set)
+{
+ int ret;
+ u16 val;
+
+ ret = __kc_pcie_capability_read_word(dev, pos, &val);
+ if (!ret) {
+ val &= ~clear;
+ val |= set;
+ ret = __kc_pcie_capability_write_word(dev, pos, val);
+ }
+
+ return ret;
+}
+
+int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos,
+ u16 clear)
+{
+ return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0);
+}
+#endif /* < 3.7.0 */
+
+/******************************************************************************
+ * ripped from linux/net/ipv6/exthdrs_core.c, GPL2, no direct copyright,
+ * inferred copyright from kernel
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) )
+int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
+ int target, unsigned short *fragoff, int *flags)
+{
+ unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
+ u8 nexthdr = ipv6_hdr(skb)->nexthdr;
+ unsigned int len;
+ bool found;
+
+#define __KC_IP6_FH_F_FRAG BIT(0)
+#define __KC_IP6_FH_F_AUTH BIT(1)
+#define __KC_IP6_FH_F_SKIP_RH BIT(2)
+
+ if (fragoff)
+ *fragoff = 0;
+
+ if (*offset) {
+ struct ipv6hdr _ip6, *ip6;
+
+ ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6);
+ if (!ip6 || (ip6->version != 6)) {
+ printk(KERN_ERR "IPv6 header not found\n");
+ return -EBADMSG;
+ }
+ start = *offset + sizeof(struct ipv6hdr);
+ nexthdr = ip6->nexthdr;
+ }
+ len = skb->len - start;
+
+ do {
+ struct ipv6_opt_hdr _hdr, *hp;
+ unsigned int hdrlen;
+ found = (nexthdr == target);
+
+ if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
+ if (target < 0 || found)
+ break;
+ return -ENOENT;
+ }
+
+ hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
+ if (!hp)
+ return -EBADMSG;
+
+ if (nexthdr == NEXTHDR_ROUTING) {
+ struct ipv6_rt_hdr _rh, *rh;
+
+ rh = skb_header_pointer(skb, start, sizeof(_rh),
+ &_rh);
+ if (!rh)
+ return -EBADMSG;
+
+ if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) &&
+ rh->segments_left == 0)
+ found = false;
+ }
+
+ if (nexthdr == NEXTHDR_FRAGMENT) {
+ unsigned short _frag_off;
+ __be16 *fp;
+
+ if (flags) /* Indicate that this is a fragment */
+ *flags |= __KC_IP6_FH_F_FRAG;
+ fp = skb_header_pointer(skb,
+ start+offsetof(struct frag_hdr,
+ frag_off),
+ sizeof(_frag_off),
+ &_frag_off);
+ if (!fp)
+ return -EBADMSG;
+
+ _frag_off = ntohs(*fp) & ~0x7;
+ if (_frag_off) {
+ if (target < 0 &&
+ ((!ipv6_ext_hdr(hp->nexthdr)) ||
+ hp->nexthdr == NEXTHDR_NONE)) {
+ if (fragoff)
+ *fragoff = _frag_off;
+ return hp->nexthdr;
+ }
+ return -ENOENT;
+ }
+ hdrlen = 8;
+ } else if (nexthdr == NEXTHDR_AUTH) {
+ if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0))
+ break;
+ hdrlen = (hp->hdrlen + 2) << 2;
+ } else
+ hdrlen = ipv6_optlen(hp);
+
+ if (!found) {
+ nexthdr = hp->nexthdr;
+ len -= hdrlen;
+ start += hdrlen;
+ }
+ } while (!found);
+
+ *offset = start;
+ return nexthdr;
+}
+#endif /* < 3.8.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
+#endif /* 3.9.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+#ifdef HAVE_FDB_OPS
+#ifdef USE_CONST_DEV_UC_CHAR
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ u16 flags)
+#else
+int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+ unsigned char *addr, u16 flags)
+#endif
+{
+ int err = -EINVAL;
+
+ /* If aging addresses are supported device will need to
+ * implement its own handler for this.
+ */
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ pr_info("%s: FDB only supports static addresses\n", dev->name);
+ return err;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_add_excl(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_add_excl(dev, addr);
+
+ /* Only return duplicate errors if NLM_F_EXCL is set */
+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
+ err = 0;
+
+ return err;
+}
+
+#ifdef USE_CONST_DEV_UC_CHAR
+#ifdef HAVE_FDB_DEL_NLATTR
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr)
+#else
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+ const unsigned char *addr)
+#endif
+#else
+int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+ unsigned char *addr)
+#endif
+{
+ int err = -EINVAL;
+
+ /* If aging addresses are supported device will need to
+ * implement its own handler for this.
+ */
+ if (!(ndm->ndm_state & NUD_PERMANENT)) {
+ pr_info("%s: FDB only supports static addresses\n", dev->name);
+ return err;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_del(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(dev, addr);
+
+ return err;
+}
+
+#endif /* HAVE_FDB_OPS */
+#ifdef CONFIG_PCI_IOV
+int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev)
+{
+ unsigned int vfs_assigned = 0;
+#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
+ int pos;
+ struct pci_dev *vfdev;
+ unsigned short dev_id;
+
+ /* only search if we are a PF */
+ if (!dev->is_physfn)
+ return 0;
+
+ /* find SR-IOV capability */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
+
+ /*
+ * determine the device ID for the VFs, the vendor ID will be the
+ * same as the PF so there is no need to check for that one
+ */
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id);
+
+ /* loop through all the VFs to see if we own any that are assigned */
+ vfdev = pci_get_device(dev->vendor, dev_id, NULL);
+ while (vfdev) {
+ /*
+ * It is considered assigned if it is a virtual function with
+ * our dev as the physical function and the assigned bit is set
+ */
+ if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
+ (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
+ vfs_assigned++;
+
+ vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
+ }
+
+#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */
+ return vfs_assigned;
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* 3.10.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) )
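+/* Translation of the PCI_EXP_LNKSTA current link speed field: encodings 1-3
+ * correspond to 2.5, 5.0 and 8.0 GT/s, everything else is reported unknown.
+ */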
+const unsigned char pcie_link_speed[] = {
+ PCI_SPEED_UNKNOWN, /* 0 */
+ PCIE_SPEED_2_5GT, /* 1 */
+ PCIE_SPEED_5_0GT, /* 2 */
+ PCIE_SPEED_8_0GT, /* 3 */
+ PCI_SPEED_UNKNOWN, /* 4 */
+ PCI_SPEED_UNKNOWN, /* 5 */
+ PCI_SPEED_UNKNOWN, /* 6 */
+ PCI_SPEED_UNKNOWN, /* 7 */
+ PCI_SPEED_UNKNOWN, /* 8 */
+ PCI_SPEED_UNKNOWN, /* 9 */
+ PCI_SPEED_UNKNOWN, /* A */
+ PCI_SPEED_UNKNOWN, /* B */
+ PCI_SPEED_UNKNOWN, /* C */
+ PCI_SPEED_UNKNOWN, /* D */
+ PCI_SPEED_UNKNOWN, /* E */
+ PCI_SPEED_UNKNOWN /* F */
+};
+
+int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ int ret;
+
+ *speed = PCI_SPEED_UNKNOWN;
+ *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+ while (dev) {
+ u16 lnksta;
+ enum pci_bus_speed next_speed;
+ enum pcie_link_width next_width;
+
+ ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+ if (ret)
+ return ret;
+
+ next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+ next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+ PCI_EXP_LNKSTA_NLW_SHIFT;
+
+ if (next_speed < *speed)
+ *speed = next_speed;
+
+ if (next_width < *width)
+ *width = next_width;
+
+ dev = dev->bus->self;
+ }
+
+ return 0;
+}
+
+#endif /* < 3.12.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) )
+int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+ int err = dma_set_mask(dev, mask);
+
+ if (!err)
+ /* coherent mask for the same size will always succeed if
+ * dma_set_mask does. However we store the error anyway, due
+ * to some kernels which use gcc's warn_unused_result on their
+ * definition of dma_set_coherent_mask.
+ */
+ err = dma_set_coherent_mask(dev, mask);
+ return err;
+}
+
+void __kc_netdev_rss_key_fill(void *buffer, size_t len)
+{
+ /* Set of random keys generated using kernel random number generator */
+ static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62,
+ 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F,
+ 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95,
+ 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC,
+ 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41,
+ 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A,
+ 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20};
+
+ BUG_ON(len > NETDEV_RSS_KEY_LEN);
+ memcpy(buffer, seed, len);
+}
+#endif /* 3.13.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
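+/* pci_enable_msix_range() backport: retry pci_enable_msix() with the vector
+ * count it reports as available until it succeeds, fails outright, or the
+ * count drops below minvec (-ENOSPC).
+ */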
+int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
+ int minvec, int maxvec)
+{
+ int nvec = maxvec;
+ int rc;
+
+ if (maxvec < minvec)
+ return -ERANGE;
+
+ do {
+ rc = pci_enable_msix(dev, entries, nvec);
+ if (rc < 0) {
+ return rc;
+ } else if (rc > 0) {
+ if (rc < minvec)
+ return -ENOSPC;
+ nvec = rc;
+ }
+ } while (rc);
+
+ return nvec;
+}
+#endif /* 3.14.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) )
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
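+/* Backports of __hw_addr_sync_dev()/__hw_addr_unsync_dev(): unsync() is
+ * called for addresses whose last user is gone, sync() for addresses not yet
+ * pushed to the device.
+ */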
+int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*sync)(struct net_device *, const unsigned char *),
+ int (*unsync)(struct net_device *, const unsigned char *))
+{
+ struct netdev_hw_addr *ha, *tmp;
+ int err;
+
+ /* first go through and flush out any stale entries */
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+ if (!ha->synced || ha->refcount != 1)
+#else
+ if (!ha->sync_cnt || ha->refcount != 1)
+#endif
+ continue;
+
+ if (unsync && unsync(dev, ha->addr))
+ continue;
+
+ list_del_rcu(&ha->list);
+ kfree_rcu(ha, rcu_head);
+ list->count--;
+ }
+
+ /* go through and sync new entries to the list */
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+ if (ha->synced)
+#else
+ if (ha->sync_cnt)
+#endif
+ continue;
+
+ err = sync(dev, ha->addr);
+ if (err)
+ return err;
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+ ha->synced = true;
+#else
+ ha->sync_cnt++;
+#endif
+ ha->refcount++;
+ }
+
+ return 0;
+}
+
+void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*unsync)(struct net_device *, const unsigned char *))
+{
+ struct netdev_hw_addr *ha, *tmp;
+
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+ if (!ha->synced)
+#else
+ if (!ha->sync_cnt)
+#endif
+ continue;
+
+ if (unsync && unsync(dev, ha->addr))
+ continue;
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+ ha->synced = false;
+#else
+ ha->sync_cnt--;
+#endif
+ if (--ha->refcount)
+ continue;
+
+ list_del_rcu(&ha->list);
+ kfree_rcu(ha, rcu_head);
+ list->count--;
+ }
+}
+
+#endif /* NETDEV_HW_ADDR_T_UNICAST */
+#ifndef NETDEV_HW_ADDR_T_MULTICAST
+int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count,
+ struct net_device *dev,
+ int (*sync)(struct net_device *, const unsigned char *),
+ int (*unsync)(struct net_device *, const unsigned char *))
+{
+ struct dev_addr_list *da, **next = list;
+ int err;
+
+ /* first go through and flush out any stale entries */
+ while ((da = *next) != NULL) {
+ if (da->da_synced && da->da_users == 1) {
+ if (!unsync || !unsync(dev, da->da_addr)) {
+ *next = da->next;
+ kfree(da);
+ (*count)--;
+ continue;
+ }
+ }
+ next = &da->next;
+ }
+
+ /* go through and sync new entries to the list */
+ for (da = *list; da != NULL; da = da->next) {
+ if (da->da_synced)
+ continue;
+
+ err = sync(dev, da->da_addr);
+ if (err)
+ return err;
+
+ da->da_synced++;
+ da->da_users++;
+ }
+
+ return 0;
+}
+
+void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count,
+ struct net_device *dev,
+ int (*unsync)(struct net_device *, const unsigned char *))
+{
+ struct dev_addr_list *da;
+
+ while ((da = *list) != NULL) {
+ if (da->da_synced) {
+ if (!unsync || !unsync(dev, da->da_addr)) {
+ da->da_synced--;
+ if (--da->da_users == 0) {
+ *list = da->next;
+ kfree(da);
+ (*count)--;
+ continue;
+ }
+ }
+ }
+ list = &da->next;
+ }
+}
+#endif /* NETDEV_HW_ADDR_T_MULTICAST */
+#endif /* HAVE_SET_RX_MODE */
+#endif /* 3.16.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) )
+#ifndef NO_PTP_SUPPORT
+static void __kc_sock_efree(struct sk_buff *skb)
+{
+ sock_put(skb->sk);
+}
+
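+/* Clone an skb for TX timestamping while holding a reference on its socket;
+ * the clone's destructor (__kc_sock_efree) drops that reference again.
+ */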
+struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ struct sk_buff *clone;
+
+ if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt))
+ return NULL;
+
+ clone = skb_clone(skb, GFP_ATOMIC);
+ if (!clone) {
+ sock_put(sk);
+ return NULL;
+ }
+
+ clone->sk = sk;
+ clone->destructor = __kc_sock_efree;
+
+ return clone;
+}
+
+void __kc_skb_complete_tx_timestamp(struct sk_buff *skb,
+ struct skb_shared_hwtstamps *hwtstamps)
+{
+ struct sock_exterr_skb *serr;
+ struct sock *sk = skb->sk;
+ int err;
+
+ sock_hold(sk);
+
+ *skb_hwtstamps(skb) = *hwtstamps;
+
+ serr = SKB_EXT_ERR(skb);
+ memset(serr, 0, sizeof(*serr));
+ serr->ee.ee_errno = ENOMSG;
+ serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+
+ err = sock_queue_err_skb(sk, skb);
+ if (err)
+ kfree_skb(skb);
+
+ sock_put(sk);
+}
+#endif
+
+/* include headers needed for get_headlen function */
+#ifdef HAVE_SCTP
+#include <linux/sctp.h>
+#endif
+
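+/* Parse the Ethernet/VLAN, IPv4/IPv6 and TCP/UDP/SCTP headers found in the
+ * linear data to estimate how many header bytes should be pulled, never
+ * returning more than max_len.
+ */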
+unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len)
+{
+ union {
+ unsigned char *network;
+ /* l2 headers */
+ struct ethhdr *eth;
+ struct vlan_hdr *vlan;
+ /* l3 headers */
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } hdr;
+ __be16 proto;
+ u8 nexthdr = 0; /* default to not TCP */
+ u8 hlen;
+
+ /* this should never happen, but better safe than sorry */
+ if (max_len < ETH_HLEN)
+ return max_len;
+
+ /* initialize network frame pointer */
+ hdr.network = data;
+
+ /* set first protocol and move network header forward */
+ proto = hdr.eth->h_proto;
+ hdr.network += ETH_HLEN;
+
+again:
+ switch (proto) {
+ /* handle any vlan tag if present */
+ case __constant_htons(ETH_P_8021AD):
+ case __constant_htons(ETH_P_8021Q):
+ if ((hdr.network - data) > (max_len - VLAN_HLEN))
+ return max_len;
+
+ proto = hdr.vlan->h_vlan_encapsulated_proto;
+ hdr.network += VLAN_HLEN;
+ goto again;
+ /* handle L3 protocols */
+ case __constant_htons(ETH_P_IP):
+ if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
+ return max_len;
+
+ /* access ihl as a u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[0] & 0x0F) << 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct iphdr))
+ return hdr.network - data;
+
+ /* record next protocol if header is present */
+ if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
+ nexthdr = hdr.ipv4->protocol;
+
+ hdr.network += hlen;
+ break;
+#ifdef NETIF_F_TSO6
+ case __constant_htons(ETH_P_IPV6):
+ if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+ return max_len;
+
+ /* record next protocol */
+ nexthdr = hdr.ipv6->nexthdr;
+ hdr.network += sizeof(struct ipv6hdr);
+ break;
+#endif /* NETIF_F_TSO6 */
+ default:
+ return hdr.network - data;
+ }
+
+ /* finally sort out L4 */
+ switch (nexthdr) {
+ case IPPROTO_TCP:
+ if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
+ return max_len;
+
+ /* access doff as a u8 to avoid unaligned access on ia64 */
+ hdr.network += max_t(u8, sizeof(struct tcphdr),
+ (hdr.network[12] & 0xF0) >> 2);
+
+ break;
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
+ hdr.network += sizeof(struct udphdr);
+ break;
+#ifdef HAVE_SCTP
+ case IPPROTO_SCTP:
+ hdr.network += sizeof(struct sctphdr);
+ break;
+#endif
+ }
+
+ /*
+ * If everything has gone correctly hdr.network should be the
+ * data section of the packet and will be the end of the header.
+ * If not then it probably represents the end of the last recognized
+ * header.
+ */
+ return min_t(unsigned int, hdr.network - data, max_len);
+}
+
+#endif /* < 3.18.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) )
+#ifdef HAVE_NET_GET_RANDOM_ONCE
+static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN];
+
+void __kc_netdev_rss_key_fill(void *buffer, size_t len)
+{
+ BUG_ON(len > sizeof(__kc_netdev_rss_key));
+ net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key));
+ memcpy(buffer, __kc_netdev_rss_key, len);
+}
+#endif
+#endif
diff --git a/drivers/net/ethernet/intel/igb/kcompat.h b/drivers/net/ethernet/intel/igb/kcompat.h
new file mode 100644
index 000000000000..6b3b3fe5a452
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/kcompat.h
@@ -0,0 +1,5075 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2015 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _KCOMPAT_H_
+#define _KCOMPAT_H_
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#else
+#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
+#endif
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/mii.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#include <net/ipv6.h>
+/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */
+#ifndef UTS_RELEASE
+/* utsrelease.h changed locations in 2.6.33 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
+#include <linux/utsrelease.h>
+#else
+#include <generated/utsrelease.h>
+#endif
+#endif
+
+/* NAPI enable/disable flags here */
+#define NAPI
+
+#define adapter_struct igb_adapter
+#define adapter_q_vector igb_q_vector
+#define NAPI
+
+/* and finally set defines so that the code sees the changes */
+#ifdef NAPI
+#else
+#endif /* NAPI */
+
+/* Dynamic LTR and deeper C-State support disable/enable */
+
+/* packet split disable/enable */
+#ifdef DISABLE_PACKET_SPLIT
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+#define CONFIG_IGB_DISABLE_PACKET_SPLIT
+#endif
+#endif /* DISABLE_PACKET_SPLIT */
+
+/* MSI compatibility code for all kernels and drivers */
+#ifdef DISABLE_PCI_MSI
+#undef CONFIG_PCI_MSI
+#endif
+#ifndef CONFIG_PCI_MSI
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+struct msix_entry {
+ u16 vector; /* kernel uses to write allocated vector */
+ u16 entry; /* driver uses to specify entry, OS writes */
+};
+#endif
+#undef pci_enable_msi
+#define pci_enable_msi(a) -ENOTSUPP
+#undef pci_disable_msi
+#define pci_disable_msi(a) do {} while (0)
+#undef pci_enable_msix
+#define pci_enable_msix(a, b, c) -ENOTSUPP
+#undef pci_disable_msix
+#define pci_disable_msix(a) do {} while (0)
+#define msi_remove_pci_irq_vectors(a) do {} while (0)
+#endif /* CONFIG_PCI_MSI */
+#ifdef DISABLE_PM
+#undef CONFIG_PM
+#endif
+
+#ifdef DISABLE_NET_POLL_CONTROLLER
+#undef CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef PMSG_SUSPEND
+#define PMSG_SUSPEND 3
+#endif
+
+/* generic boolean compatibility */
+#undef TRUE
+#undef FALSE
+#define TRUE true
+#define FALSE false
+#ifdef GCC_VERSION
+#if ( GCC_VERSION < 3000 )
+#define _Bool char
+#endif
+#else
+#define _Bool char
+#endif
+
+#undef __always_unused
+#define __always_unused __attribute__((__unused__))
+
+#undef __maybe_unused
+#define __maybe_unused __attribute__((__unused__))
+
+/* kernels less than 2.4.14 don't have this */
+#ifndef ETH_P_8021Q
+#define ETH_P_8021Q 0x8100
+#endif
+
+#ifndef module_param
+#define module_param(v,t,p) MODULE_PARM(v, "i");
+#endif
+
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK 0xffffffffffffffffULL
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0x00000000ffffffffULL
+#endif
+
+#ifndef PCI_CAP_ID_EXP
+#define PCI_CAP_ID_EXP 0x10
+#endif
+
+#ifndef uninitialized_var
+#define uninitialized_var(x) x = x
+#endif
+
+#ifndef PCIE_LINK_STATE_L0S
+#define PCIE_LINK_STATE_L0S 1
+#endif
+#ifndef PCIE_LINK_STATE_L1
+#define PCIE_LINK_STATE_L1 2
+#endif
+
+#ifndef mmiowb
+#ifdef CONFIG_IA64
+#define mmiowb() asm volatile ("mf.a" ::: "memory")
+#else
+#define mmiowb()
+#endif
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)
+#endif
+
+#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
+#define free_netdev(x) kfree(x)
+#endif
+
+#ifdef HAVE_POLL_CONTROLLER
+#define CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef SKB_DATAREF_SHIFT
+/* if we do not have the infrastructure to detect if skb_header is cloned
+ just return false in all cases */
+#define skb_header_cloned(x) 0
+#endif
+
+#ifndef NETIF_F_GSO
+#define gso_size tso_size
+#define gso_segs tso_segs
+#endif
+
+#ifndef NETIF_F_GRO
+#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \
+ vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
+#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
+#endif
+
+#ifndef NETIF_F_SCTP_CSUM
+#define NETIF_F_SCTP_CSUM 0
+#endif
+
+#ifndef NETIF_F_LRO
+#define NETIF_F_LRO (1 << 15)
+#endif
+
+#ifndef NETIF_F_NTUPLE
+#define NETIF_F_NTUPLE (1 << 27)
+#endif
+
+#ifndef NETIF_F_ALL_FCOE
+#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
+ NETIF_F_FSO)
+#endif
+
+#ifndef IPPROTO_SCTP
+#define IPPROTO_SCTP 132
+#endif
+
+#ifndef IPPROTO_UDPLITE
+#define IPPROTO_UDPLITE 136
+#endif
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#define CHECKSUM_COMPLETE CHECKSUM_HW
+#endif
+
+#ifndef __read_mostly
+#define __read_mostly
+#endif
+
+#ifndef MII_RESV1
+#define MII_RESV1 0x17 /* Reserved... */
+#endif
+
+#ifndef unlikely
+#define unlikely(_x) _x
+#define likely(_x) _x
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+#ifndef node_online
+#define node_online(node) ((node) == 0)
+#endif
+
+#ifndef num_online_cpus
+#define num_online_cpus() smp_num_cpus
+#endif
+
+#ifndef cpu_online
+#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map)
+#endif
+
+#ifndef _LINUX_RANDOM_H
+#include <linux/random.h>
+#endif
+
+#ifndef DECLARE_BITMAP
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#endif
+#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
+#endif
+
+#ifndef VLAN_HLEN
+#define VLAN_HLEN 4
+#endif
+
+#ifndef VLAN_ETH_HLEN
+#define VLAN_ETH_HLEN 18
+#endif
+
+#ifndef VLAN_ETH_FRAME_LEN
+#define VLAN_ETH_FRAME_LEN 1518
+#endif
+
+#ifndef DCA_GET_TAG_TWO_ARGS
+#define dca3_get_tag(a,b) dca_get_tag(b)
+#endif
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#if defined(__i386__) || defined(__x86_64__)
+#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#endif
+#endif
+
+/* taken from 2.6.24 definition in linux/kernel.h */
+#ifndef IS_ALIGNED
+#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0)
+#endif
+
+#ifdef IS_ENABLED
+#undef IS_ENABLED
+#undef __ARG_PLACEHOLDER_1
+#undef config_enabled
+#undef _config_enabled
+#undef __config_enabled
+#undef ___config_enabled
+#endif
+
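+/* Local copy of the kconfig.h IS_ENABLED() machinery: a config symbol defined
+ * to 1 expands via the placeholder trick to 1, anything else to 0, and
+ * IS_ENABLED() accepts both the built-in and the module form of the option.
+ */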
+#define __ARG_PLACEHOLDER_1 0,
+#define config_enabled(cfg) _config_enabled(cfg)
+#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
+#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
+#define ___config_enabled(__ignored, val, ...) val
+
+#define IS_ENABLED(option) \
+ (config_enabled(option) || config_enabled(option##_MODULE))
+
+#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX)
+struct _kc_vlan_ethhdr {
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+#define vlan_ethhdr _kc_vlan_ethhdr
+struct _kc_vlan_hdr {
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+#define vlan_hdr _kc_vlan_hdr
+#define vlan_tx_tag_present(_skb) 0
+#define vlan_tx_tag_get(_skb) 0
+#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */
+
+#ifndef VLAN_PRIO_SHIFT
+#define VLAN_PRIO_SHIFT 13
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS_2_5GB
+#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS_5_0GB
+#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002
+#endif
+
+#ifndef PCI_EXP_LNKSTA_CLS_8_0GB
+#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X1
+#define PCI_EXP_LNKSTA_NLW_X1 0x0010
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X2
+#define PCI_EXP_LNKSTA_NLW_X2 0x0020
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X4
+#define PCI_EXP_LNKSTA_NLW_X4 0x0040
+#endif
+
+#ifndef PCI_EXP_LNKSTA_NLW_X8
+#define PCI_EXP_LNKSTA_NLW_X8 0x0080
+#endif
+
+#ifndef __GFP_COLD
+#define __GFP_COLD 0
+#endif
+
+#ifndef __GFP_COMP
+#define __GFP_COMP 0
+#endif
+
+#ifndef IP_OFFSET
+#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
+#endif
+
+/*****************************************************************************/
+/* Installations with ethtool version without eeprom, adapter id, or statistics
+ * support */
+
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
+
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x1d
+#undef ethtool_drvinfo
+#define ethtool_drvinfo k_ethtool_drvinfo
+struct k_ethtool_drvinfo {
+ u32 cmd;
+ char driver[32];
+ char version[32];
+ char fw_version[32];
+ char bus_info[32];
+ char reserved1[32];
+ char reserved2[16];
+ u32 n_stats;
+ u32 testinfo_len;
+ u32 eedump_len;
+ u32 regdump_len;
+};
+
+struct ethtool_stats {
+ u32 cmd;
+ u32 n_stats;
+ u64 data[0];
+};
+#endif /* ETHTOOL_GSTATS */
+
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x1c
+#endif /* ETHTOOL_PHYS_ID */
+
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x1b
+enum ethtool_stringset {
+ ETH_SS_TEST = 0,
+ ETH_SS_STATS,
+};
+struct ethtool_gstrings {
+ u32 cmd; /* ETHTOOL_GSTRINGS */
+ u32 string_set; /* string set id, e.g. ETH_SS_TEST, etc. */
+ u32 len; /* number of strings in the string set */
+ u8 data[0];
+};
+#endif /* ETHTOOL_GSTRINGS */
+
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x1a
+enum ethtool_test_flags {
+ ETH_TEST_FL_OFFLINE = (1 << 0),
+ ETH_TEST_FL_FAILED = (1 << 1),
+};
+struct ethtool_test {
+ u32 cmd;
+ u32 flags;
+ u32 reserved;
+ u32 len;
+ u64 data[0];
+};
+#endif /* ETHTOOL_TEST */
+
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0xb
+#undef ETHTOOL_GREGS
+struct ethtool_eeprom {
+ u32 cmd;
+ u32 magic;
+ u32 offset;
+ u32 len;
+ u8 data[0];
+};
+
+struct ethtool_value {
+ u32 cmd;
+ u32 data;
+};
+#endif /* ETHTOOL_GEEPROM */
+
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0xa
+#endif /* ETHTOOL_GLINK */
+
+#ifndef ETHTOOL_GWOL
+#define ETHTOOL_GWOL 0x5
+#define ETHTOOL_SWOL 0x6
+#define SOPASS_MAX 6
+struct ethtool_wolinfo {
+ u32 cmd;
+ u32 supported;
+ u32 wolopts;
+ u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */
+};
+#endif /* ETHTOOL_GWOL */
+
+#ifndef ETHTOOL_GREGS
+#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */
+#define ethtool_regs _kc_ethtool_regs
+/* for passing big chunks of data */
+struct _kc_ethtool_regs {
+ u32 cmd;
+ u32 version; /* driver-specific, indicates different chips/revs */
+ u32 len; /* bytes */
+ u8 data[0];
+};
+#endif /* ETHTOOL_GREGS */
+
+#ifndef ETHTOOL_GMSGLVL
+#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
+#endif
+#ifndef ETHTOOL_SMSGLVL
+#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */
+#endif
+#ifndef ETHTOOL_NWAY_RST
+#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */
+#endif
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0x0000000a /* Get link status */
+#endif
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
+#endif
+#ifndef ETHTOOL_SEEPROM
+#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
+#endif
+#ifndef ETHTOOL_GCOALESCE
+#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
+/* for configuring coalescing parameters of chip */
+#define ethtool_coalesce _kc_ethtool_coalesce
+struct _kc_ethtool_coalesce {
+ u32 cmd; /* ETHTOOL_{G,S}COALESCE */
+
+ /* How many usecs to delay an RX interrupt after
+ * a packet arrives. If 0, only rx_max_coalesced_frames
+ * is used.
+ */
+ u32 rx_coalesce_usecs;
+
+ /* How many packets to delay an RX interrupt after
+ * a packet arrives. If 0, only rx_coalesce_usecs is
+ * used. It is illegal to set both usecs and max frames
+ * to zero as this would cause RX interrupts to never be
+ * generated.
+ */
+ u32 rx_max_coalesced_frames;
+
+ /* Same as above two parameters, except that these values
+ * apply while an IRQ is being serviced by the host. Not
+ * all cards support this feature and the values are ignored
+ * in that case.
+ */
+ u32 rx_coalesce_usecs_irq;
+ u32 rx_max_coalesced_frames_irq;
+
+ /* How many usecs to delay a TX interrupt after
+ * a packet is sent. If 0, only tx_max_coalesced_frames
+ * is used.
+ */
+ u32 tx_coalesce_usecs;
+
+ /* How many packets to delay a TX interrupt after
+ * a packet is sent. If 0, only tx_coalesce_usecs is
+ * used. It is illegal to set both usecs and max frames
+ * to zero as this would cause TX interrupts to never be
+ * generated.
+ */
+ u32 tx_max_coalesced_frames;
+
+ /* Same as above two parameters, except that these values
+ * apply while an IRQ is being serviced by the host. Not
+ * all cards support this feature and the values are ignored
+ * in that case.
+ */
+ u32 tx_coalesce_usecs_irq;
+ u32 tx_max_coalesced_frames_irq;
+
+ /* How many usecs to delay in-memory statistics
+ * block updates. Some drivers do not have an in-memory
+ * statistic block, and in such cases this value is ignored.
+ * This value must not be zero.
+ */
+ u32 stats_block_coalesce_usecs;
+
+ /* Adaptive RX/TX coalescing is an algorithm implemented by
+ * some drivers to improve latency under low packet rates and
+ * improve throughput under high packet rates. Some drivers
+ * only implement one of RX or TX adaptive coalescing. Anything
+ * not implemented by the driver causes these values to be
+ * silently ignored.
+ */
+ u32 use_adaptive_rx_coalesce;
+ u32 use_adaptive_tx_coalesce;
+
+ /* When the packet rate (measured in packets per second)
+ * is below pkt_rate_low, the {rx,tx}_*_low parameters are
+ * used.
+ */
+ u32 pkt_rate_low;
+ u32 rx_coalesce_usecs_low;
+ u32 rx_max_coalesced_frames_low;
+ u32 tx_coalesce_usecs_low;
+ u32 tx_max_coalesced_frames_low;
+
+ /* When the packet rate is below pkt_rate_high but above
+ * pkt_rate_low (both measured in packets per second) the
+ * normal {rx,tx}_* coalescing parameters are used.
+ */
+
+ /* When the packet rate (measured in packets per second)
+ * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+ * used.
+ */
+ u32 pkt_rate_high;
+ u32 rx_coalesce_usecs_high;
+ u32 rx_max_coalesced_frames_high;
+ u32 tx_coalesce_usecs_high;
+ u32 tx_max_coalesced_frames_high;
+
+ /* How often to do adaptive coalescing packet rate sampling,
+ * measured in seconds. Must not be zero.
+ */
+ u32 rate_sample_interval;
+};
+#endif /* ETHTOOL_GCOALESCE */
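+
+/* Illustrative sketch, not from the original source: a driver's ethtool
+ * get_coalesce handler typically fills only the fields its hardware supports
+ * and leaves the rest at zero (foo_adapter and rx_itr_usecs are hypothetical
+ * names used only for this example):
+ *
+ *	static int foo_get_coalesce(struct net_device *netdev,
+ *				    struct ethtool_coalesce *ec)
+ *	{
+ *		struct foo_adapter *adapter = netdev_priv(netdev);
+ *
+ *		ec->rx_coalesce_usecs = adapter->rx_itr_usecs;
+ *		return 0;
+ *	}
+ */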
+
+#ifndef ETHTOOL_SCOALESCE
+#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
+#endif
+#ifndef ETHTOOL_GRINGPARAM
+#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
+/* for configuring RX/TX ring parameters */
+#define ethtool_ringparam _kc_ethtool_ringparam
+struct _kc_ethtool_ringparam {
+ u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
+
+ /* Read only attributes. These indicate the maximum number
+ * of pending RX/TX ring entries the driver will allow the
+ * user to set.
+ */
+ u32 rx_max_pending;
+ u32 rx_mini_max_pending;
+ u32 rx_jumbo_max_pending;
+ u32 tx_max_pending;
+
+ /* Values changeable by the user. The valid values are
+ * in the range 1 to the "*_max_pending" counterpart above.
+ */
+ u32 rx_pending;
+ u32 rx_mini_pending;
+ u32 rx_jumbo_pending;
+ u32 tx_pending;
+};
+#endif /* ETHTOOL_GRINGPARAM */
+
+#ifndef ETHTOOL_SRINGPARAM
+#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */
+#endif
+#ifndef ETHTOOL_GPAUSEPARAM
+#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
+/* for configuring link flow control parameters */
+#define ethtool_pauseparam _kc_ethtool_pauseparam
+struct _kc_ethtool_pauseparam {
+ u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
+
+ /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+ * being true) the user may set 'autoneg' here non-zero to have the
+ * pause parameters be auto-negotiated too. In such a case, the
+ * {rx,tx}_pause values below determine what capabilities are
+ * advertised.
+ *
+ * If 'autoneg' is zero or the link is not being auto-negotiated,
+ * then {rx,tx}_pause force the driver to use/not-use pause
+ * flow control.
+ */
+ u32 autoneg;
+ u32 rx_pause;
+ u32 tx_pause;
+};
+#endif /* ETHTOOL_GPAUSEPARAM */
+
+#ifndef ETHTOOL_SPAUSEPARAM
+#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
+#endif
+#ifndef ETHTOOL_GRXCSUM
+#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SRXCSUM
+#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GTXCSUM
+#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STXCSUM
+#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GSG
+#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
+ * (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SSG
+#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
+ * (ethtool_value). */
+#endif
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */
+#endif
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
+#endif
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
+#endif
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
+#endif
+#ifndef ETHTOOL_GTSO
+#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STSO
+#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
+#endif
+
+#ifndef ETHTOOL_BUSINFO_LEN
+#define ETHTOOL_BUSINFO_LEN 32
+#endif
+
+#ifndef SPEED_2500
+#define SPEED_2500 2500
+#endif
+#ifndef SPEED_5000
+#define SPEED_5000 5000
+#endif
+
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
+#endif
+#ifndef AX_RELEASE_VERSION
+#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b))
+#endif
+
+#ifndef AX_RELEASE_CODE
+#define AX_RELEASE_CODE 0
+#endif
+
+#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0)
+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1)
+#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2))
+#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3)
+#endif
+
+#ifndef RHEL_RELEASE_CODE
+/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */
+#define RHEL_RELEASE_CODE 0
+#endif
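+
+/* Illustrative sketch, not from the original source: distro-specific code is
+ * guarded by first testing that RHEL_RELEASE_CODE is non-zero, so non-RHEL
+ * builds (where it is 0) fall back to the plain kernel-version checks:
+ *
+ *	#if (RHEL_RELEASE_CODE && \
+ *	     RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))
+ *	...
+ *	#endif
+ */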
+
+/* RHEL 7 didn't backport the parameter change in
+ * create_singlethread_workqueue.
+ * If/when RH corrects this we will want to tighten up the version check.
+ */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0))
+#undef create_singlethread_workqueue
+#define create_singlethread_workqueue(name) \
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
+#endif
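+
+/* Illustrative sketch, not from the original source: with the remap above a
+ * driver keeps calling the old helper unchanged and still gets an ordered,
+ * memory-reclaim-safe workqueue on RHEL 7 ("foo_wq" is a placeholder name):
+ *
+ *	struct workqueue_struct *wq;
+ *
+ *	wq = create_singlethread_workqueue("foo_wq");
+ *	if (!wq)
+ *		return -ENOMEM;
+ */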
+
+/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find
+ * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new
+ * enough versions of Ubuntu. Otherwise you can simply see it in the output of
+ * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in
+ * the linux-source package, but in the linux-headers package. It begins to
+ * appear in later releases of 14.04 and 14.10.
+ *
+ * Ex:
+ * <Ubuntu 14.04.1>
+ * $uname -r
+ * 3.13.0-45-generic
+ * ABI is 45
+ *
+ * <Ubuntu 14.10>
+ * $uname -r
+ * 3.16.0-23-generic
+ * ABI is 23
+ */
+#ifndef UTS_UBUNTU_RELEASE_ABI
+#define UTS_UBUNTU_RELEASE_ABI 0
+#define UBUNTU_VERSION_CODE 0
+#else
+/* Ubuntu does not provide actual release version macro, so we use the kernel
+ * version plus the ABI to generate a unique version code specific to Ubuntu.
+ * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to
+ * ignore differences in sublevel which are not important since we have the
+ * ABI value. Otherwise, it becomes impossible to correlate ABI to version for
+ * ordering checks.
+ */
+#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \
+ UTS_UBUNTU_RELEASE_ABI)
+
+#if UTS_UBUNTU_RELEASE_ABI > 255
+#error UTS_UBUNTU_RELEASE_ABI is too large...
+#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */
+
+#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) )
+/* Our version code scheme does not make sense for kernels older than 3.x, and
+ * we have no support in kcompat for this scenario. Thus, treat this as a
+ * non-Ubuntu kernel. It might be better to error out here.
+ */
+#define UTS_UBUNTU_RELEASE_ABI 0
+#define UBUNTU_VERSION_CODE 0
+#endif
+
+#endif
+
+/* Note that the 3rd digit is always zero, and will be ignored. This is
+ * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux
+ * version codes are 3 digit, this 3rd digit is superseded by the ABI value.
+ */
+#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d))
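+
+/* Illustrative sketch, not from the original source: for the Ubuntu 14.04.1
+ * kernel "3.13.0-45-generic" quoted above, UTS_UBUNTU_RELEASE_ABI is 45, so a
+ * backport check can be written as:
+ *
+ *	#if UBUNTU_VERSION_CODE && \
+ *	    UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,45)
+ *	...
+ *	#endif
+ *
+ * The sublevel is masked on one side and forced to zero on the other, so only
+ * (major, minor, ABI) take part in the comparison.
+ */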
+
+/* SuSE version macro is the same as Linux kernel version */
+#ifndef SLE_VERSION
+#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c)
+#endif
+#ifdef CONFIG_SUSE_KERNEL
+#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
+/* SLES11 GA is 2.6.27 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,0,0)
+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
+/* SLES11 SP1 is 2.6.32 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,1,0)
+#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) )
+/* SLES11 SP2 is 3.0.13 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,2,0)
+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76)))
+/* SLES11 SP3 is 3.0.76 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,3,0)
+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101)))
+/* SLES11 SP4 is 3.0.101 based */
+#define SLE_VERSION_CODE SLE_VERSION(11,4,0)
+#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28)))
+/* SLES12 GA is 3.12.28 based */
+#define SLE_VERSION_CODE SLE_VERSION(12,0,0)
+/* Newer SLES kernels must be added here with a >= check on the base kernel
+ * version. The idea is to order the checks from newest to oldest and catch
+ * all newer kernels with the >=.
+ */
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,47)))
+/* SLES12 SP1 is 3.12.47-based */
+#define SLE_VERSION_CODE SLE_VERSION(12,1,0)
+#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
+#endif /* CONFIG_SUSE_KERNEL */
+#ifndef SLE_VERSION_CODE
+#define SLE_VERSION_CODE 0
+#endif /* SLE_VERSION_CODE */
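+
+/* Illustrative sketch, not from the original source: SLES-specific quirks
+ * follow the same pattern as the RHEL checks; SLE_VERSION_CODE is 0 on
+ * non-SuSE kernels, so the guard is skipped there:
+ *
+ *	#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+ *	...
+ *	#endif
+ */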
+
+#ifdef __KLOCWORK__
+#ifdef ARRAY_SIZE
+#undef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+#endif /* __KLOCWORK__ */
+
+/*****************************************************************************/
+/* 2.4.3 => 2.4.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+#ifndef pci_set_dma_mask
+#define pci_set_dma_mask _kc_pci_set_dma_mask
+extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
+#endif
+
+#ifndef pci_request_regions
+#define pci_request_regions _kc_pci_request_regions
+extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
+#endif
+
+#ifndef pci_release_regions
+#define pci_release_regions _kc_pci_release_regions
+extern void _kc_pci_release_regions(struct pci_dev *pdev);
+#endif
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+#ifndef alloc_etherdev
+#define alloc_etherdev _kc_alloc_etherdev
+extern struct net_device * _kc_alloc_etherdev(int sizeof_priv);
+#endif
+
+#ifndef is_valid_ether_addr
+#define is_valid_ether_addr _kc_is_valid_ether_addr
+extern int _kc_is_valid_ether_addr(u8 *addr);
+#endif
+
+/**************************************/
+/* MISCELLANEOUS */
+
+#ifndef INIT_TQUEUE
+#define INIT_TQUEUE(_tq, _routine, _data) \
+ do { \
+ INIT_LIST_HEAD(&(_tq)->list); \
+ (_tq)->sync = 0; \
+ (_tq)->routine = _routine; \
+ (_tq)->data = _data; \
+ } while (0)
+#endif
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
+/* Generic MII registers. */
+#define MII_BMCR 0x00 /* Basic mode control register */
+#define MII_BMSR 0x01 /* Basic mode status register */
+#define MII_PHYSID1 0x02 /* PHYS ID 1 */
+#define MII_PHYSID2 0x03 /* PHYS ID 2 */
+#define MII_ADVERTISE 0x04 /* Advertisement control reg */
+#define MII_LPA 0x05 /* Link partner ability reg */
+#define MII_EXPANSION 0x06 /* Expansion register */
+/* Basic mode control register. */
+#define BMCR_FULLDPLX 0x0100 /* Full duplex */
+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
+/* Basic mode status register. */
+#define BMSR_ERCAP 0x0001 /* Ext-reg capability */
+#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
+#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
+#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
+#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
+#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
+/* Advertisement control register. */
+#define ADVERTISE_CSMA 0x0001 /* Only selector supported */
+#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
+#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
+#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+ ADVERTISE_100HALF | ADVERTISE_100FULL)
+/* Expansion register for auto-negotiation. */
+#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */
+#endif
+
+/*****************************************************************************/
+/* 2.4.6 => 2.4.3 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+#ifndef pci_set_power_state
+#define pci_set_power_state _kc_pci_set_power_state
+extern int _kc_pci_set_power_state(struct pci_dev *dev, int state);
+#endif
+
+#ifndef pci_enable_wake
+#define pci_enable_wake _kc_pci_enable_wake
+extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
+#endif
+
+#ifndef pci_disable_device
+#define pci_disable_device _kc_pci_disable_device
+extern void _kc_pci_disable_device(struct pci_dev *pdev);
+#endif
+
+/* PCI PM entry point syntax changed, so don't support suspend/resume */
+#undef CONFIG_PM
+
+#endif /* 2.4.6 => 2.4.3 */
+
+#ifndef HAVE_PCI_SET_MWI
+#define pci_set_mwi(X) pci_write_config_word(X, \
+ PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
+ PCI_COMMAND_INVALIDATE);
+#define pci_clear_mwi(X) pci_write_config_word(X, \
+ PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
+ ~PCI_COMMAND_INVALIDATE);
+#endif
+
+/*****************************************************************************/
+/* 2.4.10 => 2.4.9 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
+
+/**************************************/
+/* MODULE API */
+
+#ifndef MODULE_LICENSE
+ #define MODULE_LICENSE(X)
+#endif
+
+/**************************************/
+/* OTHER */
+
+#undef min
+#define min(x,y) ({ \
+ const typeof(x) _x = (x); \
+ const typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x < _y ? _x : _y; })
+
+#undef max
+#define max(x,y) ({ \
+ const typeof(x) _x = (x); \
+ const typeof(y) _y = (y); \
+ (void) (&_x == &_y); \
+ _x > _y ? _x : _y; })
+
+#define min_t(type,x,y) ({ \
+ type _x = (x); \
+ type _y = (y); \
+ _x < _y ? _x : _y; })
+
+#define max_t(type,x,y) ({ \
+ type _x = (x); \
+ type _y = (y); \
+ _x > _y ? _x : _y; })
+
+#ifndef list_for_each_safe
+#define list_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = pos->next; pos != (head); \
+ pos = n, n = pos->next)
+#endif
+
+#ifndef ____cacheline_aligned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_aligned_in_smp ____cacheline_aligned
+#else
+#define ____cacheline_aligned_in_smp
+#endif /* CONFIG_SMP */
+#endif
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
+extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...);
+#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args)
+extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args)
+#else /* 2.4.8 => 2.4.9 */
+extern int snprintf(char * buf, size_t size, const char *fmt, ...);
+extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+#endif
+#endif /* 2.4.10 => 2.4.9 */
+
+
+/*****************************************************************************/
+/* 2.4.12 => 2.4.10 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) )
+#ifndef HAVE_NETIF_MSG
+#define HAVE_NETIF_MSG 1
+enum {
+ NETIF_MSG_DRV = 0x0001,
+ NETIF_MSG_PROBE = 0x0002,
+ NETIF_MSG_LINK = 0x0004,
+ NETIF_MSG_TIMER = 0x0008,
+ NETIF_MSG_IFDOWN = 0x0010,
+ NETIF_MSG_IFUP = 0x0020,
+ NETIF_MSG_RX_ERR = 0x0040,
+ NETIF_MSG_TX_ERR = 0x0080,
+ NETIF_MSG_TX_QUEUED = 0x0100,
+ NETIF_MSG_INTR = 0x0200,
+ NETIF_MSG_TX_DONE = 0x0400,
+ NETIF_MSG_RX_STATUS = 0x0800,
+ NETIF_MSG_PKTDATA = 0x1000,
+ NETIF_MSG_HW = 0x2000,
+ NETIF_MSG_WOL = 0x4000,
+};
+
+#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
+#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
+#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
+#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
+#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
+#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
+#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
+#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
+#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
+#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
+#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
+#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
+#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
+#endif /* !HAVE_NETIF_MSG */
+#endif /* 2.4.12 => 2.4.10 */
+
+/*****************************************************************************/
+/* 2.4.13 => 2.4.12 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#ifndef virt_to_page
+ #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
+#endif
+
+#ifndef pci_map_page
+#define pci_map_page _kc_pci_map_page
+extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
+#endif
+
+#ifndef pci_unmap_page
+#define pci_unmap_page _kc_pci_unmap_page
+extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
+#endif
+
+/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
+
+#undef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0xffffffff
+#undef DMA_64BIT_MASK
+#define DMA_64BIT_MASK 0xffffffff
+
+/**************************************/
+/* OTHER */
+
+#ifndef cpu_relax
+#define cpu_relax() rep_nop()
+#endif
+
+struct vlan_ethhdr {
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ unsigned short h_vlan_proto;
+ unsigned short h_vlan_TCI;
+ unsigned short h_vlan_encapsulated_proto;
+};
+#endif /* 2.4.13 => 2.4.12 */
+
+/*****************************************************************************/
+/* 2.4.17 => 2.4.13 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
+
+#ifndef __devexit_p
+ #define __devexit_p(x) &(x)
+#endif
+
+#endif /* 2.4.17 => 2.4.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) )
+#define NETIF_MSG_HW 0x2000
+#define NETIF_MSG_WOL 0x4000
+
+#ifndef netif_msg_hw
+#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
+#endif
+#ifndef netif_msg_wol
+#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
+#endif
+#endif /* 2.4.18 */
+
+/*****************************************************************************/
+
+/*****************************************************************************/
+/* 2.4.20 => 2.4.19 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
+
+/* we won't support NAPI on less than 2.4.20 */
+#ifdef NAPI
+#undef NAPI
+#endif
+
+#endif /* 2.4.20 => 2.4.19 */
+
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
+#define pci_name(x) ((x)->slot_name)
+
+#ifndef SUPPORTED_10000baseT_Full
+#define SUPPORTED_10000baseT_Full (1 << 12)
+#endif
+#ifndef ADVERTISED_10000baseT_Full
+#define ADVERTISED_10000baseT_Full (1 << 12)
+#endif
+#endif
+
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
+#ifndef IGB_NO_LRO
+#define IGB_NO_LRO
+#endif
+#endif
+
+/*****************************************************************************/
+/* 2.4.23 => 2.4.22 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
+/*****************************************************************************/
+#ifdef NAPI
+#ifndef netif_poll_disable
+#define netif_poll_disable(x) _kc_netif_poll_disable(x)
+static inline void _kc_netif_poll_disable(struct net_device *netdev)
+{
+ while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
+ /* No hurry */
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout(1);
+ }
+}
+#endif
+#ifndef netif_poll_enable
+#define netif_poll_enable(x) _kc_netif_poll_enable(x)
+static inline void _kc_netif_poll_enable(struct net_device *netdev)
+{
+ clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
+}
+#endif
+#endif /* NAPI */
+#ifndef netif_tx_disable
+#define netif_tx_disable(x) _kc_netif_tx_disable(x)
+static inline void _kc_netif_tx_disable(struct net_device *dev)
+{
+ spin_lock_bh(&dev->xmit_lock);
+ netif_stop_queue(dev);
+ spin_unlock_bh(&dev->xmit_lock);
+}
+#endif
+#else /* 2.4.23 => 2.4.22 */
+#define HAVE_SCTP
+#endif /* 2.4.23 => 2.4.22 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
+ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
+#define ETHTOOL_OPS_COMPAT
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) )
+#define __user
+#endif /* < 2.4.27 */
+
+/*****************************************************************************/
+/* 2.5.71 => 2.4.x */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
+#define sk_protocol protocol
+#define pci_get_device pci_find_device
+#endif /* 2.5.71 => 2.4.x */
+
+/*****************************************************************************/
+/* < 2.4.27 or 2.6.0 <= 2.6.5 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
+ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
+
+#ifndef netif_msg_init
+#define netif_msg_init _kc_netif_msg_init
+static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
+{
+ /* use default */
+ if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
+ return default_msg_enable_bits;
+ if (debug_value == 0) /* no output */
+ return 0;
+ /* set low N bits */
+ return (1 << debug_value) - 1;
+}
+#endif
+
+#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
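+
+/* Illustrative sketch, not from the original source: a driver typically calls
+ * netif_msg_init() once at probe time and the netif_msg_*() helpers afterwards
+ * (adapter and debug are hypothetical names):
+ *
+ *	adapter->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
+ *					     NETIF_MSG_PROBE | NETIF_MSG_LINK);
+ *	...
+ *	if (netif_msg_link(adapter))
+ *		printk(KERN_INFO "link is up\n");
+ */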
+/*****************************************************************************/
+#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
+ (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
+ ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
+#define netdev_priv(x) x->priv
+#endif
+
+/*****************************************************************************/
+/* <= 2.5.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
+#include <linux/rtnetlink.h>
+#undef pci_register_driver
+#define pci_register_driver pci_module_init
+
+/*
+ * Most of the dma compat code is copied/modified from the 2.4.37
+ * /include/linux/libata-compat.h header file
+ */
+/* These definitions mirror those in pci.h, so they can be used
+ * interchangeably with their PCI_ counterparts */
+enum dma_data_direction {
+ DMA_BIDIRECTIONAL = 0,
+ DMA_TO_DEVICE = 1,
+ DMA_FROM_DEVICE = 2,
+ DMA_NONE = 3,
+};
+
+struct device {
+ struct pci_dev pdev;
+};
+
+static inline struct pci_dev *to_pci_dev (struct device *dev)
+{
+ return (struct pci_dev *) dev;
+}
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+ return (struct device *) pdev;
+}
+
+#define pdev_printk(lvl, pdev, fmt, args...) \
+ printk("%s %s: " fmt, lvl, pci_name(pdev), ## args)
+#define dev_err(dev, fmt, args...) \
+ pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args)
+#define dev_info(dev, fmt, args...) \
+ pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
+#define dev_warn(dev, fmt, args...) \
+ pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
+#define dev_notice(dev, fmt, args...) \
+ pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args)
+#define dev_dbg(dev, fmt, args...) \
+ pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args)
+
+/* NOTE: dangerous! we ignore the 'gfp' argument */
+#define dma_alloc_coherent(dev,sz,dma,gfp) \
+ pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
+#define dma_free_coherent(dev,sz,addr,dma_addr) \
+ pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
+
+#define dma_map_page(dev,a,b,c,d) \
+ pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
+#define dma_unmap_page(dev,a,b,c) \
+ pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_single(dev,a,b,c) \
+ pci_map_single(to_pci_dev(dev),(a),(b),(c))
+#define dma_unmap_single(dev,a,b,c) \
+ pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
+
+#define dma_map_sg(dev, sg, nents, dir) \
+ pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
+#define dma_unmap_sg(dev, sg, nents, dir) \
+ pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
+
+#define dma_sync_single(dev,a,b,c) \
+ pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
+
+/* for range just sync everything, that's all the pci API can do */
+#define dma_sync_single_range(dev,addr,off,sz,dir) \
+ pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
+
+#define dma_set_mask(dev,mask) \
+ pci_set_dma_mask(to_pci_dev(dev),(mask))
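+
+/* Illustrative sketch, not from the original source: with the wrappers above,
+ * generic DMA API calls written against struct device become pci_* calls on
+ * these old kernels; note the gfp argument is dropped, as warned above:
+ *
+ *	dma_addr_t dma;
+ *	void *buf;
+ *
+ *	buf = dma_alloc_coherent(pci_dev_to_dev(pdev), 4096, &dma, GFP_KERNEL);
+ *	...
+ *	dma_free_coherent(pci_dev_to_dev(pdev), 4096, buf, dma);
+ */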
+
+/* hlist_* code - double linked lists */
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+ *pprev = next;
+ if (next)
+ next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->next = NULL;
+ n->pprev = NULL;
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+ n->pprev = &h->first;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+ return !h->first;
+}
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+ h->next = NULL;
+ h->pprev = NULL;
+}
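+
+/* Illustrative sketch, not from the original source: minimal use of the
+ * backported hlist primitives (struct foo is a hypothetical container):
+ *
+ *	struct foo {
+ *		struct hlist_node node;
+ *		int key;
+ *	};
+ *
+ *	HLIST_HEAD(bucket);
+ *	struct foo f;
+ *
+ *	INIT_HLIST_NODE(&f.node);
+ *	hlist_add_head(&f.node, &bucket);
+ *	...
+ *	hlist_del(&f.node);
+ */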
+
+#ifndef might_sleep
+#define might_sleep()
+#endif
+#else
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+ return &pdev->dev;
+}
+#endif /* <= 2.5.0 */
+
+/*****************************************************************************/
+/* 2.5.28 => 2.4.23 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#undef INIT_WORK
+#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
+#undef container_of
+#define container_of list_entry
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+#define cancel_work_sync(x) flush_scheduled_work()
+
+#endif /* 2.5.28 => 2.4.23 */
+
+/*****************************************************************************/
+/* 2.6.0 => 2.5.28 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#ifndef read_barrier_depends
+#define read_barrier_depends() rmb()
+#endif
+
+#ifndef rcu_head
+struct __kc_callback_head {
+ struct __kc_callback_head *next;
+ void (*func)(struct callback_head *head);
+};
+#define rcu_head __kc_callback_head
+#endif
+
+#undef get_cpu
+#define get_cpu() smp_processor_id()
+#undef put_cpu
+#define put_cpu() do { } while(0)
+#define MODULE_INFO(version, _version)
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
+#endif
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
+#endif
+
+#define dma_set_coherent_mask(dev,mask) 1
+
+#undef dev_put
+#define dev_put(dev) __dev_put(dev)
+
+#ifndef skb_fill_page_desc
+#define skb_fill_page_desc _kc_skb_fill_page_desc
+extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
+#endif
+
+#undef ALIGN
+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
+
+#ifndef page_count
+#define page_count(p) atomic_read(&(p)->count)
+#endif
+
+#ifdef MAX_NUMNODES
+#undef MAX_NUMNODES
+#endif
+#define MAX_NUMNODES 1
+
+/* find_first_bit and find_next_bit are not defined for most
+ * 2.4 kernels (except for the Red Hat 2.4.21 kernels).
+ */
+#include <linux/bitops.h>
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+#undef find_next_bit
+#define find_next_bit _kc_find_next_bit
+extern unsigned long _kc_find_next_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset);
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
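+
+/* Illustrative sketch, not from the original source: the backported helpers
+ * are used like the in-kernel ones, e.g. walking a 64-bit bitmap:
+ *
+ *	unsigned long map[2];	(at least 64 bits on any architecture)
+ *	unsigned long bit;
+ *
+ *	bit = find_first_bit(map, 64);
+ *	while (bit < 64) {
+ *		(process bit here)
+ *		bit = find_next_bit(map, 64, bit + 1);
+ *	}
+ */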
+
+#ifndef netdev_name
+static inline const char *_kc_netdev_name(const struct net_device *dev)
+{
+ if (strchr(dev->name, '%'))
+ return "(unregistered net_device)";
+ return dev->name;
+}
+#define netdev_name(netdev) _kc_netdev_name(netdev)
+#endif /* netdev_name */
+
+#ifndef strlcpy
+#define strlcpy _kc_strlcpy
+extern size_t _kc_strlcpy(char *dest, const char *src, size_t size);
+#endif /* strlcpy */
+
+#ifndef do_div
+#if BITS_PER_LONG == 64
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ __rem = ((uint64_t)(n)) % __base; \
+ (n) = ((uint64_t)(n)) / __base; \
+ __rem; \
+ })
+#elif BITS_PER_LONG == 32
+extern uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor);
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ if (likely(((n) >> 32) == 0)) { \
+ __rem = (uint32_t)(n) % __base; \
+ (n) = (uint32_t)(n) / __base; \
+ } else \
+ __rem = _kc__div64_32(&(n), __base); \
+ __rem; \
+ })
+#else /* BITS_PER_LONG == ?? */
+# error do_div() does not yet support the C64
+#endif /* BITS_PER_LONG */
+#endif /* do_div */
+
+#ifndef NSEC_PER_SEC
+#define NSEC_PER_SEC 1000000000L
+#endif
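+
+/* Illustrative sketch, not from the original source: do_div() divides a u64 in
+ * place and returns the 32-bit remainder, e.g. splitting nanoseconds into
+ * seconds plus a remainder:
+ *
+ *	u64 ns = 3123456789ULL;
+ *	u32 rem;
+ *
+ *	rem = do_div(ns, NSEC_PER_SEC);
+ *
+ * After the call, ns == 3 (seconds) and rem == 123456789 (nanoseconds).
+ */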
+
+#undef HAVE_I2C_SUPPORT
+#else /* 2.6.0 */
+#if IS_ENABLED(CONFIG_I2C_ALGOBIT) && \
+ (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,9)))
+#define HAVE_I2C_SUPPORT
+#endif /* IS_ENABLED(CONFIG_I2C_ALGOBIT) */
+
+#endif /* 2.6.0 => 2.5.28 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )
+#define dma_pool pci_pool
+#define dma_pool_destroy pci_pool_destroy
+#define dma_pool_alloc pci_pool_alloc
+#define dma_pool_free pci_pool_free
+
+#define dma_pool_create(name,dev,size,align,allocation) \
+ pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation))
+#endif /* < 2.6.3 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+/* 2.6.5 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
+#define dma_sync_single_for_cpu dma_sync_single
+#define dma_sync_single_for_device dma_sync_single
+#define dma_sync_single_range_for_cpu dma_sync_single_range
+#define dma_sync_single_range_for_device dma_sync_single_range
+#ifndef pci_dma_mapping_error
+#define pci_dma_mapping_error _kc_pci_dma_mapping_error
+static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
+{
+ return dma_addr == 0;
+}
+#endif
+#endif /* 2.6.5 => 2.6.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...);
+#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args)
+#endif /* < 2.6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
+/* taken from 2.6 include/linux/bitmap.h */
+#undef bitmap_zero
+#define bitmap_zero _kc_bitmap_zero
+static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
+{
+ if (nbits <= BITS_PER_LONG)
+ *dst = 0UL;
+ else {
+ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0, len);
+ }
+}
+#define page_to_nid(x) 0
+
+#endif /* < 2.6.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
+#undef if_mii
+#define if_mii _kc_if_mii
+static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
+{
+ return (struct mii_ioctl_data *) &rq->ifr_ifru;
+}
+
+#ifndef __force
+#define __force
+#endif
+#endif /* < 2.6.7 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+#ifndef PCI_EXP_DEVCTL
+#define PCI_EXP_DEVCTL 8
+#endif
+#ifndef PCI_EXP_DEVCTL_CERE
+#define PCI_EXP_DEVCTL_CERE 0x0001
+#endif
+#define PCI_EXP_FLAGS 2 /* Capabilities register */
+#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
+#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
+#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
+#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
+#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
+#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
+#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
+#define PCI_EXP_DEVCAP 4 /* Device capabilities */
+#define PCI_EXP_DEVSTA 10 /* Device Status */
+#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \
+ schedule_timeout((x * HZ)/1000 + 2); \
+ } while (0)
+
+#endif /* < 2.6.8 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
+#include <net/dsfield.h>
+#define __iomem
+
+#ifndef kcalloc
+#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
+extern void *_kc_kzalloc(size_t size, int flags);
+#endif
+#define MSEC_PER_SEC 1000L
+static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
+{
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+ return (MSEC_PER_SEC / HZ) * j;
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+ return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
+#else
+ return (j * MSEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
+{
+ if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+ return m * (HZ / MSEC_PER_SEC);
+#else
+ return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
+#endif
+}
+
+#define msleep_interruptible _kc_msleep_interruptible
+static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
+{
+ unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
+
+ while (timeout && !signal_pending(current)) {
+ __set_current_state(TASK_INTERRUPTIBLE);
+ timeout = schedule_timeout(timeout);
+ }
+ return _kc_jiffies_to_msecs(timeout);
+}
+
+/* Basic mode control register. */
+#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
+
+#ifndef __le16
+#define __le16 u16
+#endif
+#ifndef __le32
+#define __le32 u32
+#endif
+#ifndef __le64
+#define __le64 u64
+#endif
+#ifndef __be16
+#define __be16 u16
+#endif
+#ifndef __be32
+#define __be32 u32
+#endif
+#ifndef __be64
+#define __be64 u64
+#endif
+
+static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct vlan_ethhdr *)skb->mac.raw;
+}
+
+/* Wake-On-Lan options. */
+#define WAKE_PHY (1 << 0)
+#define WAKE_UCAST (1 << 1)
+#define WAKE_MCAST (1 << 2)
+#define WAKE_BCAST (1 << 3)
+#define WAKE_ARP (1 << 4)
+#define WAKE_MAGIC (1 << 5)
+#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */
+
+#define skb_header_pointer _kc_skb_header_pointer
+static inline void *_kc_skb_header_pointer(const struct sk_buff *skb,
+ int offset, int len, void *buffer)
+{
+ int hlen = skb_headlen(skb);
+
+ if (hlen - offset >= len)
+ return skb->data + offset;
+
+#ifdef MAX_SKB_FRAGS
+ if (skb_copy_bits(skb, offset, buffer, len) < 0)
+ return NULL;
+
+ return buffer;
+#else
+ return NULL;
+#endif
+}
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1
+#endif
+
+#ifndef __bitwise
+#define __bitwise
+#endif
+#endif /* < 2.6.9 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+#ifdef module_param_array_named
+#undef module_param_array_named
+#define module_param_array_named(name, array, type, nump, perm) \
+ static struct kparam_array __param_arr_##name \
+ = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
+ sizeof(array[0]), array }; \
+ module_param_call(name, param_array_set, param_array_get, \
+ &__param_arr_##name, perm)
+#endif /* module_param_array_named */
+/*
+ * num_online_nodes() is broken for all < 2.6.10 kernels. This is needed to
+ * support the Node module parameter of ixgbe.
+ */
+#undef num_online_nodes
+#define num_online_nodes(n) 1
+extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
+#undef node_online_map
+#define node_online_map _kcompat_node_online_map
+#define pci_get_class pci_find_class
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
+#define PCI_D0 0
+#define PCI_D1 1
+#define PCI_D2 2
+#define PCI_D3hot 3
+#define PCI_D3cold 4
+typedef int pci_power_t;
+#define pci_choose_state(pdev,state) state
+#define PMSG_SUSPEND 3
+#define PCI_EXP_LNKCTL 16
+
+#undef NETIF_F_LLTX
+
+#ifndef ARCH_HAS_PREFETCH
+#define prefetch(X)
+#endif
+
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN 2
+#endif
+
+#define KC_USEC_PER_SEC 1000000L
+#define usecs_to_jiffies _kc_usecs_to_jiffies
+static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
+{
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+ return (KC_USEC_PER_SEC / HZ) * j;
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+ return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
+#else
+ return (j * KC_USEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
+{
+ if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
+ return MAX_JIFFY_OFFSET;
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+ return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+ return m * (HZ / KC_USEC_PER_SEC);
+#else
+ return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
+#endif
+}
+
+#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
+#define PCI_EXP_LNKSTA 18 /* Link Status */
+#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
+#define PCI_EXP_SLTCTL 24 /* Slot Control */
+#define PCI_EXP_SLTSTA 26 /* Slot Status */
+#define PCI_EXP_RTCTL 28 /* Root Control */
+#define PCI_EXP_RTCAP 30 /* Root Capabilities */
+#define PCI_EXP_RTSTA 32 /* Root Status */
+#endif /* < 2.6.11 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
+#include <linux/reboot.h>
+#define USE_REBOOT_NOTIFIER
+
+/* Generic MII registers. */
+#define MII_CTRL1000 0x09 /* 1000BASE-T control */
+#define MII_STAT1000 0x0a /* 1000BASE-T status */
+/* Advertisement control register. */
+#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
+#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
+/* Link partner ability register. */
+#define LPA_PAUSE_CAP 0x0400 /* Can pause */
+#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymmetrically */
+/* 1000BASE-T Control register */
+#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
+#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */
+/* 1000BASE-T Status register */
+#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */
+#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */
+
+#ifndef is_zero_ether_addr
+#define is_zero_ether_addr _kc_is_zero_ether_addr
+static inline int _kc_is_zero_ether_addr(const u8 *addr)
+{
+ return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
+}
+#endif /* is_zero_ether_addr */
+#ifndef is_multicast_ether_addr
+#define is_multicast_ether_addr _kc_is_multicast_ether_addr
+static inline int _kc_is_multicast_ether_addr(const u8 *addr)
+{
+ return addr[0] & 0x01;
+}
+#endif /* is_multicast_ether_addr */
+#endif /* < 2.6.12 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
+#ifndef kstrdup
+#define kstrdup _kc_kstrdup
+extern char *_kc_kstrdup(const char *s, unsigned int gfp);
+#endif
+#endif /* < 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+#define pm_message_t u32
+#ifndef kzalloc
+#define kzalloc _kc_kzalloc
+extern void *_kc_kzalloc(size_t size, int flags);
+#endif
+
+/* Generic MII registers. */
+#define MII_ESTATUS 0x0f /* Extended Status */
+/* Basic mode status register. */
+#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
+/* Extended status register. */
+#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
+#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
+
+#define SUPPORTED_Pause (1 << 13)
+#define SUPPORTED_Asym_Pause (1 << 14)
+#define ADVERTISED_Pause (1 << 13)
+#define ADVERTISED_Asym_Pause (1 << 14)
+
+#if (!(RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))))
+#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t))
+#define gfp_t unsigned
+#else
+typedef unsigned gfp_t;
+#endif
+#endif /* !RHEL4.3->RHEL5.0 */
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
+#ifdef CONFIG_X86_64
+#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \
+ dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir))
+#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \
+ dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir))
+#endif
+#endif
+#endif /* < 2.6.14 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
+#ifndef kfree_rcu
+/* this is placed here due to a lack of rcu_barrier in previous kernels */
+#define kfree_rcu(_ptr, _offset) kfree(_ptr)
+#endif /* kfree_rcu */
+#ifndef vmalloc_node
+#define vmalloc_node(a,b) vmalloc(a)
+#endif /* vmalloc_node*/
+
+#define setup_timer(_timer, _function, _data) \
+do { \
+ (_timer)->function = _function; \
+ (_timer)->data = _data; \
+ init_timer(_timer); \
+} while (0)
+#ifndef device_can_wakeup
+#define device_can_wakeup(dev) (1)
+#endif
+#ifndef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val) do{}while(0)
+#endif
+#ifndef device_init_wakeup
+#define device_init_wakeup(dev,val) do {} while (0)
+#endif
+static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2)
+{
+ const u16 *a = (const u16 *) addr1;
+ const u16 *b = (const u16 *) addr2;
+
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+}
+#undef compare_ether_addr
+#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2)
+#endif /* < 2.6.15 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
+#undef DEFINE_MUTEX
+#define DEFINE_MUTEX(x) DECLARE_MUTEX(x)
+#define mutex_lock(x) down_interruptible(x)
+#define mutex_unlock(x) up(x)
+
+#ifndef ____cacheline_internodealigned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp
+#else
+#define ____cacheline_internodealigned_in_smp
+#endif /* CONFIG_SMP */
+#endif /* ____cacheline_internodealigned_in_smp */
+#undef HAVE_PCI_ERS
+#else /* 2.6.16 and above */
+#undef HAVE_PCI_ERS
+#define HAVE_PCI_ERS
+#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) )
+#ifdef device_can_wakeup
+#undef device_can_wakeup
+#endif /* device_can_wakeup */
+#define device_can_wakeup(dev) 1
+#endif /* SLE_VERSION(10,4,0) */
+#endif /* < 2.6.16 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
+#ifndef dev_notice
+#define dev_notice(dev, fmt, args...) \
+ dev_printk(KERN_NOTICE, dev, fmt, ## args)
+#endif
+
+#ifndef first_online_node
+#define first_online_node 0
+#endif
+#ifndef NET_SKB_PAD
+#define NET_SKB_PAD 16
+#endif
+#endif /* < 2.6.17 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
+
+#ifndef IRQ_HANDLED
+#define irqreturn_t void
+#define IRQ_HANDLED
+#define IRQ_NONE
+#endif
+
+#ifndef IRQF_PROBE_SHARED
+#ifdef SA_PROBEIRQ
+#define IRQF_PROBE_SHARED SA_PROBEIRQ
+#else
+#define IRQF_PROBE_SHARED 0
+#endif
+#endif
+
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef FIELD_SIZEOF
+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#endif
+
+#ifndef skb_is_gso
+#ifdef NETIF_F_TSO
+#define skb_is_gso _kc_skb_is_gso
+static inline int _kc_skb_is_gso(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_size;
+}
+#else
+#define skb_is_gso(a) 0
+#endif
+#endif
+
+#ifndef resource_size_t
+#define resource_size_t unsigned long
+#endif
+
+#ifdef skb_pad
+#undef skb_pad
+#endif
+#define skb_pad(x,y) _kc_skb_pad(x, y)
+int _kc_skb_pad(struct sk_buff *skb, int pad);
+#ifdef skb_padto
+#undef skb_padto
+#endif
+#define skb_padto(x,y) _kc_skb_padto(x, y)
+static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len)
+{
+ unsigned int size = skb->len;
+ if(likely(size >= len))
+ return 0;
+ return _kc_skb_pad(skb, len - size);
+}
+
+#ifndef DECLARE_PCI_UNMAP_ADDR
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
+ dma_addr_t ADDR_NAME
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
+ u32 LEN_NAME
+#define pci_unmap_addr(PTR, ADDR_NAME) \
+ ((PTR)->ADDR_NAME)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+ (((PTR)->ADDR_NAME) = (VAL))
+#define pci_unmap_len(PTR, LEN_NAME) \
+ ((PTR)->LEN_NAME)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
+ (((PTR)->LEN_NAME) = (VAL))
+#endif /* DECLARE_PCI_UNMAP_ADDR */
+#endif /* < 2.6.18 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+enum pcie_link_width {
+ PCIE_LNK_WIDTH_RESRV = 0x00,
+ PCIE_LNK_X1 = 0x01,
+ PCIE_LNK_X2 = 0x02,
+ PCIE_LNK_X4 = 0x04,
+ PCIE_LNK_X8 = 0x08,
+ PCIE_LNK_X12 = 0x0C,
+ PCIE_LNK_X16 = 0x10,
+ PCIE_LNK_X32 = 0x20,
+ PCIE_LNK_WIDTH_UNKNOWN = 0xFF,
+};
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0)))
+#define i_private u.generic_ip
+#endif /* >= RHEL 5.0 */
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef __ALIGN_MASK
+#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#endif
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
+#if (!((RHEL_RELEASE_CODE && \
+ ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \
+ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0))))))
+typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
+#endif
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#undef CONFIG_INET_LRO
+#undef CONFIG_INET_LRO_MODULE
+#endif
+typedef irqreturn_t (*new_handler_t)(int, void*);
+static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#else /* 2.4.x */
+typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
+typedef void (*new_handler_t)(int, void*);
+static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
+#endif /* >= 2.5.x */
+{
+ irq_handler_t new_handler = (irq_handler_t) handler;
+ return request_irq(irq, new_handler, flags, devname, dev_id);
+}
+
+#undef request_irq
+#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
+
+#define irq_handler_t new_handler_t
+/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
+#define PCIE_CONFIG_SPACE_LEN 256
+#define PCI_CONFIG_SPACE_LEN 64
+#define PCIE_LINK_STATUS 0x12
+#define pci_config_space_ich8lan() do {} while(0)
+#undef pci_save_state
+extern int _kc_pci_save_state(struct pci_dev *);
+#define pci_save_state(pdev) _kc_pci_save_state(pdev)
+#undef pci_restore_state
+extern void _kc_pci_restore_state(struct pci_dev *);
+#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
+
+#ifdef HAVE_PCI_ERS
+#undef free_netdev
+extern void _kc_free_netdev(struct net_device *);
+#define free_netdev(netdev) _kc_free_netdev(netdev)
+#endif
+static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev)
+{
+ return 0;
+}
+#define pci_disable_pcie_error_reporting(dev) do {} while (0)
+#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
+
+extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp);
+#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp)
+#ifndef bool
+#define bool _Bool
+#define true 1
+#define false 0
+#endif
+#else /* 2.6.19 */
+#include <linux/aer.h>
+#include <linux/string.h>
+#include <linux/pci_hotplug.h>
+#endif /* < 2.6.19 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
+#undef INIT_WORK
+#define INIT_WORK(_work, _func) \
+do { \
+ INIT_LIST_HEAD(&(_work)->entry); \
+ (_work)->pending = 0; \
+ (_work)->func = (void (*)(void *))_func; \
+ (_work)->data = _work; \
+ init_timer(&(_work)->timer); \
+} while (0)
+#endif
+
+#ifndef PCI_VDEVICE
+#define PCI_VDEVICE(ven, dev) \
+ PCI_VENDOR_ID_##ven, (dev), \
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0
+#endif
+
+#ifndef PCI_VENDOR_ID_INTEL
+#define PCI_VENDOR_ID_INTEL 0x8086
+#endif
+
+#ifndef round_jiffies
+#define round_jiffies(x) x
+#endif
+
+#define csum_offset csum
+
+#define HAVE_EARLY_VMALLOC_NODE
+#define dev_to_node(dev) -1
+#undef set_dev_node
+/* remove compiler warning with b=b, for unused variable */
+#define set_dev_node(a, b) do { (b) = (b); } while(0)
+
+#if (!(RHEL_RELEASE_CODE && \
+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \
+ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
+typedef __u16 __bitwise __sum16;
+typedef __u32 __bitwise __wsum;
+#endif
+
+#if (!(RHEL_RELEASE_CODE && \
+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \
+ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
+static inline __wsum csum_unfold(__sum16 n)
+{
+ return (__force __wsum)n;
+}
+#endif
+
+#else /* < 2.6.20 */
+#define HAVE_DEVICE_NUMA_NODE
+#endif /* < 2.6.20 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define to_net_dev(class) container_of(class, struct net_device, class_dev)
+#define NETDEV_CLASS_DEV
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
+#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
+#define vlan_group_set_device(vg, id, dev) \
+ do { \
+ if (vg) vg->vlan_devices[id] = dev; \
+ } while (0)
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
+#define pci_channel_offline(pdev) (pdev->error_state && \
+ pdev->error_state != pci_channel_io_normal)
+#define pci_request_selected_regions(pdev, bars, name) \
+ pci_request_regions(pdev, name)
+#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev)
+
+#ifndef __aligned
+#define __aligned(x) __attribute__((aligned(x)))
+#endif
+
+extern struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev);
+#define netdev_to_dev(netdev) \
+ pci_dev_to_dev(_kc_netdev_to_pdev(netdev))
+#else
+static inline struct device *netdev_to_dev(struct net_device *netdev)
+{
+ return &netdev->dev;
+}
+
+#endif /* < 2.6.21 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+#define tcp_hdr(skb) (skb->h.th)
+#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
+#define skb_transport_offset(skb) (skb->h.raw - skb->data)
+#define skb_transport_header(skb) (skb->h.raw)
+#define ipv6_hdr(skb) (skb->nh.ipv6h)
+#define ip_hdr(skb) (skb->nh.iph)
+#define skb_network_offset(skb) (skb->nh.raw - skb->data)
+#define skb_network_header(skb) (skb->nh.raw)
+#define skb_tail_pointer(skb) skb->tail
+#define skb_reset_tail_pointer(skb) \
+ do { \
+ skb->tail = skb->data; \
+ } while (0)
+#define skb_set_tail_pointer(skb, offset) \
+ do { \
+ skb->tail = skb->data + offset; \
+ } while (0)
+#define skb_copy_to_linear_data(skb, from, len) \
+ memcpy(skb->data, from, len)
+#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
+ memcpy(skb->data + offset, from, len)
+#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
+#define pci_register_driver pci_module_init
+#define skb_mac_header(skb) skb->mac.raw
+
+#ifdef NETIF_F_MULTI_QUEUE
+#ifndef alloc_etherdev_mq
+#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
+#endif
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef ETH_FCS_LEN
+#define ETH_FCS_LEN 4
+#endif
+#define cancel_work_sync(x) flush_scheduled_work()
+#ifndef udp_hdr
+#define udp_hdr _udp_hdr
+static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
+{
+ return (struct udphdr *)skb_transport_header(skb);
+}
+#endif
+
+#ifdef cpu_to_be16
+#undef cpu_to_be16
+#endif
+#define cpu_to_be16(x) __constant_htons(x)
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)))
+enum {
+ DUMP_PREFIX_NONE,
+ DUMP_PREFIX_ADDRESS,
+ DUMP_PREFIX_OFFSET
+};
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */
+#ifndef hex_asc
+#define hex_asc(x) "0123456789abcdef"[x]
+#endif
+#include <linux/ctype.h>
+extern void _kc_print_hex_dump(const char *level, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii);
+#define print_hex_dump(lvl, s, t, r, g, b, l, a) \
+ _kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
+#ifndef ADVERTISED_2500baseX_Full
+#define ADVERTISED_2500baseX_Full (1 << 15)
+#endif
+#ifndef SUPPORTED_2500baseX_Full
+#define SUPPORTED_2500baseX_Full (1 << 15)
+#endif
+
+#ifdef HAVE_I2C_SUPPORT
+#include <linux/i2c.h>
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
+struct i2c_board_info {
+ char driver_name[KOBJ_NAME_LEN];
+ char type[I2C_NAME_SIZE];
+ unsigned short flags;
+ unsigned short addr;
+ void *platform_data;
+};
+#define I2C_BOARD_INFO(driver, dev_addr) .driver_name = (driver),\
+ .addr = (dev_addr)
+#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
+#define i2c_new_device(adap, info) _kc_i2c_new_device(adap, info)
+extern struct i2c_client *
+_kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
+#endif /* HAVE_I2C_SUPPORT */
+
+#ifndef ETH_P_PAUSE
+#define ETH_P_PAUSE 0x8808
+#endif
+
+#else /* 2.6.22 */
+#define ETH_TYPE_TRANS_SETS_DEV
+#define HAVE_NETDEV_STATS_IN_NETDEV
+#endif /* < 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
+#undef SET_MODULE_OWNER
+#define SET_MODULE_OWNER(dev) do { } while (0)
+#endif /* > 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
+#define netif_subqueue_stopped(_a, _b) 0
+#ifndef PTR_ALIGN
+#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
+#endif
+
+#ifndef CONFIG_PM_SLEEP
+#define CONFIG_PM_SLEEP CONFIG_PM
+#endif
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) )
+#define HAVE_ETHTOOL_GET_PERM_ADDR
+#endif /* 2.6.14 through 2.6.22 */
+
+static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom)
+{
+ int delta = 0;
+
+ if (headroom > (skb->data - skb->head))
+ delta = headroom - (skb->data - skb->head);
+
+ if (delta || skb_header_cloned(skb))
+ return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
+ GFP_ATOMIC);
+ return 0;
+}
+#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h))
+#endif /* < 2.6.23 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifndef ETH_FLAG_LRO
+#define ETH_FLAG_LRO NETIF_F_LRO
+#endif
+
+#ifndef ACCESS_ONCE
+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+#endif
+
+/* if GRO is supported then the napi struct must already exist */
+#ifndef NETIF_F_GRO
+/* NAPI API changes in 2.6.24 break everything */
+struct napi_struct {
+ /* used to look up the real NAPI polling routine */
+ int (*poll)(struct napi_struct *, int);
+ struct net_device *dev;
+ int weight;
+};
+#endif
+
+#ifdef NAPI
+extern int __kc_adapter_clean(struct net_device *, int *);
+/* The following definitions are multi-queue aware. A driver define list
+ * determines which drivers support multiple queues and therefore need these
+ * stronger defines. If a driver does not support multi-queue functionality,
+ * it does not need to be added to this list.
+ */
+extern struct net_device *napi_to_poll_dev(const struct napi_struct *napi);
+
+static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight)
+{
+ struct net_device *poll_dev = napi_to_poll_dev(napi);
+ poll_dev->poll = __kc_adapter_clean;
+ poll_dev->priv = napi;
+ poll_dev->weight = weight;
+ set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state);
+ set_bit(__LINK_STATE_START, &poll_dev->state);
+ dev_hold(poll_dev);
+ napi->poll = poll;
+ napi->weight = weight;
+ napi->dev = dev;
+}
+#define netif_napi_add __kc_mq_netif_napi_add
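+/* Usage sketch (driver names illustrative): a queue setup path would still
+ * call, e.g.,
+ *
+ *   netif_napi_add(adapter->netdev, &q_vector->napi, my_poll, 64);
+ *
+ * with my_poll() using the modern int (*)(struct napi_struct *, int)
+ * signature; on these older kernels the wrapper above redirects the call to
+ * the per-queue polling net_device returned by napi_to_poll_dev().
+ */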
+
+static inline void __kc_mq_netif_napi_del(struct napi_struct *napi)
+{
+ struct net_device *poll_dev = napi_to_poll_dev(napi);
+ WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state));
+ dev_put(poll_dev);
+ memset(poll_dev, 0, sizeof(struct net_device));
+}
+
+#define netif_napi_del __kc_mq_netif_napi_del
+
+static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi)
+{
+ return netif_running(napi->dev) &&
+ netif_rx_schedule_prep(napi_to_poll_dev(napi));
+}
+#define napi_schedule_prep __kc_mq_napi_schedule_prep
+
+static inline void __kc_mq_napi_schedule(struct napi_struct *napi)
+{
+ if (napi_schedule_prep(napi))
+ __netif_rx_schedule(napi_to_poll_dev(napi));
+}
+#define napi_schedule __kc_mq_napi_schedule
+
+#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
+#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
+#ifdef CONFIG_SMP
+static inline void napi_synchronize(const struct napi_struct *n)
+{
+ struct net_device *dev = napi_to_poll_dev(n);
+
+ while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
+ /* No hurry. */
+ msleep(1);
+ }
+}
+#else
+#define napi_synchronize(n) barrier()
+#endif /* CONFIG_SMP */
+#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
+static inline void _kc_napi_complete(struct napi_struct *napi)
+{
+#ifdef NETIF_F_GRO
+ napi_gro_flush(napi);
+#endif
+ netif_rx_complete(napi_to_poll_dev(napi));
+}
+#define napi_complete _kc_napi_complete
+#else /* NAPI */
+
+/* The following definitions are only used if we don't support NAPI at all. */
+
+static inline void __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight)
+{
+ dev->poll = poll;
+ dev->weight = weight;
+ napi->poll = poll;
+ napi->weight = weight;
+ napi->dev = dev;
+}
+#define netif_napi_del(_a) do {} while (0)
+#endif /* NAPI */
+
+#undef dev_get_by_name
+#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
+#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
+#endif
+
+#ifdef NETIF_F_TSO6
+#define skb_is_gso_v6 _kc_skb_is_gso_v6
+static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
+}
+#endif /* NETIF_F_TSO6 */
+
+#ifndef KERN_CONT
+#define KERN_CONT ""
+#endif
+#ifndef pr_err
+#define pr_err(fmt, arg...) \
+ printk(KERN_ERR fmt, ##arg)
+#endif
+
+#ifndef rounddown_pow_of_two
+#define rounddown_pow_of_two(n) \
+ __builtin_constant_p(n) ? ( \
+ (n == 1) ? 0 : \
+ (1UL << ilog2(n))) : \
+ (1UL << (fls_long(n) - 1))
+#endif
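+/* For example, rounddown_pow_of_two(12) evaluates to 8 and
+ * rounddown_pow_of_two(64) stays 64; the expression is not parenthesised as
+ * a whole, so avoid embedding it directly in larger arithmetic.
+ */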
+
+#ifndef BIT
+#define BIT(nr) (1UL << (nr))
+#endif
+
+#else /* < 2.6.24 */
+#define HAVE_ETHTOOL_GET_SSET_COUNT
+#define HAVE_NETDEV_NAPI_LIST
+#endif /* < 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#define INCLUDE_PM_QOS_PARAMS_H
+#include <linux/pm_qos_params.h>
+#else /* >= 3.2.0 */
+#include <linux/pm_qos.h>
+#endif /* else >= 3.2.0 */
+#endif /* > 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
+#define PM_QOS_CPU_DMA_LATENCY 1
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
+#include <linux/latency.h>
+#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY
+#define pm_qos_add_requirement(pm_qos_class, name, value) \
+ set_acceptable_latency(name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name) \
+ remove_acceptable_latency(name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) \
+ modify_acceptable_latency(name, value)
+#else
+#define PM_QOS_DEFAULT_VALUE -1
+#define pm_qos_add_requirement(pm_qos_class, name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) { \
+ if (value != PM_QOS_DEFAULT_VALUE) { \
+ printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
+ pci_name(adapter->pdev)); \
+ } \
+}
+
+#endif /* > 2.6.18 */
+
+#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
+
+#ifndef DEFINE_PCI_DEVICE_TABLE
+#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
+#endif /* DEFINE_PCI_DEVICE_TABLE */
+
+#ifndef strict_strtol
+#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r)
+static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res)
+{
+ /* adapted from strict_strtoul() in 2.6.25 */
+ char *tail;
+ long val;
+ size_t len;
+
+ *res = 0;
+ len = strlen(buf);
+ if (!len)
+ return -EINVAL;
+ val = simple_strtol(buf, &tail, base);
+ if (tail == buf)
+ return -EINVAL;
+ if ((*tail == '\0') ||
+ ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) {
+ *res = val;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+#endif
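+/* With this fallback, strict_strtol("42\n", 10, &val) returns 0 and sets
+ * val to 42, while strict_strtol("42x", 10, &val) returns -EINVAL: as in
+ * 2.6.25, only a single trailing newline is tolerated.
+ */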
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+#ifndef IGB_PROCFS
+#define IGB_PROCFS
+#endif /* IGB_PROCFS */
+#endif /* >= 2.6.0 */
+
+#else /* < 2.6.25 */
+
+#if IS_ENABLED(CONFIG_HWMON)
+#ifndef IGB_HWMON
+#define IGB_HWMON
+#endif /* IGB_HWMON */
+#endif /* CONFIG_HWMON */
+
+#endif /* < 2.6.25 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
+#ifndef clamp_t
+#define clamp_t(type, val, min, max) ({ \
+ type __val = (val); \
+ type __min = (min); \
+ type __max = (max); \
+ __val = __val < __min ? __min : __val; \
+ __val > __max ? __max : __val; })
+#endif /* clamp_t */
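+/* e.g. clamp_t(int, val, 0, 255) converts all three arguments to int and
+ * pins val into the range 0..255.
+ */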
+#undef kzalloc_node
+#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags)
+
+extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
+#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
+#else /* < 2.6.26 */
+#define NETDEV_CAN_SET_GSO_MAX_SIZE
+#include <linux/pci-aspm.h>
+#define HAVE_NETDEV_VLAN_FEATURES
+#ifndef PCI_EXP_LNKCAP_ASPMS
+#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
+#endif /* PCI_EXP_LNKCAP_ASPMS */
+#endif /* < 2.6.26 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
+ __u32 speed)
+{
+ ep->speed = (__u16)speed;
+ /* ep->speed_hi = (__u16)(speed >> 16); */
+}
+#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
+
+static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep)
+{
+ /* no speed_hi before 2.6.27, and probably no need for it yet */
+ return (__u32)ep->speed;
+}
+#define ethtool_cmd_speed _kc_ethtool_cmd_speed
+
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM))
+#define ANCIENT_PM 1
+#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \
+ defined(CONFIG_PM_SLEEP))
+#define NEWER_PM 1
+#endif
+#if defined(ANCIENT_PM) || defined(NEWER_PM)
+#undef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val) \
+ do { \
+ u16 pmc = 0; \
+ int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
+ if (pm) { \
+ pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
+ &pmc); \
+ } \
+ (dev)->power.can_wakeup = !!(pmc >> 11); \
+ (dev)->power.should_wakeup = (val && (pmc >> 11)); \
+ } while (0)
+#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
+#endif /* 2.6.15 through 2.6.27 */
+#ifndef netif_napi_del
+#define netif_napi_del(_a) do {} while (0)
+#ifdef NAPI
+#ifdef CONFIG_NETPOLL
+#undef netif_napi_del
+#define netif_napi_del(_a) list_del(&(_a)->dev_list);
+#endif
+#endif
+#endif /* netif_napi_del */
+#ifdef dma_mapping_error
+#undef dma_mapping_error
+#endif
+#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
+
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+#define HAVE_TX_MQ
+#endif
+
+#ifdef HAVE_TX_MQ
+extern void _kc_netif_tx_stop_all_queues(struct net_device *);
+extern void _kc_netif_tx_wake_all_queues(struct net_device *);
+extern void _kc_netif_tx_start_all_queues(struct net_device *);
+#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
+#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
+#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
+#undef netif_stop_subqueue
+#define netif_stop_subqueue(_ndev,_qi) do { \
+ if (netif_is_multiqueue((_ndev))) \
+ netif_stop_subqueue((_ndev), (_qi)); \
+ else \
+ netif_stop_queue((_ndev)); \
+ } while (0)
+#undef netif_start_subqueue
+#define netif_start_subqueue(_ndev,_qi) do { \
+ if (netif_is_multiqueue((_ndev))) \
+ netif_start_subqueue((_ndev), (_qi)); \
+ else \
+ netif_start_queue((_ndev)); \
+ } while (0)
+#else /* HAVE_TX_MQ */
+#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
+#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
+#define netif_tx_start_all_queues(a) netif_start_queue(a)
+#else
+#define netif_tx_start_all_queues(a) do {} while (0)
+#endif
+#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
+#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
+#endif /* HAVE_TX_MQ */
+#ifndef NETIF_F_MULTI_QUEUE
+#define NETIF_F_MULTI_QUEUE 0
+#define netif_is_multiqueue(a) 0
+#define netif_wake_subqueue(a, b)
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef __WARN_printf
+extern void __kc_warn_slowpath(const char *file, const int line,
+ const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg)
+#endif /* __WARN_printf */
+
+#ifndef WARN
+#define WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_printf(format); \
+ unlikely(__ret_warn_on); \
+})
+#endif /* WARN */
+#undef HAVE_IXGBE_DEBUG_FS
+#undef HAVE_IGB_DEBUG_FS
+#else /* < 2.6.27 */
+#define HAVE_TX_MQ
+#define HAVE_NETDEV_SELECT_QUEUE
+#ifdef CONFIG_DEBUG_FS
+#define HAVE_IXGBE_DEBUG_FS
+#define HAVE_IGB_DEBUG_FS
+#endif /* CONFIG_DEBUG_FS */
+#endif /* < 2.6.27 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
+#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \
+ pci_resource_len(pdev, bar))
+#define pci_wake_from_d3 _kc_pci_wake_from_d3
+#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
+extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
+extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
+#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
+#ifndef __skb_queue_head_init
+static inline void __kc_skb_queue_head_init(struct sk_buff_head *list)
+{
+ list->prev = list->next = (struct sk_buff *)list;
+ list->qlen = 0;
+}
+#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
+#endif
+
+#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
+#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
+
+#endif /* < 2.6.28 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
+#ifndef swap
+#define swap(a, b) \
+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+#endif
+#define pci_request_selected_regions_exclusive(pdev, bars, name) \
+ pci_request_selected_regions(pdev, bars, name)
+#ifndef CONFIG_NR_CPUS
+#define CONFIG_NR_CPUS 1
+#endif /* CONFIG_NR_CPUS */
+#ifndef pcie_aspm_enabled
+#define pcie_aspm_enabled() (1)
+#endif /* pcie_aspm_enabled */
+
+#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */
+
+#ifndef PCI_EXP_LNKSTA_CLS
+#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
+#endif
+#ifndef PCI_EXP_LNKSTA_NLW
+#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */
+#endif
+
+#ifndef pci_clear_master
+extern void _kc_pci_clear_master(struct pci_dev *dev);
+#define pci_clear_master(dev) _kc_pci_clear_master(dev)
+#endif
+
+#ifndef PCI_EXP_LNKCTL_ASPMC
+#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */
+#endif
+#else /* < 2.6.29 */
+#ifndef HAVE_NET_DEVICE_OPS
+#define HAVE_NET_DEVICE_OPS
+#endif
+#ifdef CONFIG_DCB
+#define HAVE_PFC_MODE_ENABLE
+#endif /* CONFIG_DCB */
+#endif /* < 2.6.29 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
+#define NO_PTP_SUPPORT
+#define skb_rx_queue_recorded(a) false
+#define skb_get_rx_queue(a) 0
+#define skb_record_rx_queue(a, b) do {} while (0)
+#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues)
+#ifndef CONFIG_PCI_IOV
+#undef pci_enable_sriov
+#define pci_enable_sriov(a, b) -ENOTSUPP
+#undef pci_disable_sriov
+#define pci_disable_sriov(a) do {} while (0)
+#endif /* CONFIG_PCI_IOV */
+#ifndef pr_cont
+#define pr_cont(fmt, ...) \
+ printk(KERN_CONT fmt, ##__VA_ARGS__)
+#endif /* pr_cont */
+static inline void _kc_synchronize_irq(unsigned int a)
+{
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+ synchronize_irq();
+#else /* < 2.5.28 */
+ synchronize_irq(a);
+#endif /* < 2.5.28 */
+}
+#undef synchronize_irq
+#define synchronize_irq(a) _kc_synchronize_irq(a)
+
+#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
+
+#ifdef nr_cpus_node
+#undef nr_cpus_node
+#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
+#endif
+
+#else /* < 2.6.30 */
+#define HAVE_ASPM_QUIRKS
+#endif /* < 2.6.30 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
+#define ETH_P_1588 0x88F7
+#define ETH_P_FIP 0x8914
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc_count)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(uclist, dev) \
+ for (uclist = dev->uc_list; uclist; uclist = uclist->next)
+#endif
+#ifndef PORT_OTHER
+#define PORT_OTHER 0xff
+#endif
+#ifndef MDIO_PHY_ID_PRTAD
+#define MDIO_PHY_ID_PRTAD 0x03e0
+#endif
+#ifndef MDIO_PHY_ID_DEVAD
+#define MDIO_PHY_ID_DEVAD 0x001f
+#endif
+#ifndef skb_dst
+#define skb_dst(s) ((s)->dst)
+#endif
+
+#ifndef SUPPORTED_1000baseKX_Full
+#define SUPPORTED_1000baseKX_Full (1 << 17)
+#endif
+#ifndef SUPPORTED_10000baseKX4_Full
+#define SUPPORTED_10000baseKX4_Full (1 << 18)
+#endif
+#ifndef SUPPORTED_10000baseKR_Full
+#define SUPPORTED_10000baseKR_Full (1 << 19)
+#endif
+
+#ifndef ADVERTISED_1000baseKX_Full
+#define ADVERTISED_1000baseKX_Full (1 << 17)
+#endif
+#ifndef ADVERTISED_10000baseKX4_Full
+#define ADVERTISED_10000baseKX4_Full (1 << 18)
+#endif
+#ifndef ADVERTISED_10000baseKR_Full
+#define ADVERTISED_10000baseKR_Full (1 << 19)
+#endif
+
+#else /* < 2.6.31 */
+#ifndef HAVE_NETDEV_STORAGE_ADDRESS
+#define HAVE_NETDEV_STORAGE_ADDRESS
+#endif
+#ifndef HAVE_NETDEV_HW_ADDR
+#define HAVE_NETDEV_HW_ADDR
+#endif
+#ifndef HAVE_TRANS_START_IN_QUEUE
+#define HAVE_TRANS_START_IN_QUEUE
+#endif
+#ifndef HAVE_INCLUDE_LINUX_MDIO_H
+#define HAVE_INCLUDE_LINUX_MDIO_H
+#endif
+#include <linux/mdio.h>
+#endif /* < 2.6.31 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
+#undef netdev_tx_t
+#define netdev_tx_t int
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+static inline int _kc_pm_runtime_get_sync()
+{
+ return 1;
+}
+#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync()
+#else /* 2.6.0 => 2.6.32 */
+static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev)
+{
+ return 1;
+}
+#ifndef pm_runtime_get_sync
+#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev)
+#endif
+#endif /* 2.6.0 => 2.6.32 */
+#ifndef pm_runtime_put
+#define pm_runtime_put(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_put_sync
+#define pm_runtime_put_sync(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_resume
+#define pm_runtime_resume(dev) do {} while (0)
+#endif
+#ifndef pm_schedule_suspend
+#define pm_schedule_suspend(dev, t) do {} while (0)
+#endif
+#ifndef pm_runtime_set_suspended
+#define pm_runtime_set_suspended(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_disable
+#define pm_runtime_disable(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_put_noidle
+#define pm_runtime_put_noidle(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_set_active
+#define pm_runtime_set_active(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_enable
+#define pm_runtime_enable(dev) do {} while (0)
+#endif
+#ifndef pm_runtime_get_noresume
+#define pm_runtime_get_noresume(dev) do {} while (0)
+#endif
+#else /* < 2.6.32 */
+#if (RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_NET_DEVICE_EXTENDED
+#endif /* RHEL >= 6.2 && RHEL < 7.0 */
+#if (RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_NET_DEVICE_OPS_EXT
+#define HAVE_NDO_SET_FEATURES
+#endif /* RHEL >= 6.6 && RHEL < 7.0 */
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_OPS_GETAPP
+#define HAVE_DCBNL_OPS_GETAPP
+#endif
+#endif /* CONFIG_DCB */
+#include <linux/pm_runtime.h>
+/* IOV bad-DMA-target workarounds require at least this kernel revision */
+#define HAVE_PCIE_TYPE
+#endif /* < 2.6.32 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
+#ifndef pci_pcie_cap
+#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP)
+#endif
+#ifndef IPV4_FLOW
+#define IPV4_FLOW 0x10
+#endif /* IPV4_FLOW */
+#ifndef IPV6_FLOW
+#define IPV6_FLOW 0x11
+#endif /* IPV6_FLOW */
+/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */
+#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \
+ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) )
+#endif /* RHEL6 or SLES11 SP1 */
+#ifndef __percpu
+#define __percpu
+#endif /* __percpu */
+#ifndef PORT_DA
+#define PORT_DA PORT_OTHER
+#endif
+#ifndef PORT_NONE
+#define PORT_NONE PORT_OTHER
+#endif
+
+#if ((RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))
+#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE)
+#undef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
+#undef DEFINE_DMA_UNMAP_LEN
+#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
+#undef dma_unmap_addr
+#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
+#undef dma_unmap_addr_set
+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
+#undef dma_unmap_len
+#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
+#undef dma_unmap_len_set
+#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
+#endif /* !CONFIG_X86_32 && !CONFIG_NEED_DMA_MAP_STATE */
+#endif /* RHEL_RELEASE_CODE */
+
+#if (!(RHEL_RELEASE_CODE && \
+ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \
+ ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))))
+static inline bool pci_is_pcie(struct pci_dev *dev)
+{
+ return !!pci_pcie_cap(dev);
+}
+#endif /* RHEL_RELEASE_CODE */
+
+#if (!(RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))))
+#define sk_tx_queue_get(_sk) (-1)
+#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0)
+#endif /* !(RHEL >= 6.2) */
+
+#if (RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#define HAVE_ETHTOOL_SET_PHYS_ID
+#define HAVE_ETHTOOL_GET_TS_INFO
+#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5))
+#define HAVE_ETHTOOL_GSRSSH
+#define HAVE_RHEL6_SRIOV_CONFIGURE
+#define HAVE_RXFH_NONCONST
+#endif /* RHEL > 6.5 */
+#endif /* RHEL >= 6.4 && RHEL < 7.0 */
+
+#else /* < 2.6.33 */
+#endif /* < 2.6.33 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
+#ifndef pci_num_vf
+#define pci_num_vf(pdev) _kc_pci_num_vf(pdev)
+extern int _kc_pci_num_vf(struct pci_dev *dev);
+#endif
+#endif /* RHEL_RELEASE_CODE */
+
+#ifndef ETH_FLAG_NTUPLE
+#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
+#endif
+
+#ifndef netdev_mc_count
+#define netdev_mc_count(dev) ((dev)->mc_count)
+#endif
+#ifndef netdev_mc_empty
+#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
+#endif
+#ifndef netdev_for_each_mc_addr
+#define netdev_for_each_mc_addr(mclist, dev) \
+ for (mclist = dev->mc_list; mclist; mclist = mclist->next)
+#endif
+#ifndef netdev_uc_count
+#define netdev_uc_count(dev) ((dev)->uc.count)
+#endif
+#ifndef netdev_uc_empty
+#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0)
+#endif
+#ifndef netdev_for_each_uc_addr
+#define netdev_for_each_uc_addr(ha, dev) \
+ list_for_each_entry(ha, &dev->uc.list, list)
+#endif
+#ifndef dma_set_coherent_mask
+#define dma_set_coherent_mask(dev,mask) \
+ pci_set_consistent_dma_mask(to_pci_dev(dev),(mask))
+#endif
+#ifndef pci_dev_run_wake
+#define pci_dev_run_wake(pdev) (0)
+#endif
+
+/* netdev logging taken from include/linux/netdevice.h */
+#ifndef netdev_name
+static inline const char *_kc_netdev_name(const struct net_device *dev)
+{
+ if (dev->reg_state != NETREG_REGISTERED)
+ return "(unregistered net_device)";
+ return dev->name;
+}
+#define netdev_name(netdev) _kc_netdev_name(netdev)
+#endif /* netdev_name */
+
+#undef netdev_printk
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#define netdev_printk(level, netdev, format, args...) \
+do { \
+ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \
+ printk(level "%s: " format, pci_name(pdev), ##args); \
+} while(0)
+#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define netdev_printk(level, netdev, format, args...) \
+do { \
+ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \
+ struct device *dev = pci_dev_to_dev(pdev); \
+ dev_printk(level, dev, "%s: " format, \
+ netdev_name(netdev), ##args); \
+} while(0)
+#else /* 2.6.21 => 2.6.34 */
+#define netdev_printk(level, netdev, format, args...) \
+ dev_printk(level, (netdev)->dev.parent, \
+ "%s: " format, \
+ netdev_name(netdev), ##args)
+#endif /* <2.6.0 <2.6.21 <2.6.34 */
+#undef netdev_emerg
+#define netdev_emerg(dev, format, args...) \
+ netdev_printk(KERN_EMERG, dev, format, ##args)
+#undef netdev_alert
+#define netdev_alert(dev, format, args...) \
+ netdev_printk(KERN_ALERT, dev, format, ##args)
+#undef netdev_crit
+#define netdev_crit(dev, format, args...) \
+ netdev_printk(KERN_CRIT, dev, format, ##args)
+#undef netdev_err
+#define netdev_err(dev, format, args...) \
+ netdev_printk(KERN_ERR, dev, format, ##args)
+#undef netdev_warn
+#define netdev_warn(dev, format, args...) \
+ netdev_printk(KERN_WARNING, dev, format, ##args)
+#undef netdev_notice
+#define netdev_notice(dev, format, args...) \
+ netdev_printk(KERN_NOTICE, dev, format, ##args)
+#undef netdev_info
+#define netdev_info(dev, format, args...) \
+ netdev_printk(KERN_INFO, dev, format, ##args)
+#undef netdev_dbg
+#if defined(DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args)
+#elif defined(CONFIG_DYNAMIC_DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+do { \
+ dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
+ netdev_name(__dev), ##args); \
+} while (0)
+#else /* DEBUG */
+#define netdev_dbg(__dev, format, args...) \
+({ \
+ if (0) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args); \
+ 0; \
+})
+#endif /* DEBUG */
+
+#undef netif_printk
+#define netif_printk(priv, type, level, dev, fmt, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ netdev_printk(level, (dev), fmt, ##args); \
+} while (0)
+
+#undef netif_emerg
+#define netif_emerg(priv, type, dev, fmt, args...) \
+ netif_level(emerg, priv, type, dev, fmt, ##args)
+#undef netif_alert
+#define netif_alert(priv, type, dev, fmt, args...) \
+ netif_level(alert, priv, type, dev, fmt, ##args)
+#undef netif_crit
+#define netif_crit(priv, type, dev, fmt, args...) \
+ netif_level(crit, priv, type, dev, fmt, ##args)
+#undef netif_err
+#define netif_err(priv, type, dev, fmt, args...) \
+ netif_level(err, priv, type, dev, fmt, ##args)
+#undef netif_warn
+#define netif_warn(priv, type, dev, fmt, args...) \
+ netif_level(warn, priv, type, dev, fmt, ##args)
+#undef netif_notice
+#define netif_notice(priv, type, dev, fmt, args...) \
+ netif_level(notice, priv, type, dev, fmt, ##args)
+#undef netif_info
+#define netif_info(priv, type, dev, fmt, args...) \
+ netif_level(info, priv, type, dev, fmt, ##args)
+#undef netif_dbg
+#define netif_dbg(priv, type, dev, fmt, args...) \
+ netif_level(dbg, priv, type, dev, fmt, ##args)
+
+#ifdef SET_SYSTEM_SLEEP_PM_OPS
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#endif
+
+#ifndef for_each_set_bit
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+#endif /* for_each_set_bit */
+
+#ifndef DEFINE_DMA_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR
+#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN
+#define dma_unmap_addr pci_unmap_addr
+#define dma_unmap_addr_set pci_unmap_addr_set
+#define dma_unmap_len pci_unmap_len
+#define dma_unmap_len_set pci_unmap_len_set
+#endif /* DEFINE_DMA_UNMAP_ADDR */
+
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3))
+#ifdef IGB_HWMON
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define sysfs_attr_init(attr) \
+ do { \
+ static struct lock_class_key __key; \
+ (attr)->key = &__key; \
+ } while (0)
+#else
+#define sysfs_attr_init(attr) do {} while (0)
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* IGB_HWMON */
+#endif /* RHEL_RELEASE_CODE */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+static inline bool _kc_pm_runtime_suspended()
+{
+ return false;
+}
+#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended()
+#else /* 2.6.0 => 2.6.34 */
+static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev)
+{
+ return false;
+}
+#ifndef pm_runtime_suspended
+#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev)
+#endif
+#endif /* 2.6.0 => 2.6.34 */
+
+#ifndef pci_bus_speed
+/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */
+enum _kc_pci_bus_speed {
+ _KC_PCIE_SPEED_2_5GT = 0x14,
+ _KC_PCIE_SPEED_5_0GT = 0x15,
+ _KC_PCIE_SPEED_8_0GT = 0x16,
+ _KC_PCI_SPEED_UNKNOWN = 0xff,
+};
+#define pci_bus_speed _kc_pci_bus_speed
+#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT
+#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT
+#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT
+#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN
+#endif /* pci_bus_speed */
+
+#else /* < 2.6.34 */
+#define HAVE_SYSTEM_SLEEP_PM_OPS
+#ifndef HAVE_SET_RX_MODE
+#define HAVE_SET_RX_MODE
+#endif
+
+#endif /* < 2.6.34 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
+ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+ const void __user *from, size_t count);
+#define simple_write_to_buffer _kc_simple_write_to_buffer
+
+#ifndef PCI_EXP_LNKSTA_NLW_SHIFT
+#define PCI_EXP_LNKSTA_NLW_SHIFT 4
+#endif
+
+#ifndef numa_node_id
+#define numa_node_id() 0
+#endif
+#ifndef numa_mem_id
+#define numa_mem_id numa_node_id
+#endif
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
+#ifdef HAVE_TX_MQ
+#include <net/sch_generic.h>
+#ifndef CONFIG_NETDEVICES_MULTIQUEUE
+int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
+#else /* CONFIG_NETDEVICES_MULTI_QUEUE */
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev,
+ unsigned int txq)
+{
+ dev->egress_subqueue_count = txq;
+ return 0;
+}
+#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */
+#else /* HAVE_TX_MQ */
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev,
+ unsigned int __always_unused txq)
+{
+ return 0;
+}
+#endif /* HAVE_TX_MQ */
+#define netif_set_real_num_tx_queues(dev, txq) \
+ _kc_netif_set_real_num_tx_queues(dev, txq)
+#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
+#ifndef ETH_FLAG_RXHASH
+#define ETH_FLAG_RXHASH (1<<28)
+#endif /* ETH_FLAG_RXHASH */
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))
+#define HAVE_IRQ_AFFINITY_HINT
+#endif
+#else /* < 2.6.35 */
+#define HAVE_PM_QOS_REQUEST_LIST
+#define HAVE_IRQ_AFFINITY_HINT
+#endif /* < 2.6.35 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
+extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32);
+#define ethtool_op_set_flags _kc_ethtool_op_set_flags
+extern u32 _kc_ethtool_op_get_flags(struct net_device *);
+#define ethtool_op_get_flags _kc_ethtool_op_get_flags
+
+enum {
+ WQ_UNBOUND = 0,
+ WQ_RESCUER = 0,
+};
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#ifdef NET_IP_ALIGN
+#undef NET_IP_ALIGN
+#endif
+#define NET_IP_ALIGN 0
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+
+#ifdef NET_SKB_PAD
+#undef NET_SKB_PAD
+#endif
+
+#if (L1_CACHE_BYTES > 32)
+#define NET_SKB_PAD L1_CACHE_BYTES
+#else
+#define NET_SKB_PAD 32
+#endif
+
+static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
+ unsigned int length)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
+ if (skb) {
+#if (NET_IP_ALIGN + NET_SKB_PAD)
+ skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
+#endif
+ skb->dev = dev;
+ }
+ return skb;
+}
+
+#ifdef netdev_alloc_skb_ip_align
+#undef netdev_alloc_skb_ip_align
+#endif
+#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
+
+#undef netif_level
+#define netif_level(level, priv, type, dev, fmt, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ netdev_##level(dev, fmt, ##args); \
+} while (0)
+
+#undef usleep_range
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
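+/* e.g. usleep_range(100, 200) becomes msleep(1) here: the minimum is rounded
+ * up to whole milliseconds and the maximum is ignored, so sleeps may be
+ * longer than on kernels with a native usleep_range().
+ */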
+
+#define u64_stats_update_begin(a) do { } while(0)
+#define u64_stats_update_end(a) do { } while(0)
+#define u64_stats_fetch_begin(a) do { } while(0)
+#define u64_stats_fetch_retry_bh(a,b) (0)
+#define u64_stats_fetch_begin_bh(a) (0)
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1))
+#define HAVE_8021P_SUPPORT
+#endif
+
+/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
+ !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0)))
+static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb)
+{
+ return;
+}
+#endif
+
+#else /* < 2.6.36 */
+
+#define HAVE_PM_QOS_REQUEST_ACTIVE
+#define HAVE_8021P_SUPPORT
+#define HAVE_NDO_GET_STATS64
+#endif /* < 2.6.36 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
+#define HAVE_NON_CONST_PCI_DRIVER_NAME
+#ifndef netif_set_real_num_tx_queues
+static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev,
+ unsigned int txq)
+{
+ netif_set_real_num_tx_queues(dev, txq);
+ return 0;
+}
+#define netif_set_real_num_tx_queues(dev, txq) \
+ _kc_netif_set_real_num_tx_queues(dev, txq)
+#endif
+#ifndef netif_set_real_num_rx_queues
+static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev,
+ unsigned int __always_unused rxq)
+{
+ return 0;
+}
+#define netif_set_real_num_rx_queues(dev, rxq) \
+ __kc_netif_set_real_num_rx_queues((dev), (rxq))
+#endif
+#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
+#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
+#endif
+#ifndef VLAN_N_VID
+#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN
+#endif /* VLAN_N_VID */
+#ifndef ETH_FLAG_TXVLAN
+#define ETH_FLAG_TXVLAN (1 << 7)
+#endif /* ETH_FLAG_TXVLAN */
+#ifndef ETH_FLAG_RXVLAN
+#define ETH_FLAG_RXVLAN (1 << 8)
+#endif /* ETH_FLAG_RXVLAN */
+
+#define WQ_MEM_RECLAIM WQ_RESCUER
+
+static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb)
+{
+ WARN_ON(skb->ip_summed != CHECKSUM_NONE);
+}
+#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb)
+
+static inline void *_kc_vzalloc_node(unsigned long size, int node)
+{
+ void *addr = vmalloc_node(size, node);
+ if (addr)
+ memset(addr, 0, size);
+ return addr;
+}
+#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node)
+
+static inline void *_kc_vzalloc(unsigned long size)
+{
+ void *addr = vmalloc(size);
+ if (addr)
+ memset(addr, 0, size);
+ return addr;
+}
+#define vzalloc(_size) _kc_vzalloc(_size)
+
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \
+ (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0)))
+static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
+{
+ if (vlan_tx_tag_present(skb) ||
+ skb->protocol != cpu_to_be16(ETH_P_8021Q))
+ return skb->protocol;
+
+ if (skb_headlen(skb) < sizeof(struct vlan_ethhdr))
+ return 0;
+
+ return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto;
+}
+#endif /* !RHEL5.7+ || RHEL6.0 */
+
+#ifdef HAVE_HW_TIME_STAMP
+#define SKBTX_HW_TSTAMP (1 << 0)
+#define SKBTX_IN_PROGRESS (1 << 2)
+#define SKB_SHARED_TX_IS_UNION
+#endif
+
+#ifndef device_wakeup_enable
+#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true)
+#endif
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) )
+#ifndef HAVE_VLAN_RX_REGISTER
+#define HAVE_VLAN_RX_REGISTER
+#endif
+#endif /* > 2.4.18 */
+#endif /* < 2.6.37 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+#define skb_checksum_start_offset(skb) skb_transport_offset(skb)
+#else /* 2.6.22 -> 2.6.37 */
+static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb)
+{
+ return skb->csum_start - skb_headroom(skb);
+}
+#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb)
+#endif /* 2.6.22 -> 2.6.37 */
+#if IS_ENABLED(CONFIG_DCB)
+#ifndef IEEE_8021QAZ_MAX_TCS
+#define IEEE_8021QAZ_MAX_TCS 8
+#endif
+#ifndef DCB_CAP_DCBX_HOST
+#define DCB_CAP_DCBX_HOST 0x01
+#endif
+#ifndef DCB_CAP_DCBX_LLD_MANAGED
+#define DCB_CAP_DCBX_LLD_MANAGED 0x02
+#endif
+#ifndef DCB_CAP_DCBX_VER_CEE
+#define DCB_CAP_DCBX_VER_CEE 0x04
+#endif
+#ifndef DCB_CAP_DCBX_VER_IEEE
+#define DCB_CAP_DCBX_VER_IEEE 0x08
+#endif
+#ifndef DCB_CAP_DCBX_STATIC
+#define DCB_CAP_DCBX_STATIC 0x10
+#endif
+#endif /* CONFIG_DCB */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))
+#define CONFIG_XPS
+#endif /* RHEL_RELEASE_VERSION(6,2) */
+#endif /* < 2.6.38 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
+#ifndef TC_BITMASK
+#define TC_BITMASK 15
+#endif
+#ifndef NETIF_F_RXCSUM
+#define NETIF_F_RXCSUM (1 << 29)
+#endif
+#ifndef skb_queue_reverse_walk_safe
+#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
+ for (skb = (queue)->prev, tmp = skb->prev; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->prev)
+#endif
+
+#ifndef udp_csum
+#define udp_csum __kc_udp_csum
+static inline __wsum __kc_udp_csum(struct sk_buff *skb)
+{
+ __wsum csum = csum_partial(skb_transport_header(skb),
+ sizeof(struct udphdr), skb->csum);
+
+ for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) {
+ csum = csum_add(csum, skb->csum);
+ }
+ return csum;
+}
+#endif /* udp_csum */
+#else /* < 2.6.39 */
+#ifndef HAVE_MQPRIO
+#define HAVE_MQPRIO
+#endif
+#ifndef HAVE_SETUP_TC
+#define HAVE_SETUP_TC
+#endif
+#ifdef CONFIG_DCB
+#ifndef HAVE_DCBNL_IEEE
+#define HAVE_DCBNL_IEEE
+#endif
+#endif /* CONFIG_DCB */
+#ifndef HAVE_NDO_SET_FEATURES
+#define HAVE_NDO_SET_FEATURES
+#endif
+#endif /* < 2.6.39 */
+
+/*****************************************************************************/
+/* use < 2.6.40 because Fedora 15 shipped kernels numbered 2.6.40.x that
+ * back-ported 3.0 features such as set_phys_id for ethtool.
+ */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) )
+#ifdef ETHTOOL_GRXRINGS
+#ifndef FLOW_EXT
+#define FLOW_EXT 0x80000000
+union _kc_ethtool_flow_union {
+ struct ethtool_tcpip4_spec tcp_ip4_spec;
+ struct ethtool_usrip4_spec usr_ip4_spec;
+ __u8 hdata[60];
+};
+struct _kc_ethtool_flow_ext {
+ __be16 vlan_etype;
+ __be16 vlan_tci;
+ __be32 data[2];
+};
+struct _kc_ethtool_rx_flow_spec {
+ __u32 flow_type;
+ union _kc_ethtool_flow_union h_u;
+ struct _kc_ethtool_flow_ext h_ext;
+ union _kc_ethtool_flow_union m_u;
+ struct _kc_ethtool_flow_ext m_ext;
+ __u64 ring_cookie;
+ __u32 location;
+};
+#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec
+#endif /* FLOW_EXT */
+#endif
+
+#define pci_disable_link_state_locked pci_disable_link_state
+
+#ifndef PCI_LTR_VALUE_MASK
+#define PCI_LTR_VALUE_MASK 0x000003ff
+#endif
+#ifndef PCI_LTR_SCALE_MASK
+#define PCI_LTR_SCALE_MASK 0x00001c00
+#endif
+#ifndef PCI_LTR_SCALE_SHIFT
+#define PCI_LTR_SCALE_SHIFT 10
+#endif
+
+#else /* < 2.6.40 */
+#define HAVE_ETHTOOL_SET_PHYS_ID
+#endif /* < 2.6.40 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) )
+#define USE_LEGACY_PM_SUPPORT
+#ifndef kfree_rcu
+#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr)
+#endif /* kfree_rcu */
+#ifndef kstrtol_from_user
+#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r)
+static inline int _kc_kstrtol_from_user(const char __user *s, size_t count,
+ unsigned int base, long *res)
+{
+ /* sign, base 2 representation, newline, terminator */
+ char buf[1 + sizeof(long) * 8 + 1 + 1];
+
+ count = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, s, count))
+ return -EFAULT;
+ buf[count] = '\0';
+ return strict_strtol(buf, base, res);
+}
+#endif
+
+/* 20000base_blah_full Supported and Advertised Registers */
+#define SUPPORTED_20000baseMLD2_Full (1 << 21)
+#define SUPPORTED_20000baseKR2_Full (1 << 22)
+#define ADVERTISED_20000baseMLD2_Full (1 << 21)
+#define ADVERTISED_20000baseKR2_Full (1 << 22)
+#endif /* < 3.0.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
+#ifndef __netdev_alloc_skb_ip_align
+#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l)
+#endif /* __netdev_alloc_skb_ip_align */
+#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app)
+#define dcb_ieee_delapp(dev, app) 0
+#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority)
+
+/* 1000BASE-T Control register */
+#define CTL1000_AS_MASTER 0x0800
+#define CTL1000_ENABLE_MASTER 0x1000
+
+/* kernels less than 3.0.0 don't have this */
+#ifndef ETH_P_8021AD
+#define ETH_P_8021AD 0x88A8
+#endif
+#else /* < 3.1.0 */
+#ifndef HAVE_DCBNL_IEEE_DELAPP
+#define HAVE_DCBNL_IEEE_DELAPP
+#endif
+#ifdef CONFIG_MACH_APALIS_T30
+#define HAVE_ETHTOOL_GET_TS_INFO
+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#endif
+#endif /* < 3.1.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
+#ifndef dma_zalloc_coherent
+#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f)
+static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ void *ret = dma_alloc_coherent(dev, size, dma_handle, flag);
+ if (ret)
+ memset(ret, 0, size);
+ return ret;
+}
+#endif
+#ifdef ETHTOOL_GRXRINGS
+#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+#endif /* ETHTOOL_GRXRINGS */
+
+#ifndef skb_frag_size
+#define skb_frag_size(frag) _kc_skb_frag_size(frag)
+static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag)
+{
+ return frag->size;
+}
+#endif /* skb_frag_size */
+
+#ifndef skb_frag_size_sub
+#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta)
+static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta)
+{
+ frag->size -= delta;
+}
+#endif /* skb_frag_size_sub */
+
+#ifndef skb_frag_page
+#define skb_frag_page(frag) _kc_skb_frag_page(frag)
+static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag)
+{
+ return frag->page;
+}
+#endif /* skb_frag_page */
+
+#ifndef skb_frag_address
+#define skb_frag_address(frag) _kc_skb_frag_address(frag)
+static inline void *_kc_skb_frag_address(const skb_frag_t *frag)
+{
+ return page_address(skb_frag_page(frag)) + frag->page_offset;
+}
+#endif /* skb_frag_address */
+
+#ifndef skb_frag_dma_map
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
+#include <linux/dma-mapping.h>
+#endif
+#define skb_frag_dma_map(dev,frag,offset,size,dir) \
+ _kc_skb_frag_dma_map(dev,frag,offset,size,dir)
+static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev,
+ const skb_frag_t *frag,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ return dma_map_page(dev, skb_frag_page(frag),
+ frag->page_offset + offset, size, dir);
+}
+#endif /* skb_frag_dma_map */
+
+#ifndef __skb_frag_unref
+#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag)
+static inline void __kc_skb_frag_unref(skb_frag_t *frag)
+{
+ put_page(skb_frag_page(frag));
+}
+#endif /* __skb_frag_unref */
+
+#ifndef SPEED_UNKNOWN
+#define SPEED_UNKNOWN -1
+#endif
+#ifndef DUPLEX_UNKNOWN
+#define DUPLEX_UNKNOWN 0xff
+#endif
+#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\
+ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)))
+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
+#endif
+#endif
+#else /* < 3.2.0 */
+#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_PCI_DEV_FLAGS_ASSIGNED
+#define HAVE_VF_SPOOFCHK_CONFIGURE
+#endif
+#ifndef HAVE_SKB_L4_RXHASH
+#define HAVE_SKB_L4_RXHASH
+#endif
+#define HAVE_IOMMU_PRESENT
+#define HAVE_PM_QOS_REQUEST_LIST_NEW
+#endif /* < 3.2.0 */
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2))
+#undef ixgbe_get_netdev_tc_txq
+#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc])
+#endif
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) )
+/* NOTE: the parameter order of _kc_alloc_workqueue() differs from
+ * alloc_workqueue() to avoid a compiler warning from -Wvarargs; see the
+ * expansion example after the wrapper macro below.
+ */
+static inline struct workqueue_struct * __attribute__ ((format(printf, 3, 4)))
+_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active,
+ const char *fmt, ...)
+{
+ struct workqueue_struct *wq;
+ va_list args, temp;
+ unsigned int len;
+ char *p;
+
+ va_start(args, fmt);
+ va_copy(temp, args);
+ len = vsnprintf(NULL, 0, fmt, temp);
+ va_end(temp);
+
+ p = kmalloc(len + 1, GFP_KERNEL);
+ if (!p) {
+ va_end(args);
+ return NULL;
+ }
+
+ vsnprintf(p, len + 1, fmt, args);
+ va_end(args);
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
+ wq = create_workqueue(p);
+#else
+ wq = alloc_workqueue(p, flags, max_active);
+#endif
+ kfree(p);
+
+ return wq;
+}
+#ifdef alloc_workqueue
+#undef alloc_workqueue
+#endif
+#define alloc_workqueue(fmt, flags, max_active, args...) \
+ _kc_alloc_workqueue(flags, max_active, fmt, ##args)
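+/* Expansion example (names illustrative): a call such as
+ *
+ *   wq = alloc_workqueue("%s_wq", WQ_MEM_RECLAIM, 1, dev_name(dev));
+ *
+ * becomes _kc_alloc_workqueue(WQ_MEM_RECLAIM, 1, "%s_wq", dev_name(dev)),
+ * i.e. the format string and its arguments move to the end so that the
+ * varargs remain last.
+ */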
+
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5))
+typedef u32 netdev_features_t;
+#endif
+#undef PCI_EXP_TYPE_RC_EC
+#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
+#ifndef CONFIG_BQL
+#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0)
+#define netdev_completed_queue(_n, _p, _b) do {} while (0)
+#define netdev_tx_sent_queue(_q, _b) do {} while (0)
+#define netdev_sent_queue(_n, _b) do {} while (0)
+#define netdev_tx_reset_queue(_q) do {} while (0)
+#define netdev_reset_queue(_n) do {} while (0)
+#endif
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#endif /* SLE_VERSION(11,3,0) */
+#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q)
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))
+static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start,
+ u8 *nexthdrp,
+ __be16 __always_unused *frag_offp)
+{
+ return ipv6_skip_exthdr(skb, start, nexthdrp);
+}
+#undef ipv6_skip_exthdr
+#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d))
+#endif /* !SLES11sp4 or greater */
+
+#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
+ !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0)))
+static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
+{
+ return index % n_rx_rings;
+}
+#endif
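+/* e.g. with 4 RX rings the default indirection table becomes
+ * 0, 1, 2, 3, 0, 1, 2, 3, ... (entry i maps to ring i % 4), matching the
+ * behaviour of the native helper on newer kernels.
+ */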
+
+#else /* ! < 3.3.0 */
+#define HAVE_ETHTOOL_GRXFHINDIR_SIZE
+#define HAVE_INT_NDO_VLAN_RX_ADD_VID
+#ifdef ETHTOOL_SRXNTUPLE
+#undef ETHTOOL_SRXNTUPLE
+#endif
+#endif /* < 3.3.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
+#ifndef NETIF_F_RXFCS
+#define NETIF_F_RXFCS 0
+#endif /* NETIF_F_RXFCS */
+#ifndef NETIF_F_RXALL
+#define NETIF_F_RXALL 0
+#endif /* NETIF_F_RXALL */
+
+#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+#define NUMTCS_RETURNS_U8
+
+int _kc_simple_open(struct inode *inode, struct file *file);
+#define simple_open _kc_simple_open
+#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
+
+#ifndef skb_add_rx_frag
+#define skb_add_rx_frag _kc_skb_add_rx_frag
+extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *,
+ int, int, unsigned int);
+#endif
+#ifdef NET_ADDR_RANDOM
+#define eth_hw_addr_random(N) do { \
+ eth_random_addr(N->dev_addr); \
+ N->addr_assign_type |= NET_ADDR_RANDOM; \
+ } while (0)
+#else /* NET_ADDR_RANDOM */
+#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr)
+#endif /* NET_ADDR_RANDOM */
+
+#ifndef for_each_set_bit_from
+#define for_each_set_bit_from(bit, addr, size) \
+ for ((bit) = find_next_bit((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+#endif /* for_each_set_bit_from */
+
+#else /* < 3.4.0 */
+#include <linux/kconfig.h>
+#endif /* >= 3.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \
+ ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) )
+#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+#define HAVE_PTP_1588_CLOCK
+#endif /* !NO_PTP_SUPPORT && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+#endif /* >= 3.0.0 || RHEL_RELEASE >= 6.4 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
+
+#ifndef ether_addr_equal
+static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2)
+{
+ return !compare_ether_addr(addr1, addr2);
+}
+#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2))
+#endif
+
+#else
+#define HAVE_FDB_OPS
+#define HAVE_ETHTOOL_GET_TS_INFO
+#endif /* < 3.5.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) )
+#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */
+
+#ifndef MDIO_EEE_100TX
+#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */
+#endif
+#ifndef MDIO_EEE_1000T
+#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */
+#endif
+#ifndef MDIO_EEE_10GT
+#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */
+#endif
+#ifndef MDIO_EEE_1000KX
+#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */
+#endif
+#ifndef MDIO_EEE_10GKX4
+#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */
+#endif
+#ifndef MDIO_EEE_10GKR
+#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */
+#endif
+
+#ifndef __GFP_MEMALLOC
+#define __GFP_MEMALLOC 0
+#endif
+
+#ifndef eth_random_addr
+#define eth_random_addr _kc_eth_random_addr
+static inline void _kc_eth_random_addr(u8 *addr)
+{
+ get_random_bytes(addr, ETH_ALEN);
+ addr[0] &= 0xfe; /* clear multicast */
+ addr[0] |= 0x02; /* set local assignment */
+}
+#endif /* eth_random_addr */
+#else /* < 3.6.0 */
+#define HAVE_STRUCT_PAGE_PFMEMALLOC
+#endif /* < 3.6.0 */
+
+/******************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
+#ifndef ADVERTISED_40000baseKR4_Full
+/* these defines were all added in one commit, so it should be safe
+ * to trigger activation on a single define
+ */
+#define SUPPORTED_40000baseKR4_Full (1 << 23)
+#define SUPPORTED_40000baseCR4_Full (1 << 24)
+#define SUPPORTED_40000baseSR4_Full (1 << 25)
+#define SUPPORTED_40000baseLR4_Full (1 << 26)
+#define ADVERTISED_40000baseKR4_Full (1 << 23)
+#define ADVERTISED_40000baseCR4_Full (1 << 24)
+#define ADVERTISED_40000baseSR4_Full (1 << 25)
+#define ADVERTISED_40000baseLR4_Full (1 << 26)
+#endif
+
+#ifndef mmd_eee_cap_to_ethtool_sup_t
+/**
+ * mmd_eee_cap_to_ethtool_sup_t
+ * @eee_cap: value of the MMD EEE Capability register
+ *
+ * A small helper function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ */
+static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+ u32 supported = 0;
+
+ if (eee_cap & MDIO_EEE_100TX)
+ supported |= SUPPORTED_100baseT_Full;
+ if (eee_cap & MDIO_EEE_1000T)
+ supported |= SUPPORTED_1000baseT_Full;
+ if (eee_cap & MDIO_EEE_10GT)
+ supported |= SUPPORTED_10000baseT_Full;
+ if (eee_cap & MDIO_EEE_1000KX)
+ supported |= SUPPORTED_1000baseKX_Full;
+ if (eee_cap & MDIO_EEE_10GKX4)
+ supported |= SUPPORTED_10000baseKX4_Full;
+ if (eee_cap & MDIO_EEE_10GKR)
+ supported |= SUPPORTED_10000baseKR_Full;
+
+ return supported;
+}
+#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \
+ __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap)
+#endif /* mmd_eee_cap_to_ethtool_sup_t */
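+/* Example translation using the helper above: a PHY reporting
+ * (MDIO_EEE_100TX | MDIO_EEE_1000T) in register 3.20 yields
+ * (SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full) for ethtool.
+ */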
+
+#ifndef mmd_eee_adv_to_ethtool_adv_t
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ */
+static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
+{
+ u32 adv = 0;
+
+ if (eee_adv & MDIO_EEE_100TX)
+ adv |= ADVERTISED_100baseT_Full;
+ if (eee_adv & MDIO_EEE_1000T)
+ adv |= ADVERTISED_1000baseT_Full;
+ if (eee_adv & MDIO_EEE_10GT)
+ adv |= ADVERTISED_10000baseT_Full;
+ if (eee_adv & MDIO_EEE_1000KX)
+ adv |= ADVERTISED_1000baseKX_Full;
+ if (eee_adv & MDIO_EEE_10GKX4)
+ adv |= ADVERTISED_10000baseKX4_Full;
+ if (eee_adv & MDIO_EEE_10GKR)
+ adv |= ADVERTISED_10000baseKR_Full;
+
+ return adv;
+}
+
+#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \
+ __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv)
+#endif /* mmd_eee_adv_to_ethtool_adv_t */
+
+#ifndef ethtool_adv_to_mmd_eee_adv_t
+/**
+ * ethtool_adv_to_mmd_eee_adv_t
+ * @adv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement settings
+ * to EEE advertisements for the MMD EEE Advertisement (7.60) and
+ * MMD EEE Link Partner Ability (7.61) registers.
+ */
+static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv)
+{
+ u16 reg = 0;
+
+ if (adv & ADVERTISED_100baseT_Full)
+ reg |= MDIO_EEE_100TX;
+ if (adv & ADVERTISED_1000baseT_Full)
+ reg |= MDIO_EEE_1000T;
+ if (adv & ADVERTISED_10000baseT_Full)
+ reg |= MDIO_EEE_10GT;
+ if (adv & ADVERTISED_1000baseKX_Full)
+ reg |= MDIO_EEE_1000KX;
+ if (adv & ADVERTISED_10000baseKX4_Full)
+ reg |= MDIO_EEE_10GKX4;
+ if (adv & ADVERTISED_10000baseKR_Full)
+ reg |= MDIO_EEE_10GKR;
+
+ return reg;
+}
+#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv)
+#endif /* ethtool_adv_to_mmd_eee_adv_t */
+
+#ifndef pci_pcie_type
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+static inline u8 pci_pcie_type(struct pci_dev *pdev)
+{
+ int pos;
+ u16 reg16;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ BUG_ON(!pos);
+ pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+ return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
+}
+#else /* < 2.6.24 */
+#define pci_pcie_type(x) (x)->pcie_type
+#endif /* < 2.6.24 */
+#endif /* pci_pcie_type */
+
+#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \
+ ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \
+ ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) )
+#define ptp_clock_register(caps, args...) ptp_clock_register(caps)
+#endif
+
+#ifndef pcie_capability_read_word
+int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
+#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v)
+#endif /* pcie_capability_read_word */
+
+#ifndef pcie_capability_write_word
+int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
+#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v)
+#endif /* pcie_capability_write_word */
+
+#ifndef pcie_capability_clear_and_set_word
+int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+ u16 clear, u16 set);
+#define pcie_capability_clear_and_set_word(d,p,c,s) \
+ __kc_pcie_capability_clear_and_set_word(d,p,c,s)
+#endif /* pcie_capability_clear_and_set_word */
+
+#ifndef pcie_capability_clear_word
+int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos,
+ u16 clear);
+#define pcie_capability_clear_word(d, p, c) \
+ __kc_pcie_capability_clear_word(d, p, c)
+#endif /* pcie_capability_clear_word */
+
+#ifndef PCI_EXP_LNKSTA2
+#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
+#endif
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
+#define USE_CONST_DEV_UC_CHAR
+#endif
+
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8))
+#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi)
+#endif /* !RHEL6.8+ */
+
+#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6))
+#include <linux/hashtable.h>
+#else
+
+#define DEFINE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)] = \
+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
+#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)] __read_mostly = \
+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
+#define DECLARE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)]
+
+#define HASH_SIZE(name) (ARRAY_SIZE(name))
+#define HASH_BITS(name) ilog2(HASH_SIZE(name))
+
+/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
+#define hash_min(val, bits) \
+ (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
+
+static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ INIT_HLIST_HEAD(&ht[i]);
+}
+
+#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
+
+#define hash_add(hashtable, node, key) \
+ hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
+
+static inline bool hash_hashed(struct hlist_node *node)
+{
+ return !hlist_unhashed(node);
+}
+
+static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ if (!hlist_empty(&ht[i]))
+ return false;
+
+ return true;
+}
+
+#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
+
+static inline void hash_del(struct hlist_node *node)
+{
+ hlist_del_init(node);
+}
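+/* Minimal usage sketch of this backport (types and names illustrative):
+ *
+ *   DEFINE_HASHTABLE(my_table, 4);          (creates 1 << 4 = 16 buckets)
+ *   struct my_entry { u32 key; struct hlist_node node; } *e;
+ *
+ *   hash_add(my_table, &e->node, e->key);
+ *   if (hash_hashed(&e->node))
+ *           hash_del(&e->node);
+ */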
+#endif /* RHEL >= 6.6 */
+
+#else /* >= 3.7.0 */
+#include <linux/hashtable.h>
+#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
+#define USE_CONST_DEV_UC_CHAR
+#endif /* >= 3.7.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) )
+#ifndef pci_sriov_set_totalvfs
+static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs)
+{
+ return 0;
+}
+#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b))
+#endif
+#ifndef PCI_EXP_LNKCTL_ASPM_L0S
+#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */
+#endif
+#ifndef PCI_EXP_LNKCTL_ASPM_L1
+#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */
+#endif
+#define HAVE_CONFIG_HOTPLUG
+/* Reserved Ethernet Addresses per IEEE 802.1Q */
+static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = {
+ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+
+#ifndef is_link_local_ether_addr
+static inline bool __kc_is_link_local_ether_addr(const u8 *addr)
+{
+ __be16 *a = (__be16 *)addr;
+ static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
+ static const __be16 m = cpu_to_be16(0xfff0);
+
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
+}
+#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr)
+#endif /* is_link_local_ether_addr */
+int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
+ int target, unsigned short *fragoff, int *flags);
+#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e))
+
+#ifndef FLOW_MAC_EXT
+#define FLOW_MAC_EXT 0x40000000
+#endif /* FLOW_MAC_EXT */
+
+#else /* >= 3.8.0 */
+#ifndef __devinit
+#define __devinit
+#endif
+
+#ifndef __devinitdata
+#define __devinitdata
+#endif
+
+#ifndef __devinitconst
+#define __devinitconst
+#endif
+
+#ifndef __devexit
+#define __devexit
+#endif
+
+#ifndef __devexit_p
+#define __devexit_p
+#endif
+
+#ifndef HAVE_ENCAP_CSUM_OFFLOAD
+#define HAVE_ENCAP_CSUM_OFFLOAD
+#endif
+
+#ifndef HAVE_GRE_ENCAP_OFFLOAD
+#define HAVE_GRE_ENCAP_OFFLOAD
+#endif
+
+#ifndef HAVE_SRIOV_CONFIGURE
+#define HAVE_SRIOV_CONFIGURE
+#endif
+
+#define HAVE_BRIDGE_ATTRIBS
+#ifndef BRIDGE_MODE_VEB
+#define BRIDGE_MODE_VEB 0 /* Default loopback mode */
+#endif /* BRIDGE_MODE_VEB */
+#ifndef BRIDGE_MODE_VEPA
+#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */
+#endif /* BRIDGE_MODE_VEPA */
+#endif /* >= 3.8.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
+
+#undef BUILD_BUG_ON
+#ifdef __CHECKER__
+#define BUILD_BUG_ON(condition) (0)
+#else /* __CHECKER__ */
+#ifndef __compiletime_warning
+#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400)
+#define __compiletime_warning(message) __attribute__((warning(message)))
+#else /* __GNUC__ */
+#define __compiletime_warning(message)
+#endif /* __GNUC__ */
+#endif /* __compiletime_warning */
+#ifndef __compiletime_error
+#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400)
+#define __compiletime_error(message) __attribute__((error(message)))
+#define __compiletime_error_fallback(condition) do { } while (0)
+#else /* __GNUC__ */
+#define __compiletime_error(message)
+#define __compiletime_error_fallback(condition) \
+ do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
+#endif /* __GNUC__ */
+#else /* __compiletime_error */
+#define __compiletime_error_fallback(condition) do { } while (0)
+#endif /* __compiletime_error */
+#define __compiletime_assert(condition, msg, prefix, suffix) \
+ do { \
+ bool __cond = !(condition); \
+ extern void prefix ## suffix(void) __compiletime_error(msg); \
+ if (__cond) \
+ prefix ## suffix(); \
+ __compiletime_error_fallback(__cond); \
+ } while (0)
+
+#define _compiletime_assert(condition, msg, prefix, suffix) \
+ __compiletime_assert(condition, msg, prefix, suffix)
+#define compiletime_assert(condition, msg) \
+ _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
+#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg)
+#ifndef __OPTIMIZE__
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#else /* __OPTIMIZE__ */
+#define BUILD_BUG_ON(condition) \
+ BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition)
+#endif /* __OPTIMIZE__ */
+#endif /* __CHECKER__ */
+
+#undef hlist_entry
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#undef hlist_entry_safe
+#define hlist_entry_safe(ptr, type, member) \
+ ({ typeof(ptr) ____ptr = (ptr); \
+ ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
+ })
+
+#undef hlist_for_each_entry
+#define hlist_for_each_entry(pos, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
+ pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+#undef hlist_for_each_entry_safe
+#define hlist_for_each_entry_safe(pos, n, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \
+ pos && ({ n = pos->member.next; 1; }); \
+ pos = hlist_entry_safe(n, typeof(*pos), member))
+
+#undef hash_for_each
+#define hash_for_each(name, bkt, obj, member) \
+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+ (bkt)++)\
+ hlist_for_each_entry(obj, &name[bkt], member)
+
+#undef hash_for_each_safe
+#define hash_for_each_safe(name, bkt, tmp, obj, member) \
+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+ (bkt)++)\
+ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
+
+#undef hash_for_each_possible
+#define hash_for_each_possible(name, obj, member, key) \
+ hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
+
+#undef hash_for_each_possible_safe
+#define hash_for_each_possible_safe(name, obj, tmp, member, key) \
+ hlist_for_each_entry_safe(obj, tmp,\
+ &name[hash_min(key, HASH_BITS(name))], member)
+
+#ifdef CONFIG_XPS
+extern int __kc_netif_set_xps_queue(struct net_device *, struct cpumask *, u16);
+#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx))
+#else /* CONFIG_XPS */
+#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0)
+#endif /* CONFIG_XPS */
+
+#ifdef HAVE_NETDEV_SELECT_QUEUE
+#define _kc_hashrnd 0xd631614b /* not so random hash salt */
+extern u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
+#define __netdev_pick_tx __kc_netdev_pick_tx
+#endif /* HAVE_NETDEV_SELECT_QUEUE */
+#else
+#define HAVE_BRIDGE_FILTER
+#define HAVE_FDB_DEL_NLATTR
+#endif /* < 3.9.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
+#ifndef NAPI_POLL_WEIGHT
+#define NAPI_POLL_WEIGHT 64
+#endif
+#ifdef CONFIG_PCI_IOV
+extern int __kc_pci_vfs_assigned(struct pci_dev *dev);
+#else
+static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev)
+{
+ return 0;
+}
+#endif
+#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev)
+
+#ifndef list_first_entry_or_null
+#define list_first_entry_or_null(ptr, type, member) \
+ (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+#endif
+
+#ifndef VLAN_TX_COOKIE_MAGIC
+static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb,
+ u16 vlan_tci)
+{
+#ifdef VLAN_TAG_PRESENT
+ vlan_tci |= VLAN_TAG_PRESENT;
+#endif
+ skb->vlan_tci = vlan_tci;
+ return skb;
+}
+#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \
+ __kc__vlan_hwaccel_put_tag(skb, vlan_tci)
+#endif
+
+#ifdef HAVE_FDB_OPS
+#ifdef USE_CONST_DEV_UC_CHAR
+extern int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 flags);
+#ifdef HAVE_FDB_DEL_NLATTR
+extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr);
+#else
+extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+ const unsigned char *addr);
+#endif
+#else
+extern int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev,
+ unsigned char *addr, u16 flags);
+extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev,
+ unsigned char *addr);
+#endif
+#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add
+#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del
+#endif /* HAVE_FDB_OPS */
+
+#ifndef PCI_DEVID
+#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
+#endif
+#else /* >= 3.10.0 */
+#define HAVE_ENCAP_TSO_OFFLOAD
+#define USE_DEFAULT_FDB_DEL_DUMP
+#define HAVE_SKB_INNER_NETWORK_HEADER
+#endif /* >= 3.10.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) )
+#define netdev_notifier_info_to_dev(ptr) ptr
+#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\
+ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)))
+#define HAVE_NDO_SET_VF_LINK_STATE
+#endif
+#else /* >= 3.11.0 */
+#define HAVE_NDO_SET_VF_LINK_STATE
+#define HAVE_SKB_INNER_PROTOCOL
+#define HAVE_MPLS_FEATURES
+#endif /* >= 3.11.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) )
+extern int __kc_pcie_get_minimum_link(struct pci_dev *dev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width);
+#ifndef pcie_get_minimum_link
+#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w)
+#endif
+#else /* >= 3.12.0 */
+#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) )
+#define HAVE_VXLAN_RX_OFFLOAD
+#endif /* < 4.8.0 */
+#define HAVE_NDO_GET_PHYS_PORT_ID
+#endif /* >= 3.12.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) )
+#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m)
+extern int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask);
+#ifndef u64_stats_init
+#define u64_stats_init(a) do { } while(0)
+#endif
+#ifndef BIT_ULL
+#define BIT_ULL(n) (1ULL << (n))
+#endif
+
+#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0))
+#undef HAVE_STRUCT_PAGE_PFMEMALLOC
+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
+#endif
+#ifndef list_next_entry
+#define list_next_entry(pos, member) \
+ list_entry((pos)->member.next, typeof(*(pos)), member)
+#endif
+
+#else /* >= 3.13.0 */
+#define HAVE_VXLAN_CHECKS
+#if (UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24))
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#else
+#define HAVE_NDO_SELECT_QUEUE_ACCEL
+#endif
+#define HAVE_NET_GET_RANDOM_ONCE
+#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS
+#endif
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
+
+#ifndef U16_MAX
+#define U16_MAX ((u16)~0U)
+#endif
+
+#ifndef U32_MAX
+#define U32_MAX ((u32)~0U)
+#endif
+
+#define dev_consume_skb_any(x) dev_kfree_skb_any(x)
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \
+ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)))
+
+/* it isn't expected that this would be a #define unless we made it so */
+#ifndef skb_set_hash
+
+#define PKT_HASH_TYPE_NONE 0
+#define PKT_HASH_TYPE_L2 1
+#define PKT_HASH_TYPE_L3 2
+#define PKT_HASH_TYPE_L4 3
+
+#define skb_set_hash __kc_skb_set_hash
+static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb,
+ u32 __maybe_unused hash,
+ int __maybe_unused type)
+{
+#ifdef HAVE_SKB_L4_RXHASH
+ skb->l4_rxhash = (type == PKT_HASH_TYPE_L4);
+#endif
+#ifdef NETIF_F_RXHASH
+ skb->rxhash = hash;
+#endif
+}
+#endif /* !skb_set_hash */
+
+#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */
+
+#ifndef HAVE_VXLAN_RX_OFFLOAD
+#define HAVE_VXLAN_RX_OFFLOAD
+#endif /* HAVE_VXLAN_RX_OFFLOAD */
+
+#ifndef HAVE_VXLAN_CHECKS
+#define HAVE_VXLAN_CHECKS
+#endif /* HAVE_VXLAN_CHECKS */
+#endif /* !(RHEL_RELEASE_CODE >= 7.0) && !(SLE_VERSION_CODE >= 12.0) */
+
+#ifndef pci_enable_msix_range
+extern int __kc_pci_enable_msix_range(struct pci_dev *dev,
+ struct msix_entry *entries,
+ int minvec, int maxvec);
+#define pci_enable_msix_range __kc_pci_enable_msix_range
+#endif
+
+#ifndef ether_addr_copy
+#define ether_addr_copy __kc_ether_addr_copy
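+/* As with the upstream helper, dst and src are assumed to be u16-aligned */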
+static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ *(u32 *)dst = *(const u32 *)src;
+ *(u16 *)(dst + 4) = *(const u16 *)(src + 4);
+#else
+ u16 *a = (u16 *)dst;
+ const u16 *b = (const u16 *)src;
+
+ a[0] = b[0];
+ a[1] = b[1];
+ a[2] = b[2];
+#endif
+}
+#endif /* ether_addr_copy */
+
+#else /* >= 3.14.0 */
+
+/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */
+#ifndef HAVE_NDO_DFWD_OPS
+#define HAVE_NDO_DFWD_OPS
+#endif
+#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK
+#endif /* 3.14.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) )
+
+#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \
+ !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30)))
+#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh
+#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh
+#endif
+
+#else
+#define HAVE_PTP_1588_CLOCK_PINS
+#define HAVE_NETDEV_PORT
+#endif /* 3.15.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) )
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() smp_mb()
+#endif
+#ifndef __dev_uc_sync
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*sync)(struct net_device *, const unsigned char *),
+ int (*unsync)(struct net_device *, const unsigned char *));
+void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*unsync)(struct net_device *, const unsigned char *));
+#endif
+#ifndef NETDEV_HW_ADDR_T_MULTICAST
+int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count,
+ struct net_device *dev,
+ int (*sync)(struct net_device *, const unsigned char *),
+ int (*unsync)(struct net_device *, const unsigned char *));
+void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count,
+ struct net_device *dev,
+ int (*unsync)(struct net_device *, const unsigned char *));
+#endif
+#endif /* HAVE_SET_RX_MODE */
+
+static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev,
+ int __maybe_unused (*sync)(struct net_device *, const unsigned char *),
+ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+ return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
+#elif defined(HAVE_SET_RX_MODE)
+ return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count,
+ dev, sync, unsync);
+#else
+ return 0;
+#endif
+}
+#define __dev_uc_sync __kc_dev_uc_sync
+
+static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev,
+ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_UNICAST
+ __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync);
+#else /* NETDEV_HW_ADDR_T_MULTICAST */
+ __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync);
+#endif /* NETDEV_HW_ADDR_T_UNICAST */
+#endif /* HAVE_SET_RX_MODE */
+}
+#define __dev_uc_unsync __kc_dev_uc_unsync
+
+static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev,
+ int __maybe_unused (*sync)(struct net_device *, const unsigned char *),
+ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+ return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
+#elif defined(HAVE_SET_RX_MODE)
+ return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count,
+ dev, sync, unsync);
+#else
+ return 0;
+#endif
+}
+#define __dev_mc_sync __kc_dev_mc_sync
+
+static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev,
+ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *))
+{
+#ifdef HAVE_SET_RX_MODE
+#ifdef NETDEV_HW_ADDR_T_MULTICAST
+ __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync);
+#else /* NETDEV_HW_ADDR_T_MULTICAST */
+ __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync);
+#endif /* NETDEV_HW_ADDR_T_MULTICAST */
+#endif /* HAVE_SET_RX_MODE */
+}
+#define __dev_mc_unsync __kc_dev_mc_unsync
+#endif /* __dev_uc_sync */
+
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+#endif
+
+#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM
+/* If someone backports this, hopefully they backport it as a #define.
+ * Declare it as zero on older kernels so that if it gets OR'd in
+ * it won't affect anything, thereby avoiding core driver changes.
+ */
+#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0
+#define SKB_GSO_UDP_TUNNEL_CSUM 0
+#endif
+
+#else
+#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY
+#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE
+#endif /* 3.16.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) )
+#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \
+ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \
+ !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))
+#ifndef timespec64
+#define timespec64 timespec
+static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
+{
+ return ts;
+}
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
+{
+ return ts64;
+}
+#define timespec64_equal timespec_equal
+#define timespec64_compare timespec_compare
+#define set_normalized_timespec64 set_normalized_timespec
+#define timespec64_add_safe timespec_add_safe
+#define timespec64_add timespec_add
+#define timespec64_sub timespec_sub
+#define timespec64_valid timespec_valid
+#define timespec64_valid_strict timespec_valid_strict
+#define timespec64_to_ns timespec_to_ns
+#define ns_to_timespec64 ns_to_timespec
+#define ktime_to_timespec64 ktime_to_timespec
+#define timespec64_add_ns timespec_add_ns
+#endif /* timespec64 */
+#endif /* !(RHEL6.8<RHEL7.0) && !RHEL7.2+ */
+
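+/* hlist_add_behind(node, prev) maps onto the older hlist_add_after(prev, node) */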
+#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a)
+#else
+#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT
+#endif /* 3.17.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) )
+#ifndef NO_PTP_SUPPORT
+#include <linux/errqueue.h>
+extern struct sk_buff *__kc_skb_clone_sk(struct sk_buff *skb);
+extern void __kc_skb_complete_tx_timestamp(struct sk_buff *skb,
+ struct skb_shared_hwtstamps *hwtstamps);
+#define skb_clone_sk __kc_skb_clone_sk
+#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp
+#endif
+extern unsigned int __kc_eth_get_headlen(unsigned char *data, unsigned int max_len);
+#define eth_get_headlen __kc_eth_get_headlen
+#ifndef ETH_P_XDSA
+#define ETH_P_XDSA 0x00F8
+#endif
+/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1))
+#define HAVE_SKBUFF_CSUM_LEVEL
+#endif /* >= RH 7.1 */
+
+#undef GENMASK
+#define GENMASK(h, l) \
+ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#undef GENMASK_ULL
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
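+/* Sanity-check examples (not from the original): GENMASK(15, 8) == 0xff00, GENMASK_ULL(39, 32) == 0xff00000000ULL */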
+
+#else /* 3.18.0 */
+#define HAVE_SKBUFF_CSUM_LEVEL
+#define HAVE_SKB_XMIT_MORE
+#define HAVE_SKB_INNER_PROTOCOL_TYPE
+#endif /* 3.18.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) )
+#else
+#define HAVE_NDO_FEATURES_CHECK
+#endif /* 3.18.4 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) )
+/* netdev_phys_port_id renamed to netdev_phys_item_id */
+#define netdev_phys_item_id netdev_phys_port_id
+
+static inline void _kc_napi_complete_done(struct napi_struct *napi,
+					  int __always_unused work_done)
+{
+	napi_complete(napi);
+}
+#define napi_complete_done _kc_napi_complete_done
+
+#ifndef NETDEV_RSS_KEY_LEN
+#define NETDEV_RSS_KEY_LEN (13 * 4)
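+/* 52 bytes, matching the upstream default RSS hash key size */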
+#endif
+#if ( !(RHEL_RELEASE_CODE && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) )
+#define netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill(buffer, len)
+#endif /* RHEL_RELEASE_CODE */
+extern void __kc_netdev_rss_key_fill(void *buffer, size_t len);
+#define SPEED_20000 20000
+#define SPEED_40000 40000
+#ifndef dma_rmb
+#define dma_rmb() rmb()
+#endif
+#ifndef dev_alloc_pages
+#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order))
+#endif
+#ifndef dev_alloc_page
+#define dev_alloc_page() dev_alloc_pages(0)
+#endif
+#if !defined(eth_skb_pad) && !defined(skb_put_padto)
+/**
+ * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size
+ * @skb: buffer to pad
+ * @len: minimal length
+ *
+ * Pads up a buffer to ensure the trailing bytes exist and are
+ * blanked. If the buffer already contains sufficient data it
+ * is untouched. Otherwise it is extended. Returns zero on
+ * success. The skb is freed on error.
+ */
+static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len)
+{
+ unsigned int size = skb->len;
+
+ if (unlikely(size < len)) {
+ len -= size;
+ if (skb_pad(skb, len))
+ return -ENOMEM;
+ __skb_put(skb, len);
+ }
+ return 0;
+}
+#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len)
+
+static inline int __kc_eth_skb_pad(struct sk_buff *skb)
+{
+ return __kc_skb_put_padto(skb, ETH_ZLEN);
+}
+#define eth_skb_pad(skb) __kc_eth_skb_pad(skb)
+#endif /* eth_skb_pad && skb_put_padto */
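+/*
+ * Illustrative use (not from the original drivers): pad a short Tx frame to
+ * the 60-byte Ethernet minimum before handing it to hardware; the skb is
+ * freed on failure:
+ *
+ *	if (eth_skb_pad(skb))
+ *		return NETDEV_TX_OK;
+ */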
+
+#ifndef SKB_ALLOC_NAPI
+/* RHEL 7.2 backported napi_alloc_skb and friends */
+static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length)
+{
+ return netdev_alloc_skb_ip_align(napi->dev, length);
+}
+#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len)
+#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len)
+#endif /* SKB_ALLOC_NAPI */
+#define HAVE_CONFIG_PM_RUNTIME
+#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,7)) && \
+ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
+#define HAVE_RXFH_HASHFUNC
+#endif /* 6.7 < RHEL < 7.0 */
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
+#define HAVE_RXFH_HASHFUNC
+#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS
+#endif /* RHEL > 7.1 */
+#ifndef napi_schedule_irqoff
+#define napi_schedule_irqoff napi_schedule
+#endif
+#ifndef READ_ONCE
+#define READ_ONCE(_x) ACCESS_ONCE(_x)
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+#define HAVE_NDO_FDB_ADD_VID
+#endif
+#else /* 3.19.0 */
+#define HAVE_NDO_FDB_ADD_VID
+#define HAVE_RXFH_HASHFUNC
+#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS
+#endif /* 3.19.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) )
+/* vlan_tx_xx functions got renamed to skb_vlan */
+#ifndef skb_vlan_tag_get
+#define skb_vlan_tag_get vlan_tx_tag_get
+#endif
+#ifndef skb_vlan_tag_present
+#define skb_vlan_tag_present vlan_tx_tag_present
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1))
+#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
+#endif
+#else
+#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
+#endif /* 3.20.0 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) )
+#ifndef NO_PTP_SUPPORT
+#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#include <linux/timecounter.h>
+#else
+#include <linux/clocksource.h>
+#endif
+static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta)
+{
+ tc->nsec += delta;
+}
+#define timecounter_adjtime __kc_timecounter_adjtime
+#endif
+#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))
+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+#endif
+#else
+#define HAVE_PTP_CLOCK_INFO_GETTIME64
+#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
+#define HAVE_PASSTHRU_FEATURES_CHECK
+#define HAVE_NDO_SET_VF_RSS_QUERY_EN
+#endif /* 4,1,0 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9))
+#if (!(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) && \
+ !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)))
+static inline bool page_is_pfmemalloc(struct page __maybe_unused *page)
+{
+#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC
+ return page->pfmemalloc;
+#else
+ return false;
+#endif
+}
+#endif /* !SLES12sp1 */
+#else
+#undef HAVE_STRUCT_PAGE_PFMEMALLOC
+#endif /* 4.1.9 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0))
+#else
+#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT
+#endif /* 4.2.0 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0))
+#ifndef CONFIG_64BIT
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+#include <asm-generic/io-64-nonatomic-lo-hi.h> /* 32-bit readq/writeq */
+#else /* 3.3.0 => 4.3.x */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
+#include <asm-generic/int-ll64.h>
+#endif /* 2.6.26 => 3.3.0 */
+#ifndef readq
+static inline __u64 readq(const volatile void __iomem *addr)
+{
+ const volatile u32 __iomem *p = addr;
+ u32 low, high;
+
+ low = readl(p);
+ high = readl(p + 1);
+
+ return low + ((u64)high << 32);
+}
+#define readq readq
+#endif
+
+#ifndef writeq
+static inline void writeq(__u64 val, volatile void __iomem *addr)
+{
+ writel(val, addr);
+ writel(val >> 32, addr + 4);
+}
+#define writeq writeq
+#endif
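+/* Note: these 32-bit fallbacks are non-atomic lo-hi accessors, matching io-64-nonatomic-lo-hi.h */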
+#endif /* < 3.3.0 */
+#endif /* !CONFIG_64BIT */
+#else
+#define HAVE_NDO_SET_VF_TRUST
+
+#ifndef CONFIG_64BIT
+#include <linux/io-64-nonatomic-lo-hi.h> /* 32-bit readq/writeq */
+#endif /* !CONFIG_64BIT */
+#endif /* 4.4.0 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0))
+/* protect against a likely backport */
+#ifndef NETIF_F_CSUM_MASK
+#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM
+#endif /* NETIF_F_CSUM_MASK */
+#ifndef NETIF_F_SCTP_CRC
+#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM
+#endif /* NETIF_F_SCTP_CRC */
+#else
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) )
+#define HAVE_GENEVE_RX_OFFLOAD
+#endif /* < 4.8.0 */
+#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD
+#endif /* 4.5.0 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0))
+#if !(UBUNTU_VERSION_CODE && \
+ UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) && \
+ !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)))
+static inline void napi_consume_skb(struct sk_buff *skb,
+ int __always_unused budget)
+{
+ dev_consume_skb_any(skb);
+}
+
+#endif /* UBUNTU_VERSION(4,4,0,21) */
+static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
+{
+	*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
+}
+
+#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)))
+static inline void page_ref_inc(struct page *page)
+{
+ atomic_inc(&page->_count);
+}
+
+#endif
+
+#endif /* 4.6.0 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0))
+#else
+#define HAVE_NETIF_TRANS_UPDATE
+#endif /* 4.7.0 */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0))
+enum udp_parsable_tunnel_type {
+ UDP_TUNNEL_TYPE_VXLAN,
+ UDP_TUNNEL_TYPE_GENEVE,
+};
+struct udp_tunnel_info {
+ unsigned short type;
+ sa_family_t sa_family;
+ __be16 port;
+};
+#else
+#define HAVE_UDP_ENC_RX_OFFLOAD
+#endif /* 4.8.0 */
+
+#endif /* _KCOMPAT_H_ */
diff --git a/drivers/net/ethernet/intel/igb/kcompat_ethtool.c b/drivers/net/ethernet/intel/igb/kcompat_ethtool.c
new file mode 100644
index 000000000000..0bcae160e332
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/kcompat_ethtool.c
@@ -0,0 +1,1169 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2015 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * net/core/ethtool.c - Ethtool ioctl handler
+ * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
+ *
+ * This file is where we call all the ethtool_ops commands to get
+ * the information ethtool needs. We fall back to calling do_ioctl()
+ * for drivers which haven't been converted to ethtool_ops yet.
+ *
+ * It's GPL, stupid.
+ *
+ * Modification by sfeldma@pobox.com to work as backward compat
+ * solution for pre-ethtool_ops kernels.
+ * - copied struct ethtool_ops from ethtool.h
+ * - defined SET_ETHTOOL_OPS
+ * - put in some #ifndef NETIF_F_xxx wrappers
+ * - changed refs to dev->ethtool_ops to ethtool_ops
+ * - changed dev_ethtool to ethtool_ioctl
+ * - removed EXPORT_SYMBOL()s
+ * - added _kc_ prefix in built-in ethtool_op_xxx ops.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <asm/uaccess.h>
+
+#include "kcompat.h"
+
+#undef SUPPORTED_10000baseT_Full
+#define SUPPORTED_10000baseT_Full (1 << 12)
+#undef ADVERTISED_10000baseT_Full
+#define ADVERTISED_10000baseT_Full (1 << 12)
+#undef SPEED_10000
+#define SPEED_10000 10000
+
+#undef ethtool_ops
+#define ethtool_ops _kc_ethtool_ops
+
+struct _kc_ethtool_ops {
+ int (*get_settings)(struct net_device *, struct ethtool_cmd *);
+ int (*set_settings)(struct net_device *, struct ethtool_cmd *);
+ void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
+ int (*get_regs_len)(struct net_device *);
+ void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
+ void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);
+ int (*set_wol)(struct net_device *, struct ethtool_wolinfo *);
+ u32 (*get_msglevel)(struct net_device *);
+ void (*set_msglevel)(struct net_device *, u32);
+ int (*nway_reset)(struct net_device *);
+ u32 (*get_link)(struct net_device *);
+ int (*get_eeprom_len)(struct net_device *);
+ int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
+ int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
+ int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);
+ int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);
+ void (*get_pauseparam)(struct net_device *,
+ struct ethtool_pauseparam*);
+ int (*set_pauseparam)(struct net_device *,
+ struct ethtool_pauseparam*);
+ u32 (*get_rx_csum)(struct net_device *);
+ int (*set_rx_csum)(struct net_device *, u32);
+ u32 (*get_tx_csum)(struct net_device *);
+ int (*set_tx_csum)(struct net_device *, u32);
+ u32 (*get_sg)(struct net_device *);
+ int (*set_sg)(struct net_device *, u32);
+ u32 (*get_tso)(struct net_device *);
+ int (*set_tso)(struct net_device *, u32);
+ int (*self_test_count)(struct net_device *);
+ void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
+ void (*get_strings)(struct net_device *, u32 stringset, u8 *);
+ int (*phys_id)(struct net_device *, u32);
+ int (*get_stats_count)(struct net_device *);
+ void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *,
+ u64 *);
+} *ethtool_ops = NULL;
+
+#undef SET_ETHTOOL_OPS
+#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops))
+
+/*
+ * Some useful ethtool_ops methods that are device independent. If we find that
+ * all drivers want to do the same thing here, we can turn these into dev_()
+ * function calls.
+ */
+
+#undef ethtool_op_get_link
+#define ethtool_op_get_link _kc_ethtool_op_get_link
+u32 _kc_ethtool_op_get_link(struct net_device *dev)
+{
+ return netif_carrier_ok(dev) ? 1 : 0;
+}
+
+#undef ethtool_op_get_tx_csum
+#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum
+u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev)
+{
+#ifdef NETIF_F_IP_CSUM
+ return (dev->features & NETIF_F_IP_CSUM) != 0;
+#else
+ return 0;
+#endif
+}
+
+#undef ethtool_op_set_tx_csum
+#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum
+int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
+{
+#ifdef NETIF_F_IP_CSUM
+ if (data)
+#ifdef NETIF_F_IPV6_CSUM
+ dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ else
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+#else
+ dev->features |= NETIF_F_IP_CSUM;
+ else
+ dev->features &= ~NETIF_F_IP_CSUM;
+#endif
+#endif
+
+ return 0;
+}
+
+#undef ethtool_op_get_sg
+#define ethtool_op_get_sg _kc_ethtool_op_get_sg
+u32 _kc_ethtool_op_get_sg(struct net_device *dev)
+{
+#ifdef NETIF_F_SG
+ return (dev->features & NETIF_F_SG) != 0;
+#else
+ return 0;
+#endif
+}
+
+#undef ethtool_op_set_sg
+#define ethtool_op_set_sg _kc_ethtool_op_set_sg
+int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data)
+{
+#ifdef NETIF_F_SG
+ if (data)
+ dev->features |= NETIF_F_SG;
+ else
+ dev->features &= ~NETIF_F_SG;
+#endif
+
+ return 0;
+}
+
+#undef ethtool_op_get_tso
+#define ethtool_op_get_tso _kc_ethtool_op_get_tso
+u32 _kc_ethtool_op_get_tso(struct net_device *dev)
+{
+#ifdef NETIF_F_TSO
+ return (dev->features & NETIF_F_TSO) != 0;
+#else
+ return 0;
+#endif
+}
+
+#undef ethtool_op_set_tso
+#define ethtool_op_set_tso _kc_ethtool_op_set_tso
+int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data)
+{
+#ifdef NETIF_F_TSO
+ if (data)
+ dev->features |= NETIF_F_TSO;
+ else
+ dev->features &= ~NETIF_F_TSO;
+#endif
+
+ return 0;
+}
+
+/* Handlers for each ethtool command */
+
+static int ethtool_get_settings(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_cmd cmd = { ETHTOOL_GSET };
+ int err;
+
+ if (!ethtool_ops->get_settings)
+ return -EOPNOTSUPP;
+
+ err = ethtool_ops->get_settings(dev, &cmd);
+ if (err < 0)
+ return err;
+
+ if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_settings(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_cmd cmd;
+
+ if (!ethtool_ops->set_settings)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ return -EFAULT;
+
+ return ethtool_ops->set_settings(dev, &cmd);
+}
+
+static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_drvinfo info;
+ struct ethtool_ops *ops = ethtool_ops;
+
+ if (!ops->get_drvinfo)
+ return -EOPNOTSUPP;
+
+ memset(&info, 0, sizeof(info));
+ info.cmd = ETHTOOL_GDRVINFO;
+ ops->get_drvinfo(dev, &info);
+
+ if (ops->self_test_count)
+ info.testinfo_len = ops->self_test_count(dev);
+ if (ops->get_stats_count)
+ info.n_stats = ops->get_stats_count(dev);
+ if (ops->get_regs_len)
+ info.regdump_len = ops->get_regs_len(dev);
+ if (ops->get_eeprom_len)
+ info.eedump_len = ops->get_eeprom_len(dev);
+
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_get_regs(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_regs regs;
+ struct ethtool_ops *ops = ethtool_ops;
+ void *regbuf;
+ int reglen, ret;
+
+ if (!ops->get_regs || !ops->get_regs_len)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&regs, useraddr, sizeof(regs)))
+ return -EFAULT;
+
+ reglen = ops->get_regs_len(dev);
+ if (regs.len > reglen)
+ regs.len = reglen;
+
+ regbuf = kmalloc(reglen, GFP_USER);
+ if (!regbuf)
+ return -ENOMEM;
+
+ ops->get_regs(dev, &regs, regbuf);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &regs, sizeof(regs)))
+ goto out;
+ useraddr += offsetof(struct ethtool_regs, data);
+ if (copy_to_user(useraddr, regbuf, reglen))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(regbuf);
+ return ret;
+}
+
+static int ethtool_get_wol(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
+
+ if (!ethtool_ops->get_wol)
+ return -EOPNOTSUPP;
+
+ ethtool_ops->get_wol(dev, &wol);
+
+ if (copy_to_user(useraddr, &wol, sizeof(wol)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_wol(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_wolinfo wol;
+
+ if (!ethtool_ops->set_wol)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&wol, useraddr, sizeof(wol)))
+ return -EFAULT;
+
+ return ethtool_ops->set_wol(dev, &wol);
+}
+
+static int ethtool_get_msglevel(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GMSGLVL };
+
+ if (!ethtool_ops->get_msglevel)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_msglevel(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_msglevel(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_msglevel)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ ethtool_ops->set_msglevel(dev, edata.data);
+ return 0;
+}
+
+static int ethtool_nway_reset(struct net_device *dev)
+{
+ if (!ethtool_ops->nway_reset)
+ return -EOPNOTSUPP;
+
+ return ethtool_ops->nway_reset(dev);
+}
+
+static int ethtool_get_link(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GLINK };
+
+ if (!ethtool_ops->get_link)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_link(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_get_eeprom(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_eeprom eeprom;
+ struct ethtool_ops *ops = ethtool_ops;
+ u8 *data;
+ int ret;
+
+ if (!ops->get_eeprom || !ops->get_eeprom_len)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
+ return -EFAULT;
+
+ /* Check for wrap and zero */
+ if (eeprom.offset + eeprom.len <= eeprom.offset)
+ return -EINVAL;
+
+ /* Check for exceeding total eeprom len */
+ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+ return -EINVAL;
+
+ data = kmalloc(eeprom.len, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
+ goto out;
+
+ ret = ops->get_eeprom(dev, &eeprom, data);
+ if (ret)
+ goto out;
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
+ goto out;
+ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_set_eeprom(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_eeprom eeprom;
+ struct ethtool_ops *ops = ethtool_ops;
+ u8 *data;
+ int ret;
+
+ if (!ops->set_eeprom || !ops->get_eeprom_len)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
+ return -EFAULT;
+
+ /* Check for wrap and zero */
+ if (eeprom.offset + eeprom.len <= eeprom.offset)
+ return -EINVAL;
+
+ /* Check for exceeding total eeprom len */
+ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+ return -EINVAL;
+
+ data = kmalloc(eeprom.len, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len))
+ goto out;
+
+ ret = ops->set_eeprom(dev, &eeprom, data);
+ if (ret)
+ goto out;
+
+ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len))
+ ret = -EFAULT;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_get_coalesce(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE };
+
+ if (!ethtool_ops->get_coalesce)
+ return -EOPNOTSUPP;
+
+ ethtool_ops->get_coalesce(dev, &coalesce);
+
+ if (copy_to_user(useraddr, &coalesce, sizeof(coalesce)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_coalesce(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_coalesce coalesce;
+
+	if (!ethtool_ops->set_coalesce)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
+ return -EFAULT;
+
+ return ethtool_ops->set_coalesce(dev, &coalesce);
+}
+
+static int ethtool_get_ringparam(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM };
+
+ if (!ethtool_ops->get_ringparam)
+ return -EOPNOTSUPP;
+
+ ethtool_ops->get_ringparam(dev, &ringparam);
+
+ if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_ringparam(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_ringparam ringparam;
+
+	if (!ethtool_ops->set_ringparam)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
+ return -EFAULT;
+
+ return ethtool_ops->set_ringparam(dev, &ringparam);
+}
+
+static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
+
+ if (!ethtool_ops->get_pauseparam)
+ return -EOPNOTSUPP;
+
+ ethtool_ops->get_pauseparam(dev, &pauseparam);
+
+ if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_pauseparam pauseparam;
+
+	if (!ethtool_ops->set_pauseparam)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
+ return -EFAULT;
+
+ return ethtool_ops->set_pauseparam(dev, &pauseparam);
+}
+
+static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GRXCSUM };
+
+ if (!ethtool_ops->get_rx_csum)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_rx_csum(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_rx_csum)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ ethtool_ops->set_rx_csum(dev, edata.data);
+ return 0;
+}
+
+static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GTXCSUM };
+
+ if (!ethtool_ops->get_tx_csum)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_tx_csum(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_tx_csum)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ return ethtool_ops->set_tx_csum(dev, edata.data);
+}
+
+static int ethtool_get_sg(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GSG };
+
+ if (!ethtool_ops->get_sg)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_sg(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_sg(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_sg)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ return ethtool_ops->set_sg(dev, edata.data);
+}
+
+static int ethtool_get_tso(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GTSO };
+
+ if (!ethtool_ops->get_tso)
+ return -EOPNOTSUPP;
+
+ edata.data = ethtool_ops->get_tso(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_tso(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!ethtool_ops->set_tso)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ return ethtool_ops->set_tso(dev, edata.data);
+}
+
+static int ethtool_self_test(struct net_device *dev, char *useraddr)
+{
+ struct ethtool_test test;
+ struct ethtool_ops *ops = ethtool_ops;
+ u64 *data;
+ int ret;
+
+ if (!ops->self_test || !ops->self_test_count)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&test, useraddr, sizeof(test)))
+ return -EFAULT;
+
+ test.len = ops->self_test_count(dev);
+ data = kmalloc(test.len * sizeof(u64), GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->self_test(dev, &test, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &test, sizeof(test)))
+ goto out;
+ useraddr += sizeof(test);
+ if (copy_to_user(useraddr, data, test.len * sizeof(u64)))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_get_strings(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_gstrings gstrings;
+ struct ethtool_ops *ops = ethtool_ops;
+ u8 *data;
+ int ret;
+
+ if (!ops->get_strings)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
+ return -EFAULT;
+
+ switch (gstrings.string_set) {
+ case ETH_SS_TEST:
+ if (!ops->self_test_count)
+ return -EOPNOTSUPP;
+ gstrings.len = ops->self_test_count(dev);
+ break;
+ case ETH_SS_STATS:
+ if (!ops->get_stats_count)
+ return -EOPNOTSUPP;
+ gstrings.len = ops->get_stats_count(dev);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->get_strings(dev, gstrings.string_set, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
+ goto out;
+ useraddr += sizeof(gstrings);
+ if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_phys_id(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_value id;
+
+ if (!ethtool_ops->phys_id)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&id, useraddr, sizeof(id)))
+ return -EFAULT;
+
+ return ethtool_ops->phys_id(dev, id.data);
+}
+
+static int ethtool_get_stats(struct net_device *dev, void *useraddr)
+{
+ struct ethtool_stats stats;
+ struct ethtool_ops *ops = ethtool_ops;
+ u64 *data;
+ int ret;
+
+ if (!ops->get_ethtool_stats || !ops->get_stats_count)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&stats, useraddr, sizeof(stats)))
+ return -EFAULT;
+
+ stats.n_stats = ops->get_stats_count(dev);
+ data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->get_ethtool_stats(dev, &stats, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &stats, sizeof(stats)))
+ goto out;
+ useraddr += sizeof(stats);
+ if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+/* The main entry point in this file. Called from net/core/dev.c */
+
+#define ETHTOOL_OPS_COMPAT
+int ethtool_ioctl(struct ifreq *ifr)
+{
+ struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
+ void *useraddr = (void *) ifr->ifr_data;
+ u32 ethcmd;
+
+ /*
+ * XXX: This can be pushed down into the ethtool_* handlers that
+ * need it. Keep existing behavior for the moment.
+ */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!dev || !netif_device_present(dev))
+ return -ENODEV;
+
+ if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
+ return -EFAULT;
+
+ switch (ethcmd) {
+ case ETHTOOL_GSET:
+ return ethtool_get_settings(dev, useraddr);
+ case ETHTOOL_SSET:
+ return ethtool_set_settings(dev, useraddr);
+ case ETHTOOL_GDRVINFO:
+ return ethtool_get_drvinfo(dev, useraddr);
+ case ETHTOOL_GREGS:
+ return ethtool_get_regs(dev, useraddr);
+ case ETHTOOL_GWOL:
+ return ethtool_get_wol(dev, useraddr);
+ case ETHTOOL_SWOL:
+ return ethtool_set_wol(dev, useraddr);
+ case ETHTOOL_GMSGLVL:
+ return ethtool_get_msglevel(dev, useraddr);
+ case ETHTOOL_SMSGLVL:
+ return ethtool_set_msglevel(dev, useraddr);
+ case ETHTOOL_NWAY_RST:
+ return ethtool_nway_reset(dev);
+ case ETHTOOL_GLINK:
+ return ethtool_get_link(dev, useraddr);
+ case ETHTOOL_GEEPROM:
+ return ethtool_get_eeprom(dev, useraddr);
+ case ETHTOOL_SEEPROM:
+ return ethtool_set_eeprom(dev, useraddr);
+ case ETHTOOL_GCOALESCE:
+ return ethtool_get_coalesce(dev, useraddr);
+ case ETHTOOL_SCOALESCE:
+ return ethtool_set_coalesce(dev, useraddr);
+ case ETHTOOL_GRINGPARAM:
+ return ethtool_get_ringparam(dev, useraddr);
+ case ETHTOOL_SRINGPARAM:
+ return ethtool_set_ringparam(dev, useraddr);
+ case ETHTOOL_GPAUSEPARAM:
+ return ethtool_get_pauseparam(dev, useraddr);
+ case ETHTOOL_SPAUSEPARAM:
+ return ethtool_set_pauseparam(dev, useraddr);
+ case ETHTOOL_GRXCSUM:
+ return ethtool_get_rx_csum(dev, useraddr);
+ case ETHTOOL_SRXCSUM:
+ return ethtool_set_rx_csum(dev, useraddr);
+ case ETHTOOL_GTXCSUM:
+ return ethtool_get_tx_csum(dev, useraddr);
+ case ETHTOOL_STXCSUM:
+ return ethtool_set_tx_csum(dev, useraddr);
+ case ETHTOOL_GSG:
+ return ethtool_get_sg(dev, useraddr);
+ case ETHTOOL_SSG:
+ return ethtool_set_sg(dev, useraddr);
+ case ETHTOOL_GTSO:
+ return ethtool_get_tso(dev, useraddr);
+ case ETHTOOL_STSO:
+ return ethtool_set_tso(dev, useraddr);
+ case ETHTOOL_TEST:
+ return ethtool_self_test(dev, useraddr);
+ case ETHTOOL_GSTRINGS:
+ return ethtool_get_strings(dev, useraddr);
+ case ETHTOOL_PHYS_ID:
+ return ethtool_phys_id(dev, useraddr);
+ case ETHTOOL_GSTATS:
+ return ethtool_get_stats(dev, useraddr);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
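+
+/*
+ * Illustrative wiring (hypothetical names, not from the original driver):
+ * a driver using this compat layer registers its ops and routes SIOCETHTOOL
+ * from its net_device ioctl handler, e.g.:
+ *
+ *	SET_ETHTOOL_OPS(netdev, &my_ethtool_ops);
+ *	...
+ *	case SIOCETHTOOL:
+ *		return ethtool_ioctl(ifr);
+ */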
+
+#define mii_if_info _kc_mii_if_info
+struct _kc_mii_if_info {
+ int phy_id;
+ int advertising;
+ int phy_id_mask;
+ int reg_num_mask;
+
+ unsigned int full_duplex : 1; /* is full duplex? */
+ unsigned int force_media : 1; /* is autoneg. disabled? */
+
+ struct net_device *dev;
+ int (*mdio_read) (struct net_device *dev, int phy_id, int location);
+ void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val);
+};
+
+struct ethtool_cmd;
+struct mii_ioctl_data;
+
+#undef mii_link_ok
+#define mii_link_ok _kc_mii_link_ok
+#undef mii_nway_restart
+#define mii_nway_restart _kc_mii_nway_restart
+#undef mii_ethtool_gset
+#define mii_ethtool_gset _kc_mii_ethtool_gset
+#undef mii_ethtool_sset
+#define mii_ethtool_sset _kc_mii_ethtool_sset
+#undef mii_check_link
+#define mii_check_link _kc_mii_check_link
+extern int _kc_mii_link_ok (struct mii_if_info *mii);
+extern int _kc_mii_nway_restart (struct mii_if_info *mii);
+extern int _kc_mii_ethtool_gset(struct mii_if_info *mii,
+ struct ethtool_cmd *ecmd);
+extern int _kc_mii_ethtool_sset(struct mii_if_info *mii,
+ struct ethtool_cmd *ecmd);
+extern void _kc_mii_check_link (struct mii_if_info *mii);
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) )
+#undef generic_mii_ioctl
+#define generic_mii_ioctl _kc_generic_mii_ioctl
+extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
+ struct mii_ioctl_data *mii_data, int cmd,
+ unsigned int *duplex_changed);
+#endif /* > 2.4.6 */
+
+
+struct _kc_pci_dev_ext {
+ struct pci_dev *dev;
+ void *pci_drvdata;
+ struct pci_driver *driver;
+};
+
+struct _kc_net_dev_ext {
+ struct net_device *dev;
+ unsigned int carrier;
+};
+
+
+/**************************************/
+/* mii support */
+
+int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
+{
+ struct net_device *dev = mii->dev;
+ u32 advert, bmcr, lpa, nego;
+
+ ecmd->supported =
+ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
+
+ /* only supports twisted-pair */
+ ecmd->port = PORT_MII;
+
+ /* only supports internal transceiver */
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ /* this isn't fully supported at higher layers */
+ ecmd->phy_address = mii->phy_id;
+
+ ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
+ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
+ if (advert & ADVERTISE_10HALF)
+ ecmd->advertising |= ADVERTISED_10baseT_Half;
+ if (advert & ADVERTISE_10FULL)
+ ecmd->advertising |= ADVERTISED_10baseT_Full;
+ if (advert & ADVERTISE_100HALF)
+ ecmd->advertising |= ADVERTISED_100baseT_Half;
+ if (advert & ADVERTISE_100FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA);
+ if (bmcr & BMCR_ANENABLE) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->autoneg = AUTONEG_ENABLE;
+
+ nego = mii_nway_result(advert & lpa);
+ if (nego == LPA_100FULL || nego == LPA_100HALF)
+ ecmd->speed = SPEED_100;
+ else
+ ecmd->speed = SPEED_10;
+ if (nego == LPA_100FULL || nego == LPA_10FULL) {
+ ecmd->duplex = DUPLEX_FULL;
+ mii->full_duplex = 1;
+ } else {
+ ecmd->duplex = DUPLEX_HALF;
+ mii->full_duplex = 0;
+ }
+ } else {
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
+ ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ }
+
+ /* ignore maxtxpkt, maxrxpkt for now */
+
+ return 0;
+}
+
+int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
+{
+ struct net_device *dev = mii->dev;
+
+ if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
+ return -EINVAL;
+ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ if (ecmd->port != PORT_MII)
+ return -EINVAL;
+ if (ecmd->transceiver != XCVR_INTERNAL)
+ return -EINVAL;
+ if (ecmd->phy_address != mii->phy_id)
+ return -EINVAL;
+ if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+
+ /* ignore supported, maxtxpkt, maxrxpkt */
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ u32 bmcr, advert, tmp;
+
+ if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full)) == 0)
+ return -EINVAL;
+
+ /* advertise only what has been requested */
+ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE);
+ tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+		if (ecmd->advertising & ADVERTISED_10baseT_Half)
+			tmp |= ADVERTISE_10HALF;
+		if (ecmd->advertising & ADVERTISED_10baseT_Full)
+			tmp |= ADVERTISE_10FULL;
+		if (ecmd->advertising & ADVERTISED_100baseT_Half)
+			tmp |= ADVERTISE_100HALF;
+		if (ecmd->advertising & ADVERTISED_100baseT_Full)
+			tmp |= ADVERTISE_100FULL;
+ if (advert != tmp) {
+ mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
+ mii->advertising = tmp;
+ }
+
+ /* turn on autonegotiation, and force a renegotiate */
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr);
+
+ mii->force_media = 0;
+ } else {
+ u32 bmcr, tmp;
+
+ /* turn off auto negotiation, set speed and duplexity */
+ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
+ tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
+ if (ecmd->speed == SPEED_100)
+ tmp |= BMCR_SPEED100;
+ if (ecmd->duplex == DUPLEX_FULL) {
+ tmp |= BMCR_FULLDPLX;
+ mii->full_duplex = 1;
+ } else
+ mii->full_duplex = 0;
+ if (bmcr != tmp)
+ mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp);
+
+ mii->force_media = 1;
+ }
+ return 0;
+}
+
+int _kc_mii_link_ok (struct mii_if_info *mii)
+{
+ /* first, a dummy read, needed to latch some MII phys */
+ mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
+ if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS)
+ return 1;
+ return 0;
+}
+
+int _kc_mii_nway_restart (struct mii_if_info *mii)
+{
+ int bmcr;
+ int r = -EINVAL;
+
+ /* if autoneg is off, it's an error */
+ bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
+
+ if (bmcr & BMCR_ANENABLE) {
+ bmcr |= BMCR_ANRESTART;
+ mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr);
+ r = 0;
+ }
+
+ return r;
+}
+
+void _kc_mii_check_link (struct mii_if_info *mii)
+{
+ int cur_link = mii_link_ok(mii);
+ int prev_link = netif_carrier_ok(mii->dev);
+
+ if (cur_link && !prev_link)
+ netif_carrier_on(mii->dev);
+ else if (prev_link && !cur_link)
+ netif_carrier_off(mii->dev);
+}
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) )
+int _kc_generic_mii_ioctl(struct mii_if_info *mii_if,
+ struct mii_ioctl_data *mii_data, int cmd,
+ unsigned int *duplex_chg_out)
+{
+ int rc = 0;
+ unsigned int duplex_changed = 0;
+
+ if (duplex_chg_out)
+ *duplex_chg_out = 0;
+
+ mii_data->phy_id &= mii_if->phy_id_mask;
+ mii_data->reg_num &= mii_if->reg_num_mask;
+
+ switch(cmd) {
+ case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */
+ case SIOCGMIIPHY:
+ mii_data->phy_id = mii_if->phy_id;
+ /* fall through */
+
+ case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */
+ case SIOCGMIIREG:
+ mii_data->val_out =
+ mii_if->mdio_read(mii_if->dev, mii_data->phy_id,
+ mii_data->reg_num);
+ break;
+
+ case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */
+ case SIOCSMIIREG: {
+ u16 val = mii_data->val_in;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (mii_data->phy_id == mii_if->phy_id) {
+ switch(mii_data->reg_num) {
+ case MII_BMCR: {
+ unsigned int new_duplex = 0;
+ if (val & (BMCR_RESET|BMCR_ANENABLE))
+ mii_if->force_media = 0;
+ else
+ mii_if->force_media = 1;
+ if (mii_if->force_media &&
+ (val & BMCR_FULLDPLX))
+ new_duplex = 1;
+ if (mii_if->full_duplex != new_duplex) {
+ duplex_changed = 1;
+ mii_if->full_duplex = new_duplex;
+ }
+ break;
+ }
+ case MII_ADVERTISE:
+ mii_if->advertising = val;
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+ }
+
+ mii_if->mdio_write(mii_if->dev, mii_data->phy_id,
+ mii_data->reg_num, val);
+ break;
+ }
+
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+
+ if ((rc == 0) && (duplex_chg_out) && (duplex_changed))
+ *duplex_chg_out = 1;
+
+ return rc;
+}
+#endif /* > 2.4.6 */
+
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 371dc91c4cf5..e0e7998f57f3 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -59,6 +59,10 @@
#include <mach/pinmux.h>
#include <mach/pinmux-t12.h>
+#ifdef CONFIG_MACH_APALIS_TK1
+#include "../../../arch/arm/mach-tegra/board-apalis-tk1.h"
+#endif
+
/* register definitions */
#define AFI_OFFSET 0x3800
#define PADS_OFFSET 0x3000
@@ -278,6 +282,10 @@ struct tegra_pcie_info {
struct regulator *regulator_hvdd;
struct regulator *regulator_pexio;
struct regulator *regulator_avdd_plle;
+#ifdef CONFIG_MACH_APALIS_TK1
+ struct regulator *regulator_apalis_tk1_ldo9;
+ struct regulator *regulator_apalis_tk1_ldo10;
+#endif /* CONFIG_MACH_APALIS_TK1 */
struct clk *pcie_xclk;
struct clk *pcie_mselect;
struct clk *pcie_emc;
@@ -304,6 +312,21 @@ static unsigned long tegra_pcie_mselect_rate = TEGRA_PCIE_MSELECT_CLK_204;
static unsigned long tegra_pcie_xclk_rate = TEGRA_PCIE_XCLK_250;
static unsigned long tegra_pcie_emc_rate = TEGRA_PCIE_EMC_CLK_102;
+#ifdef CONFIG_MACH_APALIS_TK1
+/* PCIe switch reset errata workaround, enabled unless pex_perst=0 is passed */
+int g_pex_perst = 1;
+
+/* Kernel command line option to disable the PCIe switch reset errata workaround */
+static int __init disable_pex_perst(char *s)
+{
+ if (!(*s) || !strcmp(s, "0"))
+ g_pex_perst = 0;
+
+ return 0;
+}
+__setup("pex_perst=", disable_pex_perst);
+#endif /* CONFIG_MACH_APALIS_TK1 */
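The pex_perst= handling just above follows the usual __setup() boot-parameter
pattern: the callback runs during early init and clears a global flag that the
PCIe reset path consults later. A hedged usage sketch follows; the U-Boot
bootargs line is only an illustration and is not part of this patch.

/*
 * setenv bootargs ${bootargs} pex_perst=0
 *
 * Either "pex_perst=0" or a bare "pex_perst=" clears g_pex_perst, so
 * tegra_pcie_port_reset() skips toggling PEX_PERST_N while still pulsing
 * LAN_RESET_N, RESET_MOCI_CTRL and the AFI-level reset.
 */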
+
static inline void afi_writel(u32 value, unsigned long offset)
{
writel(value, offset + AFI_OFFSET + tegra_pcie.regs);
@@ -957,12 +980,88 @@ static int tegra_pcie_enable_pads(bool enable)
return err;
}
+static void tegra_pcie_port_reset(struct tegra_pcie_port *pp, u32 reset_reg)
+{
+ u32 reg;
+
+ PR_FUNC_LINE;
+#ifdef CONFIG_MACH_APALIS_TK1
+	/* Reset the PLX PEX 8605 PCIe switch plus the PCIe devices on the
+	 * Apalis Evaluation Board */
+ if (g_pex_perst) gpio_direction_output(PEX_PERST_N, 0);
+ gpio_direction_output(RESET_MOCI_CTRL, 0);
+
+ /* Reset I210 Gigabit Ethernet Controller */
+ gpio_direction_output(LAN_RESET_N, 0);
+
+ /*
+ * Make sure we don't get any back feeding from LAN_WAKE_N resp.
+ * DEV_OFF_N
+ */
+ gpio_direction_output(LAN_WAKE_N, 0);
+ gpio_direction_output(LAN_DEV_OFF_N, 0);
+
+ /* Make sure LDO9 and LDO10 are initially disabled @ 0V */
+ if (regulator_is_enabled(tegra_pcie.regulator_apalis_tk1_ldo9))
+ regulator_disable(tegra_pcie.regulator_apalis_tk1_ldo9);
+ if (regulator_is_enabled(tegra_pcie.regulator_apalis_tk1_ldo10))
+ regulator_disable(tegra_pcie.regulator_apalis_tk1_ldo10);
+
+ mdelay(100);
+
+ /* Make sure LAN_WAKE_N gets re-configured as a GPIO input */
+ gpio_direction_input(LAN_WAKE_N);
+
+ /* Make sure controller gets enabled by disabling DEV_OFF_N */
+ gpio_set_value(LAN_DEV_OFF_N, 1);
+
+ /*
+ * Enable LDO9 and LDO10 for +V3.3_ETH on patched prototype
+ * V1.0A and sample V1.0B and newer modules
+ */
+ if (regulator_enable(tegra_pcie.regulator_apalis_tk1_ldo9) < 0) {
+ pr_err("pcie: couldn't enable regulator i210_vdd3p3_ldo9\n");
+ return;
+ }
+ if (regulator_enable(tegra_pcie.regulator_apalis_tk1_ldo10) < 0) {
+ pr_err("pcie: couldn't enable regulator i210_vdd3p3_ldo10\n");
+ return;
+ }
+#endif /* CONFIG_MACH_APALIS_TK1 */
+
+ /* Pulse the PEX reset */
+ reg = afi_readl(reset_reg) & ~AFI_PEX_CTRL_RST;
+ afi_writel(reg, reset_reg);
+
+ /* Must be asserted for 100 ms after power and clocks are stable */
+ mdelay(100);
+
+ reg = afi_readl(reset_reg) | AFI_PEX_CTRL_RST;
+ afi_writel(reg, reset_reg);
+
+#ifdef CONFIG_MACH_APALIS_TK1
+ gpio_set_value(LAN_RESET_N, 1);
+
+ if (g_pex_perst) gpio_set_value(PEX_PERST_N, 1);
+	/* Err_5: PEX_REFCLK_OUTpx/nx clock outputs are not guaranteed until
+	 * 900 us after PEX_PERST# de-assertion */
+ if (g_pex_perst) mdelay(1);
+ gpio_set_value(RESET_MOCI_CTRL, 1);
+
+ /* Release I210 Gigabit Ethernet Controller Reset */
+ gpio_set_value(LAN_RESET_N, 1);
+#endif /* CONFIG_MACH_APALIS_TK1 */
+}
+
static int tegra_pcie_enable_controller(void)
{
u32 val, reg;
int i, ret = 0, lane_owner;
+ struct tegra_pcie_port *pp;
+ int ctrl_offset = AFI_PEX0_CTRL;
PR_FUNC_LINE;
+
/* Enable slot clock and ensure reset signal is assert */
for (i = 0; i < ARRAY_SIZE(pex_controller_registers); i++) {
reg = pex_controller_registers[i];
@@ -1035,18 +1134,21 @@ static int tegra_pcie_enable_controller(void)
/* Take the PCIe interface module out of reset */
tegra_periph_reset_deassert(tegra_pcie.pcie_xclk);
- /* deassert PEX reset signal */
- for (i = 0; i < ARRAY_SIZE(pex_controller_registers); i++) {
- val = afi_readl(pex_controller_registers[i]);
- val |= AFI_PEX_CTRL_RST;
- afi_writel(val, pex_controller_registers[i]);
+ /* Wait for clock to latch (min of 100us) */
+ udelay(100);
+
+ pp = tegra_pcie.port + tegra_pcie.num_ports;
+ for (i = 0; i < MAX_PCIE_SUPPORTED_PORTS; i++) {
+ ctrl_offset += (i * 8);
+ tegra_pcie_port_reset(pp, ctrl_offset);
}
+
return ret;
}
-#ifdef USE_REGULATORS
static int tegra_pcie_enable_regulators(void)
{
+ int ret;
PR_FUNC_LINE;
if (tegra_pcie.power_rails_enabled) {
pr_debug("PCIE: Already power rails enabled");
@@ -1089,12 +1191,37 @@ static int tegra_pcie_enable_regulators(void)
tegra_pcie.regulator_avdd_plle = 0;
}
}
+
+#ifdef CONFIG_MACH_APALIS_TK1
+ if (tegra_pcie.regulator_apalis_tk1_ldo9 == NULL) {
+ tegra_pcie.regulator_apalis_tk1_ldo9 = regulator_get(tegra_pcie.dev, "i210_vdd3p3_ldo9");
+ if (IS_ERR(tegra_pcie.regulator_apalis_tk1_ldo9)) {
+ pr_err("pcie: couldn't get regulator i210_vdd3p3_ldo9\n");
+ tegra_pcie.regulator_apalis_tk1_ldo9 = 0;
+ }
+ }
+
+ if (tegra_pcie.regulator_apalis_tk1_ldo10 == NULL) {
+ tegra_pcie.regulator_apalis_tk1_ldo10 = regulator_get(tegra_pcie.dev, "i210_vdd3p3_ldo10");
+ if (IS_ERR(tegra_pcie.regulator_apalis_tk1_ldo10)) {
+ pr_err("pcie: couldn't get regulator i210_vdd3p3_ldo10\n");
+ tegra_pcie.regulator_apalis_tk1_ldo10 = 0;
+ }
+ }
+#endif /* CONFIG_MACH_APALIS_TK1 */
+
if (tegra_pcie.regulator_hvdd)
- regulator_enable(tegra_pcie.regulator_hvdd);
+ ret = regulator_enable(tegra_pcie.regulator_hvdd);
if (tegra_pcie.regulator_pexio)
- regulator_enable(tegra_pcie.regulator_pexio);
+ ret = regulator_enable(tegra_pcie.regulator_pexio);
if (tegra_pcie.regulator_avdd_plle)
- regulator_enable(tegra_pcie.regulator_avdd_plle);
+ ret = regulator_enable(tegra_pcie.regulator_avdd_plle);
+#ifdef CONFIG_MACH_APALIS_TK1
+ if (tegra_pcie.regulator_apalis_tk1_ldo9)
+ ret = regulator_enable(tegra_pcie.regulator_apalis_tk1_ldo9);
+ if (tegra_pcie.regulator_apalis_tk1_ldo10)
+ ret = regulator_enable(tegra_pcie.regulator_apalis_tk1_ldo10);
+#endif /* CONFIG_MACH_APALIS_TK1 */
return 0;
}
@@ -1110,19 +1237,14 @@ static int tegra_pcie_disable_regulators(void)
}
if (tegra_pcie.regulator_hvdd)
err = regulator_disable(tegra_pcie.regulator_hvdd);
- if (err)
- goto err_exit;
if (tegra_pcie.regulator_pexio)
err = regulator_disable(tegra_pcie.regulator_pexio);
- if (err)
- goto err_exit;
if (tegra_pcie.regulator_avdd_plle)
err = regulator_disable(tegra_pcie.regulator_avdd_plle);
tegra_pcie.power_rails_enabled = 0;
err_exit:
return err;
}
-#endif
static int tegra_pcie_power_ungate(void)
{
@@ -1377,6 +1499,15 @@ static int tegra_pcie_get_resources(void)
pr_err("PCIE: failed to get clocks: %d\n", err);
goto err_clk_get;
}
+
+ err = tegra_pcie_enable_regulators();
+ if (err) {
+ pr_err("PCIE: failed to setup regulators: %d\n", err);
+ goto err_reg_get;
+ }
+	/* wait 100 ms for the regulators to power up */
+	msleep(100);
+
err = tegra_pcie_power_on();
if (err) {
pr_err("PCIE: Failed to power on: %d\n", err);
@@ -1404,6 +1535,7 @@ static int tegra_pcie_get_resources(void)
err_pwr_on:
tegra_pcie_power_off(false);
+err_reg_get:
err_clk_get:
tegra_pcie_clocks_put();
return err;
@@ -1452,15 +1584,8 @@ static bool tegra_pcie_check_link(struct tegra_pcie_port *pp, int idx,
}
retry:
- if (--retries) {
- /* Pulse the PEX reset */
- reg = afi_readl(reset_reg) & ~AFI_PEX_CTRL_RST;
- afi_writel(reg, reset_reg);
- reg = afi_readl(reset_reg) | AFI_PEX_CTRL_RST;
- afi_writel(reg, reset_reg);
- }
-
- } while (retries);
+ tegra_pcie_port_reset(pp, reset_reg);
+ } while (--retries);
return false;
}
@@ -1888,6 +2013,15 @@ static int __init tegra_pcie_init(void)
tegra_pcie_power_off(false);
return err;
}
+
+#ifdef CONFIG_MACH_APALIS_TK1
+ gpio_request(LAN_DEV_OFF_N, "LAN_DEV_OFF_N");
+ gpio_request(LAN_RESET_N, "LAN_RESET_N");
+ gpio_request(LAN_WAKE_N, "LAN_WAKE_N");
+ if (g_pex_perst) gpio_request(PEX_PERST_N, "PEX_PERST_N");
+ gpio_request(RESET_MOCI_CTRL, "RESET_MOCI_CTRL");
+#endif /* CONFIG_MACH_APALIS_TK1 */
+
err = tegra_pcie_enable_controller();
if (err) {
pr_err("PCIE: enable controller failed\n");
@@ -1977,7 +2111,6 @@ static int __init tegra_pcie_probe(struct platform_device *pdev)
static int tegra_pcie_suspend_noirq(struct device *dev)
{
int ret = 0;
-
PR_FUNC_LINE;
/* configure PE_WAKE signal as wake sources */
if (gpio_is_valid(tegra_pcie.plat_data->gpio_wake) &&
@@ -1990,7 +2123,9 @@ static int tegra_pcie_suspend_noirq(struct device *dev)
return ret;
}
}
- return tegra_pcie_power_off(true);
+ ret = tegra_pcie_power_off(true);
+ tegra_pcie_disable_regulators();
+ return ret;
}
static bool tegra_pcie_enable_msi(bool);
@@ -2002,6 +2137,8 @@ static int tegra_pcie_resume_noirq(struct device *dev)
PR_FUNC_LINE;
resume_path = true;
+ tegra_pcie_enable_regulators();
+
if (gpio_is_valid(tegra_pcie.plat_data->gpio_wake) &&
device_may_wakeup(dev)) {
ret = disable_irq_wake(gpio_to_irq(
@@ -2019,7 +2156,12 @@ static int tegra_pcie_resume_noirq(struct device *dev)
pr_err("PCIE: Failed to power on: %d\n", ret);
return ret;
}
- tegra_pcie_enable_pads(true);
+
+ ret = tegra_pcie_enable_pads(true);
+ if (ret) {
+ tegra_pcie_power_off(true);
+ goto exit;
+ }
tegra_pcie_enable_controller();
tegra_pcie_setup_translations();
/* Set up MSI registers, if MSI have been enabled */
@@ -2027,13 +2169,13 @@ static int tegra_pcie_resume_noirq(struct device *dev)
tegra_pcie_check_ports();
if (!tegra_pcie.num_ports) {
- tegra_pcie_power_off(true);
+ ret = tegra_pcie_power_off(true);
goto exit;
}
resume_path = false;
exit:
- return 0;
+ return ret;
}
static int tegra_pcie_resume(struct device *dev)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 2c1075213bec..4a5e4fddaed2 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -394,6 +394,8 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
return;
entry = irq_get_msi_desc(dev->irq);
+ if (entry == NULL)
+ return;
pci_intx_for_msi(dev, 0);
msi_set_enable(dev, 0);
diff --git a/drivers/pinctrl/pinctrl-tegra124.c b/drivers/pinctrl/pinctrl-tegra124.c
index c91640892223..aa4b8b3627d4 100644
--- a/drivers/pinctrl/pinctrl-tegra124.c
+++ b/drivers/pinctrl/pinctrl-tegra124.c
@@ -214,8 +214,8 @@
#define TEGRA_PIN_PFF2 _GPIO(250)
/* All non-GPIO pins follow */
-#define NUM_GPIOS (TEGRA_PIN_PFF2 + 1)
-#define _PIN(offset) (NUM_GPIOS + (offset))
+#define NUM_GPIOS (TEGRA_PIN_PFF2 + 1)
+#define _PIN(offset) (NUM_GPIOS + (offset))
/* Non-GPIO pins */
#define TEGRA_PIN_CORE_PWR_REQ _PIN(0)
@@ -237,7 +237,7 @@
#define TEGRA_PIN_DSI_B_D3_P _PIN(16)
#define TEGRA_PIN_DSI_B_D3_N _PIN(17)
-static const struct pinctrl_pin_desc tegra124_pins[] = {
+static const struct pinctrl_pin_desc tegra124_pins[] = {
PINCTRL_PIN(TEGRA_PIN_CLK_32K_OUT_PA0, "CLK_32K_OUT PA0"),
PINCTRL_PIN(TEGRA_PIN_UART3_CTS_N_PA1, "UART3_CTS_N PA1"),
PINCTRL_PIN(TEGRA_PIN_DAP2_FS_PA2, "DAP2_FS PA2"),
@@ -337,13 +337,13 @@ static const struct pinctrl_pin_desc tegra124_pins[] = {
PINCTRL_PIN(TEGRA_PIN_KB_ROW8_PS0, "KB_ROW8 PS0"),
PINCTRL_PIN(TEGRA_PIN_KB_ROW9_PS1, "KB_ROW9 PS1"),
PINCTRL_PIN(TEGRA_PIN_KB_ROW10_PS2, "KB_ROW10 PS2"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW11_PS3, "KB_ROW10 PS3"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW12_PS4, "KB_ROW10 PS4"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW13_PS5, "KB_ROW10 PS5"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW14_PS6, "KB_ROW10 PS6"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW15_PS7, "KB_ROW10 PS7"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW16_PT0, "KB_ROW10 PT0"),
- PINCTRL_PIN(TEGRA_PIN_KB_ROW17_PT1, "KB_ROW10 PT1"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW11_PS3, "KB_ROW11 PS3"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW12_PS4, "KB_ROW12 PS4"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW13_PS5, "KB_ROW13 PS5"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW14_PS6, "KB_ROW14 PS6"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW15_PS7, "KB_ROW15 PS7"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW16_PT0, "KB_ROW16 PT0"),
+ PINCTRL_PIN(TEGRA_PIN_KB_ROW17_PT1, "KB_ROW17 PT1"),
PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SCL_PT5, "GEN2_I2C_SCL PT5"),
PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SDA_PT6, "GEN2_I2C_SDA PT6"),
PINCTRL_PIN(TEGRA_PIN_SDMMC4_CMD_PT7, "SDMMC4_CMD PT7"),
@@ -418,16 +418,16 @@ static const struct pinctrl_pin_desc tegra124_pins[] = {
PINCTRL_PIN(TEGRA_PIN_HDMI_CEC_PEE3, "HDMI_CEC PEE3"),
PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4, "SDMMC3_CLK_LB_OUT PEE4"),
PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5, "SDMMC3_CLK_LB_IN PEE5"),
+ PINCTRL_PIN(TEGRA_PIN_DP_HPD_PFF0, "DP_HPD PFF0"),
+ PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN2_PFF1, "USB_VBUS_EN2 PFF1"),
+ PINCTRL_PIN(TEGRA_PIN_PFF2, "PFF2"),
PINCTRL_PIN(TEGRA_PIN_CORE_PWR_REQ, "CORE_PWR_REQ"),
PINCTRL_PIN(TEGRA_PIN_CPU_PWR_REQ, "CPU_PWR_REQ"),
- PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
PINCTRL_PIN(TEGRA_PIN_PWR_INT_N, "PWR_INT_N"),
+ PINCTRL_PIN(TEGRA_PIN_GMI_CLK_LB, "GMI_CLK_LB"),
PINCTRL_PIN(TEGRA_PIN_RESET_OUT_N, "RESET_OUT_N"),
- PINCTRL_PIN(TEGRA_PIN_DP_HPD_PFF0, "DP_HPD PFF0"),
- PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN2_PFF1, "USB_VBUS_EN2 PFF1"),
- PINCTRL_PIN(TEGRA_PIN_PFF2, "PFF2"),
+ PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
PINCTRL_PIN(TEGRA_PIN_CLK_32K_IN, "CLK_32K_IN"),
- PINCTRL_PIN(TEGRA_PIN_GMI_CLK_LB, "GMI_CLK_LB"),
PINCTRL_PIN(TEGRA_PIN_JTAG_RTCK, "JTAG_RTCK"),
PINCTRL_PIN(TEGRA_PIN_DSI_B_CLK_P, "DSI_B_CLK_P"),
PINCTRL_PIN(TEGRA_PIN_DSI_B_CLK_N, "DSI_B_CLK_N"),
@@ -1160,6 +1160,7 @@ static const unsigned sdmmc3_clk_lb_out_pee4_pins[] = {
static const unsigned sdmmc3_clk_lb_in_pee5_pins[] = {
TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5,
};
+
static const unsigned dp_hpd_pff0_pins[] = {
TEGRA_PIN_DP_HPD_PFF0,
};
@@ -1180,24 +1181,24 @@ static const unsigned cpu_pwr_req_pins[] = {
TEGRA_PIN_CPU_PWR_REQ,
};
-static const unsigned owr_pins[] = {
- TEGRA_PIN_OWR,
-};
-
static const unsigned pwr_int_n_pins[] = {
TEGRA_PIN_PWR_INT_N,
};
+static const unsigned gmi_clk_lb_pins[] = {
+ TEGRA_PIN_GMI_CLK_LB,
+};
+
static const unsigned reset_out_n_pins[] = {
TEGRA_PIN_RESET_OUT_N,
};
-static const unsigned clk_32k_in_pins[] = {
- TEGRA_PIN_CLK_32K_IN,
+static const unsigned owr_pins[] = {
+ TEGRA_PIN_OWR,
};
-static const unsigned gmi_clk_lb_pins[] = {
- TEGRA_PIN_GMI_CLK_LB,
+static const unsigned clk_32k_in_pins[] = {
+ TEGRA_PIN_CLK_32K_IN,
};
static const unsigned jtag_rtck_pins[] = {
@@ -1463,15 +1464,15 @@ static const unsigned drive_gpv_pins[] = {
TEGRA_PIN_PFF2,
};
-static const unsigned drive_cec_pins[] = {
- TEGRA_PIN_HDMI_CEC_PEE3,
-};
-
static const unsigned drive_dev3_pins[] = {
TEGRA_PIN_CLK3_OUT_PEE0,
TEGRA_PIN_CLK3_REQ_PEE1,
};
+static const unsigned drive_cec_pins[] = {
+ TEGRA_PIN_HDMI_CEC_PEE3,
+};
+
static const unsigned drive_at6_pins[] = {
TEGRA_PIN_PK1,
TEGRA_PIN_PK3,
@@ -1519,10 +1520,13 @@ static const unsigned drive_ao4_pins[] = {
enum tegra_mux_dt {
TEGRA_MUX_DT_SAFE,
TEGRA_MUX_DT_BLINK,
+ TEGRA_MUX_DT_CCLA,
TEGRA_MUX_DT_CEC,
TEGRA_MUX_DT_CLDVFS,
+ TEGRA_MUX_DT_CLK,
TEGRA_MUX_DT_CLK12,
TEGRA_MUX_DT_CPU,
+ TEGRA_MUX_DT_CSI,
TEGRA_MUX_DT_DAP,
TEGRA_MUX_DT_DAP1,
TEGRA_MUX_DT_DAP2,
@@ -1530,6 +1534,8 @@ enum tegra_mux_dt {
TEGRA_MUX_DT_DISPLAYA,
TEGRA_MUX_DT_DISPLAYA_ALT,
TEGRA_MUX_DT_DISPLAYB,
+ TEGRA_MUX_DT_DP,
+ TEGRA_MUX_DT_DSI_B,
TEGRA_MUX_DT_DTV,
TEGRA_MUX_DT_EXTPERIPH1,
TEGRA_MUX_DT_EXTPERIPH2,
@@ -1551,6 +1557,9 @@ enum tegra_mux_dt {
TEGRA_MUX_DT_IRDA,
TEGRA_MUX_DT_KBC,
TEGRA_MUX_DT_OWR,
+ TEGRA_MUX_DT_PE,
+ TEGRA_MUX_DT_PE0,
+ TEGRA_MUX_DT_PE1,
TEGRA_MUX_DT_PMI,
TEGRA_MUX_DT_PWM0,
TEGRA_MUX_DT_PWM1,
@@ -1562,6 +1571,8 @@ enum tegra_mux_dt {
TEGRA_MUX_DT_RSVD2,
TEGRA_MUX_DT_RSVD3,
TEGRA_MUX_DT_RSVD4,
+ TEGRA_MUX_DT_RTCK,
+ TEGRA_MUX_DT_SATA,
TEGRA_MUX_DT_SDMMC1,
TEGRA_MUX_DT_SDMMC2,
TEGRA_MUX_DT_SDMMC3,
@@ -1574,6 +1585,8 @@ enum tegra_mux_dt {
TEGRA_MUX_DT_SPI4,
TEGRA_MUX_DT_SPI5,
TEGRA_MUX_DT_SPI6,
+ TEGRA_MUX_DT_SYS,
+ TEGRA_MUX_DT_TMDS,
TEGRA_MUX_DT_TRACE,
TEGRA_MUX_DT_UARTA,
TEGRA_MUX_DT_UARTB,
@@ -1592,18 +1605,6 @@ enum tegra_mux_dt {
TEGRA_MUX_DT_VI_ALT3,
TEGRA_MUX_DT_VIMCLK2,
TEGRA_MUX_DT_VIMCLK2_ALT,
- TEGRA_MUX_DT_SATA,
- TEGRA_MUX_DT_CCLA,
- TEGRA_MUX_DT_PE0,
- TEGRA_MUX_DT_PE,
- TEGRA_MUX_DT_PE1,
- TEGRA_MUX_DT_DP,
- TEGRA_MUX_DT_RTCK,
- TEGRA_MUX_DT_SYS,
- TEGRA_MUX_DT_CLK,
- TEGRA_MUX_DT_TMDS,
- TEGRA_MUX_DT_CSI,
- TEGRA_MUX_DT_DSI_B,
};
static const unsigned mipi_pad_ctrl_dsi_b_pins[] = {
@@ -1619,7 +1620,6 @@ static const unsigned mipi_pad_ctrl_dsi_b_pins[] = {
TEGRA_PIN_DSI_B_D3_N,
};
-
static const char * const blink_groups[] = {
"clk_32k_out_pa0",
};
@@ -2938,10 +2938,13 @@ static const char * const safe_groups[] = {
static const struct tegra_function tegra124_functions[] = {
FUNCTION(safe),
FUNCTION(blink),
+ FUNCTION(ccla),
FUNCTION(cec),
FUNCTION(cldvfs),
+ FUNCTION(clk),
FUNCTION(clk12),
FUNCTION(cpu),
+ FUNCTION(csi),
FUNCTION(dap),
FUNCTION(dap1),
FUNCTION(dap2),
@@ -2949,6 +2952,8 @@ static const struct tegra_function tegra124_functions[] = {
FUNCTION(displaya),
FUNCTION(displaya_alt),
FUNCTION(displayb),
+ FUNCTION(dp),
+ FUNCTION(dsi_b),
FUNCTION(dtv),
FUNCTION(extperiph1),
FUNCTION(extperiph2),
@@ -2970,6 +2975,9 @@ static const struct tegra_function tegra124_functions[] = {
FUNCTION(irda),
FUNCTION(kbc),
FUNCTION(owr),
+ FUNCTION(pe),
+ FUNCTION(pe0),
+ FUNCTION(pe1),
FUNCTION(pmi),
FUNCTION(pwm0),
FUNCTION(pwm1),
@@ -2981,6 +2989,8 @@ static const struct tegra_function tegra124_functions[] = {
FUNCTION(rsvd2),
FUNCTION(rsvd3),
FUNCTION(rsvd4),
+ FUNCTION(rtck),
+ FUNCTION(sata),
FUNCTION(sdmmc1),
FUNCTION(sdmmc2),
FUNCTION(sdmmc3),
@@ -2993,6 +3003,8 @@ static const struct tegra_function tegra124_functions[] = {
FUNCTION(spi4),
FUNCTION(spi5),
FUNCTION(spi6),
+ FUNCTION(sys),
+ FUNCTION(tmds),
FUNCTION(trace),
FUNCTION(uarta),
FUNCTION(uartb),
@@ -3011,23 +3023,11 @@ static const struct tegra_function tegra124_functions[] = {
FUNCTION(vi_alt3),
FUNCTION(vimclk2),
FUNCTION(vimclk2_alt),
- FUNCTION(sata),
- FUNCTION(ccla),
- FUNCTION(pe0),
- FUNCTION(pe),
- FUNCTION(pe1),
- FUNCTION(dp),
- FUNCTION(rtck),
- FUNCTION(sys),
- FUNCTION(clk),
- FUNCTION(tmds),
- FUNCTION(csi),
- FUNCTION(dsi_b),
};
-#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
-#define PINGROUP_REG_A 0x3000 /* bank 1 */
-#define MIPI_PAD_CTRL_PINGROUP_REG_A 0x820 /* bank 2 */
+#define DRV_PINGROUP_REG_A 0x868 /* bank 0 */
+#define PINGROUP_REG_A 0x3000 /* bank 1 */
+#define MIPI_PAD_CTRL_PINGROUP_REG_A 0x820 /* bank 2 */
#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
#define PINGROUP_REG_N(r) -1
@@ -3191,8 +3191,8 @@ static const struct tegra_pingroup tegra124_groups[] = {
PINGROUP(pu4, PWM1, UARTA, GMI, DISPLAYB, PWM1, 0x3194, N, N, N),
PINGROUP(pu5, PWM2, UARTA, GMI, DISPLAYB, PWM2, 0x3198, N, N, N),
PINGROUP(pu6, PWM3, UARTA, RSVD3, GMI, RSVD3, 0x319c, N, N, N),
- PINGROUP(gen1_i2c_scl_pc4, I2C1, RSVD2, RSVD3, RSVD4, I2C1, 0x31a0, Y, N, N),
- PINGROUP(gen1_i2c_sda_pc5, I2C1, RSVD2, RSVD3, RSVD4, I2C1, 0x31a4, Y, N, N),
+ PINGROUP(gen1_i2c_sda_pc5, I2C1, RSVD2, RSVD3, RSVD4, I2C1, 0x31a0, Y, N, N),
+ PINGROUP(gen1_i2c_scl_pc4, I2C1, RSVD2, RSVD3, RSVD4, I2C1, 0x31a4, Y, N, N),
PINGROUP(dap4_fs_pp4, I2S3, GMI, DTV, RSVD4, I2S3, 0x31a8, N, N, N),
PINGROUP(dap4_din_pp5, I2S3, GMI, RSVD3, RSVD4, I2S3, 0x31ac, N, N, N),
PINGROUP(dap4_dout_pp6, I2S3, GMI, DTV, RSVD4, I2S3, 0x31b0, N, N, N),
@@ -3329,8 +3329,8 @@ static const struct tegra_pingroup tegra124_groups[] = {
PINGROUP(gpio_w3_aud_pw3, SPI6, SPI1, SPI2, I2C1, SPI1, 0x33f0, N, N, N),
PINGROUP(usb_vbus_en0_pn4, USB, RSVD2, RSVD3, RSVD4, USB, 0x33f4, Y, N, N),
PINGROUP(usb_vbus_en1_pn5, USB, RSVD2, RSVD3, RSVD4, USB, 0x33f8, Y, N, N),
- PINGROUP(sdmmc3_clk_lb_out_pee4, SDMMC3, RSVD2, RSVD3, RSVD4, SDMMC3, 0x33fc, N, N, N),
- PINGROUP(sdmmc3_clk_lb_in_pee5, SDMMC3, RSVD2, RSVD3, RSVD4, SDMMC3, 0x3400, N, N, N),
+ PINGROUP(sdmmc3_clk_lb_in_pee5, SDMMC3, RSVD2, RSVD3, RSVD4, SDMMC3, 0x33fc, N, N, N),
+ PINGROUP(sdmmc3_clk_lb_out_pee4, SDMMC3, RSVD2, RSVD3, RSVD4, SDMMC3, 0x3400, N, N, N),
PINGROUP(gmi_clk_lb, SDMMC2, RSVD2, GMI, RSVD4, SDMMC2, 0x3404, N, N, N),
PINGROUP(reset_out_n, RSVD1, RSVD2, RSVD3, RESET_OUT_N, RSVD1, 0x3408, N, N, N),
PINGROUP(kb_row16_pt0, KBC, RSVD2, RSVD3, UARTC, KBC, 0x340c, N, N, N),
@@ -3374,10 +3374,10 @@ static const struct tegra_pingroup tegra124_groups[] = {
DRV_PINGROUP(cec, 0x938, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
DRV_PINGROUP(at6, 0x994, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
DRV_PINGROUP(dap5, 0x998, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(usb_vbus_en, 0x99c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(ao3, 0x9a8, 2, 3, 4, 12, 5, -1, -1, 28, 2, -1, -1, N),
+ DRV_PINGROUP(usb_vbus_en, 0x99c, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
+ DRV_PINGROUP(ao3, 0x9a8, 2, 3, 4, 12, 5, -1, -1, 28, 2, -1, -1, N),
DRV_PINGROUP(ao0, 0x9b0, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
- DRV_PINGROUP(hv0, 0x9b4, 2, 3, 4, 12, 5, -1, -1, 28, 2, -1, -1, N),
+ DRV_PINGROUP(hv0, 0x9b4, 2, 3, 4, 12, 5, -1, -1, 28, 2, -1, -1, N),
DRV_PINGROUP(sdio4, 0x9c4, 2, 3, 4, 12, 5, 20, 5, 28, 2, 30, 2, N),
DRV_PINGROUP(ao4, 0x9c8, 2, 3, 4, 12, 7, 20, 7, 28, 2, 30, 2, Y),
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index b53992ab3090..7a30562e0cc5 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -34,6 +34,7 @@ enum ds_type {
ds_1340,
ds_1388,
ds_3231,
+ m41t0,
m41t00,
mcp7941x,
rx_8025,
@@ -48,6 +49,7 @@ enum ds_type {
# define DS1340_BIT_nEOSC 0x80
# define MCP7941X_BIT_ST 0x80
#define DS1307_REG_MIN 0x01 /* 00-59 */
+#define M41T0_BIT_OF 0x80
#define DS1307_REG_HOUR 0x02 /* 00-23, or 1-12{am,pm} */
# define DS1307_BIT_12HR 0x40 /* in REG_HOUR */
# define DS1307_BIT_PM 0x20 /* in REG_HOUR */
@@ -168,6 +170,7 @@ static const struct i2c_device_id ds1307_id[] = {
{ "ds1388", ds_1388 },
{ "ds1340", ds_1340 },
{ "ds3231", ds_3231 },
+ { "m41t0", m41t0 },
{ "m41t00", m41t00 },
{ "mcp7941x", mcp7941x },
{ "pt7c4338", ds_1307 },
@@ -376,6 +379,13 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
dev_dbg(dev, "%s: %7ph\n", "read", ds1307->regs);
+ /* if oscillator fail bit is set, no data can be trusted */
+ if (ds1307->type == m41t0 &&
+ ds1307->regs[DS1307_REG_MIN] & M41T0_BIT_OF) {
+ dev_warn(dev, "oscillator failed, set time!\n");
+ return -EINVAL;
+ }
+
t->tm_sec = bcd2bin(ds1307->regs[DS1307_REG_SECS] & 0x7f);
t->tm_min = bcd2bin(ds1307->regs[DS1307_REG_MIN] & 0x7f);
tmp = ds1307->regs[DS1307_REG_HOUR] & 0x3f;
@@ -837,6 +847,7 @@ read_rtc:
tmp = ds1307->regs[DS1307_REG_SECS];
switch (ds1307->type) {
case ds_1307:
+ case m41t0:
case m41t00:
/* clock halted? turn it on, so clock can tick. */
if (tmp & DS1307_BIT_CH) {
@@ -901,6 +912,7 @@ read_rtc:
tmp = ds1307->regs[DS1307_REG_HOUR];
switch (ds1307->type) {
case ds_1340:
+ case m41t0:
case m41t00:
/*
* NOTE: ignores century bits; fix before deploying
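The rtc-ds1307 changes above register the ST M41T0 as a DS1307-compatible type
and reject reads while the oscillator-fail (OF) bit in the minutes register is
set. Below is a minimal sketch of how a board file could instantiate such an
RTC; the bus number and I2C address are assumptions, not taken from this patch.

#include <linux/i2c.h>

static struct i2c_board_info example_rtc_info __initdata = {
	I2C_BOARD_INFO("m41t0", 0x68),	/* assumed 7-bit slave address */
};

/* typically called from board init code, on an assumed I2C bus number: */
/* i2c_register_board_info(4, &example_rtc_info, 1); */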
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 736d6f68d757..f0e694a8a3a2 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -931,7 +931,7 @@ static struct tegra_spi_device_controller_data
return NULL;
}
- cdata = devm_kzalloc(&spi->dev, sizeof(*cdata),
+ cdata = kzalloc(sizeof(*cdata),
GFP_KERNEL);
if (!cdata) {
dev_err(&spi->dev, "Memory alloc for cdata failed\n");
@@ -983,7 +983,7 @@ static int tegra_spi_setup(struct spi_device *spi)
spi->controller_data = cdata;
}
- /* Set speed to the spi max fequency if spi device has not set */
+ /* Set speed to the spi max frequency if spi device has not set */
spi->max_speed_hz = spi->max_speed_hz ? : tspi->spi_max_frequency;
ret = pm_runtime_get_sync(tspi->dev);
diff --git a/drivers/usb/gadget/tegra_udc.c b/drivers/usb/gadget/tegra_udc.c
index 89ed8be52d16..b4141f4c5d06 100644
--- a/drivers/usb/gadget/tegra_udc.c
+++ b/drivers/usb/gadget/tegra_udc.c
@@ -2804,9 +2804,9 @@ static int tegra_udc_ep_setup(struct tegra_udc *udc)
for (i = 1; i < (int)(udc->max_ep / 2); i++) {
char name[14];
- sprintf(name, "ep%dout", i);
+ sprintf(name, "ep%hhdout", i);
struct_ep_setup(udc, i * 2, name, 1);
- sprintf(name, "ep%din", i);
+ sprintf(name, "ep%hhdin", i);
struct_ep_setup(udc, i * 2 + 1, name, 1);
}
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig
index 39ac49e0682c..7fce1987ddc7 100644
--- a/drivers/video/logo/Kconfig
+++ b/drivers/video/logo/Kconfig
@@ -82,4 +82,8 @@ config LOGO_M32R_CLUT224
depends on M32R
default y
+config LOGO_CUSTOM_CLUT224
+ bool "Custom 224-color Linux logo"
+ default n
+
endif # LOGO
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index 3b437813584c..45d4b5346d07 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -18,6 +18,8 @@ obj-$(CONFIG_LOGO_M32R_CLUT224) += logo_m32r_clut224.o
obj-$(CONFIG_SPU_BASE) += logo_spe_clut224.o
+obj-$(CONFIG_LOGO_CUSTOM_CLUT224) += logo_custom_clut224.o
+
# How to generate logo's
# Use logo-cfiles to retrieve list of .c files to be built
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index 080c35b34bbb..ba4dfe9a71d6 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -100,6 +100,10 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
/* M32R Linux logo */
logo = &logo_m32r_clut224;
#endif
+#ifdef CONFIG_LOGO_CUSTOM_CLUT224
+ /* Custom Linux logo */
+ logo = &logo_custom_clut224;
+#endif
}
return logo;
}
diff --git a/drivers/video/tegra/dc/bandwidth.c b/drivers/video/tegra/dc/bandwidth.c
index df4f4d686243..bf7b26b6cf52 100644
--- a/drivers/video/tegra/dc/bandwidth.c
+++ b/drivers/video/tegra/dc/bandwidth.c
@@ -917,7 +917,8 @@ long tegra_dc_calc_min_bandwidth(struct tegra_dc *dc)
pclk = KHZ2PICOS(150000); /* 150MHz max */
#endif
} else if ((dc->out->type == TEGRA_DC_OUT_DP) ||
- (dc->out->type == TEGRA_DC_OUT_NVSR_DP)) {
+ (dc->out->type == TEGRA_DC_OUT_NVSR_DP) ||
+ (dc->out->type == TEGRA_DC_OUT_LVDS)) {
if (dc->mode.pclk)
pclk = KHZ2PICOS(dc->mode.pclk / 1000);
else
diff --git a/drivers/video/tegra/dc/dc.c b/drivers/video/tegra/dc/dc.c
index 588ef13c3c62..5fb2e91933f7 100644
--- a/drivers/video/tegra/dc/dc.c
+++ b/drivers/video/tegra/dc/dc.c
@@ -110,18 +110,6 @@ static struct device_dma_parameters tegra_dc_dma_parameters = {
.max_segment_size = UINT_MAX,
};
-static const struct {
- bool h;
- bool v;
-} can_filter[] = {
- /* Window A has no filtering */
- { false, false },
- /* Window B has both H and V filtering */
- { true, true },
- /* Window C has only H filtering */
- { false, true },
-};
-
#ifdef CONFIG_TEGRA_DC_CMU
static struct tegra_dc_cmu default_cmu = {
/* lut1 maps sRGB to linear space. */
@@ -1410,8 +1398,14 @@ static int tegra_dc_set_out(struct tegra_dc *dc, struct tegra_dc_out *out)
*/
tegra_dc_cache_cmu(dc, tegra_dc_get_cmu(dc));
#endif
- } else if (out->n_modes > 0)
+ }
+/* Do not set the mode here; at a later stage we parse the video= arguments
+ * from the kernel command line and set the mode accordingly.
+ */
+#if 0
+ else if (out->n_modes > 0)
tegra_dc_set_mode(dc, &dc->out->modes[0]);
+#endif
switch (out->type) {
case TEGRA_DC_OUT_RGB:
diff --git a/drivers/video/tegra/dc/dp.c b/drivers/video/tegra/dc/dp.c
index 2e47617d3006..943e3f115d80 100644
--- a/drivers/video/tegra/dc/dp.c
+++ b/drivers/video/tegra/dc/dp.c
@@ -1175,7 +1175,10 @@ static int _tegra_dp_fast_lt(struct tegra_dc_dp_data *dp,
tegra_dp_tpg(dp, TRAINING_PATTERN_1, n_lanes);
tegra_dp_wait_aux_training(dp, true);
cr_done = tegra_dp_clock_recovery_status(dp);
- cr_done ? : ({ret = -EINVAL; goto fail; });
+ if (!cr_done) {
+ ret = -EINVAL;
+ goto fail;
+ }
if (cfg->tps3_supported)
tegra_dp_tpg(dp, TRAINING_PATTERN_3, n_lanes);
@@ -1183,7 +1186,10 @@ static int _tegra_dp_fast_lt(struct tegra_dc_dp_data *dp,
tegra_dp_tpg(dp, TRAINING_PATTERN_2, n_lanes);
tegra_dp_wait_aux_training(dp, false);
lt_done = tegra_dp_lt_status(dp);
- lt_done ? : ({ret = -EINVAL; goto fail; });
+ if (!lt_done) {
+ ret = -EINVAL;
+ goto fail;
+ }
cfg->lt_data_valid = true;
@@ -2119,6 +2125,7 @@ static void tegra_dc_dp_enable(struct tegra_dc *dc)
error_enable:
tegra_dp_default_int(dp, false);
+ tegra_dp_disable_irq(dp->irq);
tegra_dpaux_pad_power(dp->dc, false);
tegra_dpaux_clk_disable(dp);
tegra_dc_io_end(dc);
diff --git a/drivers/video/tegra/dc/dsi.c b/drivers/video/tegra/dc/dsi.c
index 284e0532e0cb..8cb81fdd29be 100644
--- a/drivers/video/tegra/dc/dsi.c
+++ b/drivers/video/tegra/dc/dsi.c
@@ -754,7 +754,7 @@ static void tegra_dsi_init_sw(struct tegra_dc *dc,
}
#define SELECT_T_PHY(platform_t_phy_ps, default_phy, clk_ps, hw_inc) ( \
-(platform_t_phy_ps) ? ( \
+(platform_t_phy_ps) != 0 ? ( \
((DSI_CONVERT_T_PHY_PS_TO_T_PHY(platform_t_phy_ps, clk_ps, hw_inc)) < 0 ? 0 : \
(DSI_CONVERT_T_PHY_PS_TO_T_PHY(platform_t_phy_ps, clk_ps, hw_inc)))) : \
((default_phy) < 0 ? 0 : (default_phy)))
diff --git a/drivers/video/tegra/dc/lvds.c b/drivers/video/tegra/dc/lvds.c
index 36842cf7b4fd..45ab77f92dc1 100644
--- a/drivers/video/tegra/dc/lvds.c
+++ b/drivers/video/tegra/dc/lvds.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <mach/dc.h>
+#include <linux/clk.h>
#include "lvds.h"
#include "dc_priv.h"
@@ -67,6 +68,9 @@ static void tegra_dc_lvds_enable(struct tegra_dc *dc)
tegra_dc_io_start(dc);
+ tegra_sor_clk_enable(lvds->sor);
+ clk_prepare_enable(lvds->clk);
+
/* Power on panel */
tegra_sor_pad_cal_power(lvds->sor, true);
tegra_dc_sor_set_internal_panel(lvds->sor, true);
@@ -78,7 +82,7 @@ static void tegra_dc_lvds_enable(struct tegra_dc *dc)
static void tegra_dc_lvds_disable(struct tegra_dc *dc)
{
struct tegra_dc_lvds_data *lvds = tegra_dc_get_outdata(dc);
-
+ clk_disable_unprepare(lvds->clk);
/* Power down SOR */
tegra_dc_sor_disable(lvds->sor, true);
}
@@ -88,7 +92,7 @@ static void tegra_dc_lvds_suspend(struct tegra_dc *dc)
{
struct tegra_dc_lvds_data *lvds = tegra_dc_get_outdata(dc);
- tegra_dc_lvds_disable(dc);
+ clk_prepare_enable(lvds->clk);
lvds->suspended = true;
}
@@ -99,7 +103,7 @@ static void tegra_dc_lvds_resume(struct tegra_dc *dc)
if (!lvds->suspended)
return;
- tegra_dc_lvds_enable(dc);
+ clk_disable_unprepare(lvds->clk);
}
static long tegra_dc_lvds_setup_clk(struct tegra_dc *dc, struct clk *clk)
@@ -107,12 +111,18 @@ static long tegra_dc_lvds_setup_clk(struct tegra_dc *dc, struct clk *clk)
struct tegra_dc_lvds_data *lvds = tegra_dc_get_outdata(dc);
struct clk *parent_clk;
- tegra_sor_setup_clk(lvds->sor, clk, true);
+ parent_clk = clk_get_sys(NULL, dc->out->parent_clk ? : "pll_d_out0");
- parent_clk = clk_get_parent(clk);
if (clk_get_parent(lvds->sor->sor_clk) != parent_clk)
clk_set_parent(lvds->sor->sor_clk, parent_clk);
+ if (clk_get_parent(clk) != parent_clk)
+ clk_set_parent(clk, parent_clk);
+
+ tegra_sor_setup_clk(lvds->sor, clk, true);
+
+ lvds->clk = clk;
+
return tegra_dc_pclk_round_rate(dc, lvds->sor->dc->mode.pclk);
}
diff --git a/drivers/video/tegra/dc/lvds.h b/drivers/video/tegra/dc/lvds.h
index 96db6d759cfe..ccd366db53f6 100644
--- a/drivers/video/tegra/dc/lvds.h
+++ b/drivers/video/tegra/dc/lvds.h
@@ -23,6 +23,7 @@
struct tegra_dc_lvds_data {
struct tegra_dc *dc;
struct tegra_dc_sor_data *sor;
+ struct clk *clk;
bool suspended;
};
diff --git a/drivers/video/tegra/dc/mode.c b/drivers/video/tegra/dc/mode.c
index b600c70286e3..ab8907815749 100644
--- a/drivers/video/tegra/dc/mode.c
+++ b/drivers/video/tegra/dc/mode.c
@@ -32,6 +32,115 @@
#include "dc_reg.h"
#include "dc_priv.h"
+const struct fb_videomode tegra_modes[] = {
+ /* TouchRevolution Fusion 10" aka Chunghwa Picture Tubes
+ * CLAA100NC05 10.1 inch 1024x600 single channel LVDS panel
+ */
+ {
+ .name = "1024x600",
+ .refresh = 60,
+ .xres = 1024,
+ .yres = 600,
+ .pixclock = 20833,
+ .left_margin = 104,
+ .right_margin = 43,
+ .upper_margin = 24,
+ .lower_margin = 20,
+ .hsync_len = 5,
+ .vsync_len = 5,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .flag = FB_FLAG_RATIO_16_9,
+ .vmode = FB_VMODE_NONINTERLACED
+ },
+ {
+ /* 1366x768 */
+ .refresh = 60,
+ .xres = 1366,
+ .yres = 768,
+ .pixclock = KHZ2PICOS(72072),
+ .hsync_len = 58, /* h_sync_width */
+ .vsync_len = 4, /* v_sync_width */
+ .left_margin = 58, /* h_back_porch */
+ .upper_margin = 4, /* v_back_porch */
+ .right_margin = 58, /* h_front_porch */
+ .lower_margin = 4, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ },
+ {
+ /* 1680x1050p 59.94/60hz */
+ .refresh = 60,
+ .xres = 1680,
+ .yres = 1050,
+ .pixclock = KHZ2PICOS(147140),
+ .hsync_len = 184, /* h_sync_width */
+ .vsync_len = 3, /* v_sync_width */
+ .left_margin = 288, /* h_back_porch */
+ .upper_margin = 33, /* v_back_porch */
+ .right_margin = 104, /* h_front_porch */
+ .lower_margin = 1, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ },
+	{
+ /* 1920x1080p 59.94/60hz CVT */
+ .refresh = 60,
+ .xres = 1920,
+ .yres = 1080,
+ .pixclock = KHZ2PICOS(148500),
+ .hsync_len = 44, /* h_sync_width */
+ .vsync_len = 5, /* v_sync_width */
+ .left_margin = 148, /* h_back_porch */
+ .upper_margin = 36, /* v_back_porch */
+ .right_margin = 88, /* h_front_porch */
+ .lower_margin = 4, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ },
+ {
+ /* 1920x1200p 60hz */
+ .refresh = 60,
+ .xres = 1920,
+ .yres = 1200,
+ .pixclock = KHZ2PICOS(154000),
+ .hsync_len = 32, /* h_sync_width */
+ .vsync_len = 6, /* v_sync_width */
+ .left_margin = 80, /* h_back_porch */
+ .upper_margin = 26, /* v_back_porch */
+ .right_margin = 48, /* h_front_porch */
+ .lower_margin = 3, /* v_front_porch */
+ .vmode = FB_VMODE_NONINTERLACED,
+ .sync = 0,
+ },
+};
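For orientation, the fb_videomode fields in the table above combine into the
nominal refresh rate as pixel clock / (htotal * vtotal). A small sketch using
the 1366x768 entry as the worked example; the helper is hypothetical and not
part of this patch.

#include <linux/fb.h>

static unsigned int example_fb_mode_refresh_hz(const struct fb_videomode *m)
{
	unsigned long htotal = m->xres + m->left_margin +
			       m->right_margin + m->hsync_len;
	unsigned long vtotal = m->yres + m->upper_margin +
			       m->lower_margin + m->vsync_len;
	unsigned long pclk_hz = PICOS2KHZ(m->pixclock) * 1000;

	/* 1366x768: htotal = 1540, vtotal = 780, pclk = 72,072,000 Hz,
	 * so 72072000 / (1540 * 780) = 60 Hz, matching .refresh = 60.
	 */
	return pclk_hz / (htotal * vtotal);
}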
+
+/* Try to find the best matching mode, using our own modes as well as the CEA
+ * and VESA modes from modedb.
+ */
+int tegra_fb_find_mode(struct fb_var_screeninfo *var, struct fb_info *info,
+ const char* option, unsigned int default_bpp)
+{
+ int out;
+
+ out = fb_find_mode(var, info, option, tegra_modes,
+ ARRAY_SIZE(tegra_modes), NULL, default_bpp);
+
+ /* Only accept this mode if we found a reasonable match (resolution) */
+ if (out == 1 || out == 2)
+ return out;
+
+ out = fb_find_mode(&info->var, info, option,
+ cea_modes, CEA_MODEDB_SIZE, NULL, default_bpp);
+
+	/* Accept an exact match, or a match that only ignored the refresh rate */
+ if (out == 1 || out == 2)
+ return out;
+
+ return fb_find_mode(&info->var, info, option,
+ vesa_modes, VESA_MODEDB_SIZE, NULL, default_bpp);
+}
+EXPORT_SYMBOL(tegra_fb_find_mode);
+
/* return non-zero if constraint is violated */
static int calc_h_ref_to_sync(const struct tegra_dc_mode *mode, int *href)
{
@@ -417,6 +526,12 @@ static int _tegra_dc_set_mode(struct tegra_dc *dc,
}
memcpy(&dc->mode, mode, sizeof(dc->mode));
+
+ dev_info(&dc->ndev->dev, "using mode %dx%d pclk=%d href=%d vref=%d\n",
+ mode->h_active, mode->v_active, mode->pclk,
+ mode->h_ref_to_sync, mode->v_ref_to_sync
+ );
+
dc->mode_dirty = true;
if (dc->out->type == TEGRA_DC_OUT_RGB)
@@ -440,6 +555,78 @@ int tegra_dc_set_mode(struct tegra_dc *dc, const struct tegra_dc_mode *mode)
}
EXPORT_SYMBOL(tegra_dc_set_mode);
+int tegra_dc_var_to_dc_mode(struct tegra_dc *dc, struct fb_var_screeninfo *var,
+ struct tegra_dc_mode *mode)
+{
+ bool stereo_mode = false;
+ int err;
+
+ if (!var->pixclock)
+ return -EINVAL;
+
+ mode->pclk = PICOS2KHZ(var->pixclock) * 1000;
+ mode->h_sync_width = var->hsync_len;
+ mode->v_sync_width = var->vsync_len;
+ mode->h_back_porch = var->left_margin;
+ mode->v_back_porch = var->upper_margin;
+ mode->h_active = var->xres;
+ mode->v_active = var->yres;
+ mode->h_front_porch = var->right_margin;
+ mode->v_front_porch = var->lower_margin;
+ mode->stereo_mode = stereo_mode;
+ if (dc->out->type == TEGRA_DC_OUT_HDMI) {
+ /* HDMI controller requires h_ref=1, v_ref=1 */
+ mode->h_ref_to_sync = 1;
+ mode->v_ref_to_sync = 1;
+ } else {
+ /*
+ * HACK:
+		 * If v_front_porch is 1 or less, we would violate timing
+		 * constraint 5/6; in that case, bump the front porch up to 2.
+ */
+ if (mode->v_front_porch <= 1)
+ mode->v_front_porch = 2;
+
+ err = calc_ref_to_sync(mode);
+ if (err) {
+ dev_err(&dc->ndev->dev, "display timing ref_to_sync"
+ "calculation failed with code %d\n", err);
+ return -EINVAL;
+ }
+ dev_info(&dc->ndev->dev, "Calculated sync href=%d vref=%d\n",
+ mode->h_ref_to_sync, mode->v_ref_to_sync);
+ }
+ if (!check_ref_to_sync(mode)) {
+ dev_err(&dc->ndev->dev,
+ "display timing doesn't meet restrictions.\n");
+ return -EINVAL;
+ }
+
+#ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
+ /* Double the pixel clock and update v_active only for
+ * frame packed mode */
+ if (mode->stereo_mode) {
+ mode->pclk *= 2;
+ /* total v_active = yres*2 + activespace */
+ mode->v_active = var->yres * 2 +
+ var->vsync_len +
+ var->upper_margin +
+ var->lower_margin;
+ }
+#endif
+
+ mode->flags = 0;
+
+ if (!(var->sync & FB_SYNC_HOR_HIGH_ACT))
+ mode->flags |= TEGRA_DC_MODE_FLAG_NEG_H_SYNC;
+
+ if (!(var->sync & FB_SYNC_VERT_HIGH_ACT))
+ mode->flags |= TEGRA_DC_MODE_FLAG_NEG_V_SYNC;
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_dc_var_to_dc_mode);
+
int tegra_dc_to_fb_videomode(struct fb_videomode *fbmode,
const struct tegra_dc_mode *mode)
{
@@ -554,6 +741,7 @@ EXPORT_SYMBOL(tegra_dc_set_drm_mode);
int tegra_dc_set_fb_mode(struct tegra_dc *dc,
const struct fb_videomode *fbmode, bool stereo_mode)
{
+ int err;
struct tegra_dc_mode mode;
if (!fbmode->pixclock)
@@ -570,6 +758,7 @@ int tegra_dc_set_fb_mode(struct tegra_dc *dc,
mode.h_front_porch = fbmode->right_margin;
mode.v_front_porch = fbmode->lower_margin;
mode.stereo_mode = stereo_mode;
+#if 0
mode.vmode = fbmode->vmode;
if (fbmode->flag & FB_FLAG_RATIO_16_9)
mode.avi_m = TEGRA_DC_MODE_AVI_M_16_9;
@@ -580,6 +769,35 @@ int tegra_dc_set_fb_mode(struct tegra_dc *dc,
if (!check_mode_timings(dc, &mode))
return -EINVAL;
+#endif
+
+ if (dc->out->type == TEGRA_DC_OUT_HDMI) {
+ /* HDMI controller requires h_ref=1, v_ref=1 */
+ mode.h_ref_to_sync = 1;
+ mode.v_ref_to_sync = 1;
+ } else {
+ /*
+ * HACK:
+		 * If v_front_porch is 1 or less, we would violate timing
+		 * constraint 5/6; in that case, bump the front porch up to 2.
+ */
+ if (mode.v_front_porch <= 1)
+ mode.v_front_porch = 2;
+
+ err = calc_ref_to_sync(&mode);
+ if (err) {
+ dev_err(&dc->ndev->dev, "display timing ref_to_sync"
+ "calculation failed with code %d\n", err);
+ return -EINVAL;
+ }
+ dev_info(&dc->ndev->dev, "Calculated sync href=%d vref=%d\n",
+ mode.h_ref_to_sync, mode.v_ref_to_sync);
+ }
+ if (!check_ref_to_sync(&mode)) {
+ dev_err(&dc->ndev->dev,
+ "display timing doesn't meet restrictions.\n");
+ return -EINVAL;
+ }
#ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
/* Double the pixel clock and update v_active only for
diff --git a/drivers/video/tegra/dc/sor.c b/drivers/video/tegra/dc/sor.c
index e5fe66be55d0..ee75338e4037 100644
--- a/drivers/video/tegra/dc/sor.c
+++ b/drivers/video/tegra/dc/sor.c
@@ -186,6 +186,7 @@ static int dbg_sor_show(struct seq_file *s, void *unused)
DUMP_REG(NV_SOR_DC(0));
DUMP_REG(NV_SOR_DC(1));
DUMP_REG(NV_SOR_LANE_DRIVE_CURRENT(0));
+ DUMP_REG(NV_SOR_LANE4_DRIVE_CURRENT(0));
DUMP_REG(NV_SOR_PR(0));
DUMP_REG(NV_SOR_LANE4_PREEMPHASIS(0));
DUMP_REG(NV_SOR_POSTCURSOR(0));
@@ -1184,6 +1185,33 @@ void tegra_dc_sor_detach(struct tegra_dc_sor_data *sor)
tegra_dc_put(dc);
}
+static void tegra_sor_config_lvds_clk(struct tegra_dc_sor_data *sor)
+{
+ int flag = tegra_is_clk_enabled(sor->sor_clk);
+
+ if (sor->clk_type == TEGRA_SOR_LINK_CLK)
+ return;
+
+ tegra_sor_writel(sor, NV_SOR_CLK_CNTRL,
+ NV_SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK |
+ NV_SOR_CLK_CNTRL_DP_LINK_SPEED_LVDS);
+
+ tegra_dc_sor_set_link_bandwidth(sor, SOR_LINK_SPEED_LVDS);
+
+ /*
+ * HW bug 1425607
+ * Disable clocks to avoid glitch when switching
+ * between safe clock and macro pll clock
+ */
+ if (flag) clk_disable_unprepare(sor->sor_clk);
+
+ tegra_clk_cfg_ex(sor->sor_clk, TEGRA_CLK_SOR_CLK_SEL, 1);
+
+ if (flag) clk_prepare_enable(sor->sor_clk);
+
+ sor->clk_type = TEGRA_SOR_LINK_CLK;
+}
+
void tegra_dc_sor_enable_lvds(struct tegra_dc_sor_data *sor,
bool balanced, bool conforming)
{
@@ -1230,6 +1258,9 @@ void tegra_dc_sor_enable_lvds(struct tegra_dc_sor_data *sor,
tegra_sor_writel(sor, NV_SOR_LVDS, reg_val);
tegra_sor_writel(sor, NV_SOR_LANE_DRIVE_CURRENT(sor->portnum),
0x40404040);
+ if (!conforming && (sor->dc->pdata->default_out->depth == 24))
+ tegra_sor_writel(sor, NV_SOR_LANE4_DRIVE_CURRENT(sor->portnum),
+ 0x40);
#if 0
tegra_sor_write_field(sor, NV_SOR_LVDS,
@@ -1253,20 +1284,10 @@ void tegra_dc_sor_enable_lvds(struct tegra_dc_sor_data *sor,
NV_SOR_DP_SPARE_SOR_CLK_SEL_MACRO_SORCLK);
tegra_dc_sor_enable_lane_sequencer(sor, true, true);
- tegra_dc_sor_set_link_bandwidth(sor, SOR_LINK_SPEED_LVDS);
+ tegra_sor_config_lvds_clk(sor);
tegra_dc_sor_attach_lvds(sor);
- tegra_sor_writel(sor, NV_SOR_CLK_CNTRL,
- NV_SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK |
- NV_SOR_CLK_CNTRL_DP_LINK_SPEED_LVDS);
-
-
-
-
- /* re-enable SOR clock */
- tegra_clk_cfg_ex(sor->sor_clk, TEGRA_CLK_SOR_CLK_SEL, 1);
-
if ((tegra_dc_sor_set_power_state(sor, 1))) {
dev_err(&sor->dc->ndev->dev,
"Failed to power up SOR sequencer for LVDS\n");
diff --git a/drivers/video/tegra/dc/sor_regs.h b/drivers/video/tegra/dc/sor_regs.h
index cbf4b94c1664..8e1cc1c3231c 100644
--- a/drivers/video/tegra/dc/sor_regs.h
+++ b/drivers/video/tegra/dc/sor_regs.h
@@ -611,6 +611,7 @@
#define NV_SOR_DC_LANE0_DP_LANE2_P1_LEVEL2 (43)
#define NV_SOR_DC_LANE0_DP_LANE2_P0_LEVEL3 (51)
#define NV_SOR_LANE_DRIVE_CURRENT(i) (0x4e + (i))
+#define NV_SOR_LANE4_DRIVE_CURRENT(i) (0x50 + (i))
#define NV_SOR_PR(i) (0x52 + (i))
#define NV_SOR_PR_LANE3_DP_LANE3_SHIFT (24)
#define NV_SOR_PR_LANE3_DP_LANE3_MASK (0xff << 24)
diff --git a/drivers/video/tegra/fb.c b/drivers/video/tegra/fb.c
index db0378d738bb..61db9fe3bc32 100644
--- a/drivers/video/tegra/fb.c
+++ b/drivers/video/tegra/fb.c
@@ -85,9 +85,36 @@ static int tegra_fb_check_var(struct fb_var_screeninfo *var,
var->yoffset = yoffset;
}
+ var->xres_virtual = var->xres;
/* Double yres_virtual to allow double buffering through pan_display */
var->yres_virtual = var->yres * 2;
+ /* we only support RGB ordering for now */
+ switch (var->bits_per_pixel) {
+ case 32:
+ case 24:
+ var->bits_per_pixel = 32;
+ var->red.offset = 0;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 16;
+ var->blue.length = 8;
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ break;
+ case 16:
+ default:
+ var->bits_per_pixel = 16;
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ break;
+ }
+
return 0;
}
@@ -96,102 +123,56 @@ static int tegra_fb_set_par(struct fb_info *info)
struct tegra_fb_info *tegra_fb = info->par;
struct fb_var_screeninfo *var = &info->var;
struct tegra_dc *dc = tegra_fb->win.dc;
+ int err;
- if (var->bits_per_pixel) {
- /* we only support RGB ordering for now */
- switch (var->bits_per_pixel) {
- case 32:
- var->red.offset = 0;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 16;
- var->blue.length = 8;
- var->transp.offset = 24;
- var->transp.length = 8;
- tegra_fb->win.fmt = TEGRA_WIN_FMT_R8G8B8A8;
- break;
- case 16:
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 6;
- var->blue.offset = 0;
- var->blue.length = 5;
- tegra_fb->win.fmt = TEGRA_WIN_FMT_B5G6R5;
- break;
-
- default:
- return -EINVAL;
- }
- /* if line_length unset, then pad the stride */
- if (!info->fix.line_length) {
- info->fix.line_length = var->xres * var->bits_per_pixel
- / 8;
- info->fix.line_length = round_up(info->fix.line_length,
- TEGRA_LINEAR_PITCH_ALIGNMENT);
- }
- tegra_fb->win.stride = info->fix.line_length;
- tegra_fb->win.stride_uv = 0;
- tegra_fb->win.phys_addr_u = 0;
- tegra_fb->win.phys_addr_v = 0;
- }
+ struct tegra_dc_mode mode;
- if (var->pixclock) {
- bool stereo;
- unsigned old_len = 0;
- struct fb_videomode m;
- struct fb_videomode *old_mode = NULL;
- struct tegra_fb_info *tegra_fb = info->par;
+ switch (var->bits_per_pixel) {
+ case 32:
+ tegra_fb->win.fmt = TEGRA_WIN_FMT_R8G8B8A8;
+ break;
+ case 16:
+ tegra_fb->win.fmt = TEGRA_WIN_FMT_B5G6R5;
+ break;
+ default:
+ return -EINVAL;
+ }
+	/* compute the stride and pad it to the Tegra linear pitch alignment */
+ info->fix.line_length = var->xres * var->bits_per_pixel / 8;
+ info->fix.line_length = round_up(info->fix.line_length,
+ TEGRA_LINEAR_PITCH_ALIGNMENT);
+ tegra_fb->win.stride = info->fix.line_length;
+ tegra_fb->win.stride_uv = 0;
+ tegra_fb->win.phys_addr_u = 0;
+ tegra_fb->win.phys_addr_v = 0;
- fb_var_to_videomode(&m, var);
+ tegra_fb->win.w.full = dfixed_const(var->xres);
+ tegra_fb->win.h.full = dfixed_const(var->yres);
+ tegra_fb->win.out_w = var->xres;
+ tegra_fb->win.out_h = var->yres;
- /* Load framebuffer info with new mode details*/
- old_mode = info->mode;
- old_len = info->fix.line_length;
- memcpy(&tegra_fb->mode, &m, sizeof(tegra_fb->mode));
- info->mode = (struct fb_videomode *)&tegra_fb->mode;
- if (!info->mode) {
- dev_warn(&tegra_fb->ndev->dev, "can't match video mode\n");
- info->mode = old_mode;
- return -EINVAL;
- }
+ dev_info(&tegra_fb->ndev->dev, "switching framebuffer to %dx%d\n",
+ var->xres, var->yres);
- /* Update fix line_length and window stride as per new mode */
- info->fix.line_length = var->xres * var->bits_per_pixel / 8;
- info->fix.line_length = round_up(info->fix.line_length,
- TEGRA_LINEAR_PITCH_ALIGNMENT);
- tegra_fb->win.stride = info->fix.line_length;
+ err = tegra_dc_var_to_dc_mode(dc, var, &mode);
+ if (err) {
+ dev_warn(&tegra_fb->ndev->dev, "could not convert var %d\n", err);
+ return -EINVAL;
+ }
- /*
- * only enable stereo if the mode supports it and
- * client requests it
- */
- stereo = !!(var->vmode & info->mode->vmode &
-#ifndef CONFIG_TEGRA_HDMI_74MHZ_LIMIT
- FB_VMODE_STEREO_FRAME_PACK);
-#else
- FB_VMODE_STEREO_LEFT_RIGHT);
-#endif
+ err = tegra_dc_set_mode(dc, &mode);
+ if (err) {
+ dev_warn(&tegra_fb->ndev->dev, "could not set dc mode %d\n", err);
+ return -EINVAL;
+ }
- /* Configure DC with new mode */
- if (tegra_dc_set_fb_mode(dc, info->mode, stereo)) {
- /* Error while configuring DC, fallback to old mode */
- dev_warn(&tegra_fb->ndev->dev, "can't configure dc with mode %ux%u\n",
- info->mode->xres, info->mode->yres);
- info->mode = old_mode;
- info->fix.line_length = old_len;
- tegra_fb->win.stride = old_len;
- return -EINVAL;
- }
+ if (dc->enabled)
+ tegra_dc_disable(dc);
+ tegra_dc_enable(dc);
- tegra_fb->win.w.full = dfixed_const(info->mode->xres);
- tegra_fb->win.h.full = dfixed_const(info->mode->yres);
- tegra_fb->win.out_w = info->mode->xres;
- tegra_fb->win.out_h = info->mode->yres;
- }
- return 0;
+ return err;
}
static int tegra_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -692,9 +673,10 @@ struct tegra_fb_info *tegra_fb_register(struct platform_device *ndev,
void __iomem *fb_base = NULL;
phys_addr_t fb_size = 0;
int ret = 0;
- int mode_idx;
unsigned stride;
- struct fb_videomode m;
+ char *param_option = NULL;
+ const char *option = NULL;
+ char driver[10];
if (!tegra_dc_get_window(dc, fb_data->win)) {
dev_err(&ndev->dev, "dc does not have a window at index %d\n",
@@ -711,8 +693,6 @@ struct tegra_fb_info *tegra_fb_register(struct platform_device *ndev,
tegra_fb = info->par;
tegra_fb->ndev = ndev;
tegra_fb->fb_mem = fb_mem;
- tegra_fb->xres = fb_data->xres;
- tegra_fb->yres = fb_data->yres;
tegra_fb->win.idx = fb_data->win;
@@ -751,10 +731,14 @@ struct tegra_fb_info *tegra_fb_register(struct platform_device *ndev,
info->fix.smem_start = (u32)tegra_fb->phys_start;
info->fix.smem_len = fb_size;
info->fix.line_length = stride;
+
+#if 0
INIT_LIST_HEAD(&info->modelist);
/* pick first mode as the default for initialization */
tegra_dc_to_fb_videomode(&m, &dc->mode);
fb_videomode_to_var(&info->var, &m);
+#endif
+
info->var.xres_virtual = fb_data->xres;
info->var.yres_virtual = fb_data->yres * 2;
info->var.bits_per_pixel = fb_data->bits_per_pixel;
@@ -767,7 +751,7 @@ struct tegra_fb_info *tegra_fb_register(struct platform_device *ndev,
tegra_fb->win.y.full = dfixed_const(0);
tegra_fb->win.w.full = dfixed_const(fb_data->xres);
tegra_fb->win.h.full = dfixed_const(fb_data->yres);
- /* TODO: set to output res dc */
+
tegra_fb->win.out_x = 0;
tegra_fb->win.out_y = 0;
tegra_fb->win.out_w = fb_data->xres;
@@ -782,18 +766,32 @@ struct tegra_fb_info *tegra_fb_register(struct platform_device *ndev,
tegra_fb->win.flags = TEGRA_WIN_FLAG_ENABLED;
tegra_fb->win.global_alpha = 0xFF;
- for (mode_idx = 0; mode_idx < dc->out->n_modes; mode_idx++) {
- struct tegra_dc_mode mode = dc->out->modes[mode_idx];
- struct fb_videomode vmode;
-
- mode.pclk = dc->mode.pclk;
+ /* try to use kernel cmd line specified mode */
+ sprintf(driver, "tegrafb%d", ndev->id);
+ fb_get_options(driver, &param_option);
+ if (param_option != NULL) {
+ option = param_option;
+ dev_info(&ndev->dev, "parse cmd options for %s: %s\n",
+ driver, option);
+ } else {
+ option = dc->out->default_mode;
+ dev_info(&ndev->dev, "use default mode for %s: %s\n",
+ driver, option);
+ }
- if (mode.pclk > 1000) {
- tegra_dc_to_fb_videomode(&vmode, &mode);
- fb_add_videomode(&vmode, &info->modelist);
+ if (option != NULL)
+ {
+ if (!strcmp(option, "off")) {
+ ret = -ENODEV;
+ goto err_iounmap_fb;
+ }
+ if (!tegra_fb_find_mode(&info->var, info, option, 16)) {
+ ret = -EINVAL;
+ goto err_iounmap_fb;
}
}
+ /* activate current settings.. */
if (fb_mem)
tegra_fb_set_par(info);
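With the changes above, the initial framebuffer mode is resolved through the
standard fb core option parsing rather than the platform-data mode list. A
hedged usage sketch; the mode strings below are examples only.

/*
 * On the kernel command line:
 *   video=tegrafb0:1024x600-16@60   picks the matching entry from the
 *                                   tegra_modes/CEA/VESA tables at 16 bpp
 *   video=tegrafb0:off              skips registering that framebuffer
 * With no video= option for the device, dc->out->default_mode is used.
 */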
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 24545cd90a25..abbac368aa1c 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -97,10 +97,116 @@
#define __maybe_unused __attribute__((unused))
#define __always_unused __attribute__((unused))
-#define __gcc_header(x) #x
-#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
-#define gcc_header(x) _gcc_header(x)
-#include gcc_header(__GNUC__)
+/* gcc version specific checks */
+
+#if GCC_VERSION < 30200
+# error Sorry, your compiler is too old - please upgrade it.
+#endif
+
+#if GCC_VERSION < 30300
+# define __used __attribute__((__unused__))
+#else
+# define __used __attribute__((__used__))
+#endif
+
+#ifdef CONFIG_GCOV_KERNEL
+# if GCC_VERSION < 30400
+# error "GCOV profiling support for gcc versions below 3.4 not included"
+# endif /* __GNUC_MINOR__ */
+#endif /* CONFIG_GCOV_KERNEL */
+
+#if GCC_VERSION >= 30400
+#define __must_check __attribute__((warn_unused_result))
+#endif
+
+#if GCC_VERSION >= 40000
+
+/* GCC 4.1.[01] miscompiles __weak */
+#ifdef __KERNEL__
+# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
+# error Your version of gcc miscompiles the __weak directive
+# endif
+#endif
+
+#define __used __attribute__((__used__))
+#define __compiler_offsetof(a, b) \
+ __builtin_offsetof(a, b)
+
+#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+#endif
+
+#if GCC_VERSION >= 40300
+/* Mark functions as cold. gcc will assume any path leading to a call
+ * to them will be unlikely. This means a lot of manual unlikely()s
+ * are unnecessary now for any paths leading to the usual suspects
+ * like BUG(), printk(), panic() etc. [but let's keep them for now for
+ * older compilers]
+ *
+ * Early snapshots of gcc 4.3 don't support this and we can't detect this
+ * in the preprocessor, but we can live with this because they're unreleased.
+ * Maketime probing would be overkill here.
+ *
+ * gcc also has a __attribute__((__hot__)) to move hot functions into
+ * a special section, but I don't see any sense in this right now in
+ * the kernel context
+ */
+#define __cold __attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+#endif /* GCC_VERSION >= 40300 */
+
+#if GCC_VERSION >= 40500
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
+
+#endif /* GCC_VERSION >= 40500 */
+
+#if GCC_VERSION >= 40600
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+#endif
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#if GCC_VERSION >= 40400
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#endif
+#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
+#define __HAVE_BUILTIN_BSWAP16__
+#endif
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#endif /* gcc version >= 40000 specific checks */
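The hunk above folds the former compiler-gcc3.h/compiler-gcc4.h contents into
version checks against GCC_VERSION, which mainline defines earlier in this
header roughly as follows (shown for orientation only, not added by this patch):

#define GCC_VERSION (__GNUC__ * 10000		\
		     + __GNUC_MINOR__ * 100	\
		     + __GNUC_PATCHLEVEL__)

/* e.g. gcc 4.8.2 encodes as 40802, so "GCC_VERSION >= 40600" simply means
 * "gcc 4.6 or newer".
 */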
#if !defined(__noclone)
#define __noclone /* not needed */
diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h
deleted file mode 100644
index 7d89febe4d79..000000000000
--- a/include/linux/compiler-gcc3.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead."
-#endif
-
-#if GCC_VERSION < 30200
-# error Sorry, your compiler is too old - please upgrade it.
-#endif
-
-#if GCC_VERSION >= 30300
-# define __used __attribute__((__used__))
-#else
-# define __used __attribute__((__unused__))
-#endif
-
-#if GCC_VERSION >= 30400
-#define __must_check __attribute__((warn_unused_result))
-#endif
-
-#ifdef CONFIG_GCOV_KERNEL
-# if GCC_VERSION < 30400
-# error "GCOV profiling support for gcc versions below 3.4 not included"
-# endif /* __GNUC_MINOR__ */
-#endif /* CONFIG_GCOV_KERNEL */
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
deleted file mode 100644
index 2507fd2a1eb4..000000000000
--- a/include/linux/compiler-gcc4.h
+++ /dev/null
@@ -1,88 +0,0 @@
-#ifndef __LINUX_COMPILER_H
-#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead."
-#endif
-
-/* GCC 4.1.[01] miscompiles __weak */
-#ifdef __KERNEL__
-# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
-# error Your version of gcc miscompiles the __weak directive
-# endif
-#endif
-
-#define __used __attribute__((__used__))
-#define __must_check __attribute__((warn_unused_result))
-#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
-
-#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
-# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-#endif
-
-#if GCC_VERSION >= 40300
-/* Mark functions as cold. gcc will assume any path leading to a call
- to them will be unlikely. This means a lot of manual unlikely()s
- are unnecessary now for any paths leading to the usual suspects
- like BUG(), printk(), panic() etc. [but let's keep them for now for
- older compilers]
-
- Early snapshots of gcc 4.3 don't support this and we can't detect this
- in the preprocessor, but we can live with this because they're unreleased.
- Maketime probing would be overkill here.
-
- gcc also has a __attribute__((__hot__)) to move hot functions into
- a special section, but I don't see any sense in this right now in
- the kernel context */
-#define __cold __attribute__((__cold__))
-
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
-#ifndef __CHECKER__
-# define __compiletime_warning(message) __attribute__((warning(message)))
-# define __compiletime_error(message) __attribute__((error(message)))
-#endif /* __CHECKER__ */
-#endif /* GCC_VERSION >= 40300 */
-
-#if GCC_VERSION >= 40500
-/*
- * Mark a position in code as unreachable. This can be used to
- * suppress control flow warnings after asm blocks that transfer
- * control elsewhere.
- *
- * Early snapshots of gcc 4.5 don't support this and we can't detect
- * this in the preprocessor, but we can live with this because they're
- * unreleased. Really, we need to have autoconf for the kernel.
- */
-#define unreachable() __builtin_unreachable()
-
-/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__))
-
-#endif /* GCC_VERSION >= 40500 */
-
-#if GCC_VERSION >= 40600
-/*
- * Tell the optimizer that something else uses this function or variable.
- */
-#define __visible __attribute__((externally_visible))
-#endif
-
-/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- * Fixed in GCC 4.8.2 and later versions.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
-#if GCC_VERSION >= 40400
-#define __HAVE_BUILTIN_BSWAP32__
-#define __HAVE_BUILTIN_BSWAP64__
-#endif
-#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
-#define __HAVE_BUILTIN_BSWAP16__
-#endif
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
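
The deleted compiler-gcc4.h above is folded into the unified compiler-gcc.h; its comments already explain the intent of helpers such as __cold and unreachable(). A purely illustrative sketch of how callers typically consume them (not code from this patch):

/* Error paths marked __cold are kept out of the hot text; unreachable()
 * after a never-returning call suppresses "control reaches end of
 * non-void function" style warnings. */
static __cold void example_fatal(const char *msg)
{
	panic("example: %s", msg);
	unreachable();
}
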
diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h
index ca5bd91d12e1..2be299513819 100644
--- a/include/linux/linux_logo.h
+++ b/include/linux/linux_logo.h
@@ -47,6 +47,7 @@ extern const struct linux_logo logo_superh_vga16;
extern const struct linux_logo logo_superh_clut224;
extern const struct linux_logo logo_m32r_clut224;
extern const struct linux_logo logo_spe_clut224;
+extern const struct linux_logo logo_custom_clut224;
extern const struct linux_logo *fb_find_logo(int depth);
#ifdef CONFIG_FB_LOGO_EXTRA
diff --git a/include/linux/mfd/apalis-tk1-k20.h b/include/linux/mfd/apalis-tk1-k20.h
new file mode 100644
index 000000000000..47b906e181e1
--- /dev/null
+++ b/include/linux/mfd/apalis-tk1-k20.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2016-2017 Toradex AG
+ * Dominik Sliwa <dominik.sliwa@toradex.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_APALIS_TK1_K20_H
+#define __LINUX_MFD_APALIS_TK1_K20_H
+
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+/* Commands and registers used in SPI communication */
+
+/* Commands */
+#define APALIS_TK1_K20_READ_INST 0x0F
+#define APALIS_TK1_K20_WRITE_INST 0xF0
+#define APALIS_TK1_K20_BULK_WRITE_INST 0x3C
+#define APALIS_TK1_K20_BULK_READ_INST 0xC3
+
+#define APALIS_TK1_K20_MAX_BULK (32)
+
+/* General registers */
+#define APALIS_TK1_K20_STAREG 0x00 /* general status register RO */
+#define APALIS_TK1_K20_REVREG 0x01 /* FW revision register RO */
+#define APALIS_TK1_K20_IRQREG 0x02 /* IRQ status RW (writing 1 clears the bit) */
+#define APALIS_TK1_K20_CTRREG 0x03 /* general control register RW */
+#define APALIS_TK1_K20_MSQREG 0x04 /* IRQ mask register RW */
+
+/* 0x05-0x0F Reserved */
+
+/* CAN Registers */
+#define APALIS_TK1_K20_CANREG 0x10 /* CAN0 control & status register RW */
+#define APALIS_TK1_K20_CANERR 0x11 /* CAN0 error register RW */
+#define APALIS_TK1_K20_CAN_BAUD_REG 0x12 /* CAN0 baud set register RW */
+#define APALIS_TK1_K20_CAN_BIT_1 0x13 /* CAN0 bit timing register 1 RW */
+#define APALIS_TK1_K20_CAN_BIT_2 0x14 /* CAN0 bit timing register 2 RW */
+#define APALIS_TK1_K20_CAN_IN_BUF_CNT 0x15 /* CAN0 IN received data count RO */
+#define APALIS_TK1_K20_CAN_IN_BUF 0x16 /* CAN0 IN RO */
+/* buffer size is 13 bytes */
+#define APALIS_TK1_K20_CAN_IN_BUF_END 0x22 /* CAN0 IN RO */
+#define APALIS_TK1_K20_CAN_OUT_BUF_CNT 0x23 /* CAN0 OUT data Count WO */
+#define APALIS_TK1_K20_CAN_OUT_BUF 0x26 /* CAN0 OUT WO */
+/* buffer size is 13 bytes */
+#define APALIS_TK1_K20_CAN_OUT_BUF_END (APALIS_TK1_K20_CAN_OUT_BUF + 13 - 1)/* CAN OUT BUF END */
+#define APALIS_TK1_K20_CAN_DEV_OFFSET(x) (x ? 0x30 : 0)
+
+/* 0x33-0x3F Reserved */
+/* 0x40-0x62 CAN1 registers, same layout as CAN0 */
+/* 0x63-0x6F Reserved */
+
+/* ADC Registers */
+#define APALIS_TK1_K20_ADCREG 0x70 /* ADC control & status register RW */
+#define APALIS_TK1_K20_ADC_CH0L 0x71 /* ADC Channel 0 LSB RO */
+#define APALIS_TK1_K20_ADC_CH0H 0x72 /* ADC Channel 0 MSB RO */
+#define APALIS_TK1_K20_ADC_CH1L 0x73 /* ADC Channel 1 LSB RO */
+#define APALIS_TK1_K20_ADC_CH1H 0x74 /* ADC Channel 1 MSB RO */
+#define APALIS_TK1_K20_ADC_CH2L 0x75 /* ADC Channel 2 LSB RO */
+#define APALIS_TK1_K20_ADC_CH2H 0x76 /* ADC Channel 2 MSB RO */
+#define APALIS_TK1_K20_ADC_CH3L 0x77 /* ADC Channel 3 LSB RO */
+#define APALIS_TK1_K20_ADC_CH3H 0x78 /* ADC Channel 3 MSB RO */
+/* A bulk read of an LSB register can be used to read the entire 16-bit
+ * value in one command. A bulk read of the APALIS_TK1_K20_ADC_CH0L
+ * register can be used to read all ADC channels in one command.
+ */
+
+/* 0x79-0x7F reserved */
+
+/* TSC Register */
+#define APALIS_TK1_K20_TSCREG 0x80 /* TSC control & status register RW */
+#define APALIS_TK1_K20_TSC_XML 0x81 /* TSC X- data LSB RO */
+#define APALIS_TK1_K20_TSC_XMH 0x82 /* TSC X- data MSB RO */
+#define APALIS_TK1_K20_TSC_XPL 0x83 /* TSC X+ data LSB RO */
+#define APALIS_TK1_K20_TSC_XPH 0x84 /* TSC X+ data MSB RO */
+#define APALIS_TK1_K20_TSC_YML 0x85 /* TSC Y- data LSB RO */
+#define APALIS_TK1_K20_TSC_YMH 0x86 /* TSC Y- data MSB RO */
+#define APALIS_TK1_K20_TSC_YPL 0x87 /* TSC Y+ data LSB RO */
+#define APALIS_TK1_K20_TSC_YPH 0x88 /* TSC Y+ data MSB RO */
+/* A bulk read of an LSB register can be used to read the entire 16-bit value in one command */
+#define APALIS_TK1_K20_TSC_ENA BIT(0)
+#define APALIS_TK1_K20_TSC_ENA_MASK BIT(0)
+
+/* 0x89-0x8F Reserved */
+
+/* GPIO Registers */
+#define APALIS_TK1_K20_GPIOREG 0x90 /* GPIO control & status register RW */
+#define APALIS_TK1_K20_GPIO_NO 0x91 /* currently configured GPIO RW */
+#define APALIS_TK1_K20_GPIO_STA 0x92 /* Status register for the APALIS_TK1_K20_GPIO_NO GPIO RW */
+/* MSB | 0 ... 0 | VALUE | Output-1 / Input-0 | LSB */
+#define APALIS_TK1_K20_GPIO_STA_OE BIT(0)
+#define APALIS_TK1_K20_GPIO_STA_VAL BIT(1)
+
+/* 0x93-0xFD Reserved */
+#define APALIS_TK1_K20_RET_REQ 0xFE
+/* 0xFF Reserved */
+
+/* Interrupt flags */
+#define APALIS_TK1_K20_GEN_IRQ 0
+#define APALIS_TK1_K20_CAN0_IRQ 1
+#define APALIS_TK1_K20_CAN1_IRQ 2
+#define APALIS_TK1_K20_ADC_IRQ 3
+#define APALIS_TK1_K20_TSC_IRQ 4
+#define APALIS_TK1_K20_GPIO_IRQ 5
+
+#define APALIS_TK1_K20_FW_VER 0x0A
+
+#define FW_MINOR (APALIS_TK1_K20_FW_VER & 0x0F)
+#define FW_MAJOR ((APALIS_TK1_K20_FW_VER & 0xF0) >> 4)
+
+#define TK1_K20_SENTINEL 0x55
+#define TK1_K20_INVAL 0xAA
+
+#define APALIS_TK1_K20_NUMREGS 0x3f
+#define APALIS_TK1_K20_IRQ_REG_CNT 1
+#define APALIS_TK1_K20_IRQ_PER_REG 8
+
+#define APALIS_TK1_CAN_CLK_UNIT 6250
+
+#define APALIS_TK1_K20_MAX_SPI_SPEED 6000000
+
+struct apalis_tk1_k20_regmap {
+ struct regmap *regmap;
+
+ struct device *dev;
+
+ struct regmap_irq irqs[APALIS_TK1_K20_IRQ_REG_CNT * APALIS_TK1_K20_IRQ_PER_REG];
+ struct regmap_irq_chip irq_chip;
+ struct regmap_irq_chip_data *irq_data;
+
+ struct mutex lock;
+ int irq;
+ int flags;
+
+ int ezpcs_gpio;
+ int reset_gpio;
+ int appcs_gpio;
+ int int2_gpio;
+};
+
+void apalis_tk1_k20_lock(struct apalis_tk1_k20_regmap *apalis_tk1_k20);
+void apalis_tk1_k20_unlock(struct apalis_tk1_k20_regmap *apalis_tk1_k20);
+
+int apalis_tk1_k20_reg_read(struct apalis_tk1_k20_regmap *apalis_tk1_k20, unsigned int offset, u32 *val);
+int apalis_tk1_k20_reg_write(struct apalis_tk1_k20_regmap *apalis_tk1_k20, unsigned int offset, u32 val);
+int apalis_tk1_k20_reg_read_bulk(struct apalis_tk1_k20_regmap *apalis_tk1_k20, unsigned int offset,
+ uint8_t *val, size_t size);
+int apalis_tk1_k20_reg_write_bulk(struct apalis_tk1_k20_regmap *apalis_tk1_k20, unsigned int offset,
+ uint8_t *val, size_t size);
+int apalis_tk1_k20_reg_rmw(struct apalis_tk1_k20_regmap *apalis_tk1_k20, unsigned int offset,
+ u32 mask, u32 val);
+
+int apalis_tk1_k20_irq_mask(struct apalis_tk1_k20_regmap *apalis_tk1_k20, int irq);
+int apalis_tk1_k20_irq_unmask(struct apalis_tk1_k20_regmap *apalis_tk1_k20, int irq);
+int apalis_tk1_k20_irq_request(struct apalis_tk1_k20_regmap *apalis_tk1_k20, int irq,
+ irq_handler_t handler, const char *name, void *dev);
+int apalis_tk1_k20_irq_free(struct apalis_tk1_k20_regmap *apalis_tk1_k20, int irq, void *dev);
+
+int apalis_tk1_k20_irq_status(struct apalis_tk1_k20_regmap *apalis_tk1_k20, int irq,
+ int *enabled, int *pending);
+
+int apalis_tk1_k20_get_flags(struct apalis_tk1_k20_regmap *apalis_tk1_k20);
+
+struct apalis_tk1_k20_can_platform_data {
+ uint8_t id;
+ u16 status;
+};
+
+struct apalis_tk1_k20_tsc_platform_data {
+ u16 status;
+};
+
+struct apalis_tk1_k20_adc_platform_data {
+ u16 status;
+};
+
+struct apalis_tk1_k20_gpio_platform_data {
+ u16 status;
+};
+
+#define APALIS_TK1_K20_USES_TSC BIT(0)
+#define APALIS_TK1_K20_USES_ADC BIT(1)
+#define APALIS_TK1_K20_USES_CAN BIT(2)
+#define APALIS_TK1_K20_USES_GPIO BIT(3)
+
+struct apalis_tk1_k20_platform_data {
+ unsigned int flags;
+
+ struct apalis_tk1_k20_tsc_platform_data touch;
+ struct apalis_tk1_k20_adc_platform_data adc;
+ struct apalis_tk1_k20_can_platform_data can0;
+ struct apalis_tk1_k20_can_platform_data can1;
+ struct apalis_tk1_k20_gpio_platform_data gpio;
+
+ int ezpcs_gpio;
+ int reset_gpio;
+ int appcs_gpio;
+ int int2_gpio;
+};
+
+#define APALIS_TK1_K20_ADC_CHANNELS 4
+#define APALIS_TK1_K20_ADC_BITS 16
+#define APALIS_TK1_K20_VADC_MILI 3300
+
+enum apalis_tk1_k20_adc_id {
+ APALIS_TK1_K20_ADC1,
+ APALIS_TK1_K20_ADC2,
+ APALIS_TK1_K20_ADC3,
+ APALIS_TK1_K20_ADC4
+};
+
+#endif /* ifndef __LINUX_MFD_APALIS_TK1_K20_H */
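
As noted in the ADC register comments above, a single bulk read starting at APALIS_TK1_K20_ADC_CH0L returns all channels at once. A hypothetical client sketch using the helpers declared in this header; the LSB-first byte order is an assumption taken from the register naming, not verified against the K20 firmware:

/* Hypothetical sketch (not part of this patch): fetch all four ADC
 * channels with one bulk transfer. */
static int example_read_adc(struct apalis_tk1_k20_regmap *k20,
			    u16 ch[APALIS_TK1_K20_ADC_CHANNELS])
{
	u8 buf[APALIS_TK1_K20_ADC_CHANNELS * 2];
	int ret, i;

	apalis_tk1_k20_lock(k20);
	ret = apalis_tk1_k20_reg_read_bulk(k20, APALIS_TK1_K20_ADC_CH0L,
					   buf, sizeof(buf));
	apalis_tk1_k20_unlock(k20);
	if (ret)
		return ret;

	for (i = 0; i < APALIS_TK1_K20_ADC_CHANNELS; i++)
		ch[i] = buf[2 * i] | (buf[2 * i + 1] << 8);	/* assumed LSB first */

	return 0;
}
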
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index fe10a0dab604..5745a7d490a2 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -72,7 +72,13 @@
#include <linux/tegra_pm_domains.h>
#endif
+#ifndef CONFIG_MACH_APALIS_TK1
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
+#else
+/* Hack to register the HDA sound card as the 2nd one so that the SGTL5000
+   on our modules shows up as the primary one */
+static int index[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = 1};
+#endif
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
static char *model[SNDRV_CARDS];
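
The index[] override above uses GCC's range-designator extension so every potential HDA card requests index 1, leaving index 0 for the SGTL5000 card. A standalone illustration of the construct (not part of the patch):

/* { [0 ... 3] = 1 } expands to { 1, 1, 1, 1 }. */
static int example_index[4] = { [0 ... 3] = 1 };
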
diff --git a/sound/soc/tegra/Kconfig b/sound/soc/tegra/Kconfig
index 6484c80c6130..3033e440f1e4 100644
--- a/sound/soc/tegra/Kconfig
+++ b/sound/soc/tegra/Kconfig
@@ -286,6 +286,19 @@ config SND_SOC_TEGRA_RT5639
boards using the ALC5639 codec. Currently, the supported board
is Kai and Ardbeg.
+config SND_SOC_TEGRA_APALIS_TK1
+ tristate "SoC Audio support for Apalis TK1 module"
+ depends on SND_SOC_TEGRA && I2C && TEGRA_DC
+ select SND_SOC_TEGRA30_I2S if !ARCH_TEGRA_2x_SOC
+ select SND_SOC_TEGRA30_SPDIF if !ARCH_TEGRA_2x_SOC
+ select SND_SOC_SGTL5000
+ select SND_SOC_SPDIF
+ select SND_SOC_TEGRA30_AVP if !ARCH_TEGRA_2x_SOC
+ select SND_SOC_TEGRA30_DAM if !ARCH_TEGRA_2x_SOC
+ help
+ Say Y or M here if you want to add support for SoC audio on the
+ Toradex Apalis TK1 module.
+
config SND_SOC_TEGRA_RT5640
tristate "SoC Audio support for Tegra boards using a ALC5640 codec"
depends on SND_SOC_TEGRA && I2C && TEGRA_DC
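
With the new Kconfig entry, the machine driver is selected from a defconfig in the usual way; an illustrative fragment (not taken from this patch):

CONFIG_SND_SOC_TEGRA=y
CONFIG_SND_SOC_TEGRA_APALIS_TK1=y
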
diff --git a/sound/soc/tegra/Makefile b/sound/soc/tegra/Makefile
index 0434fa19132e..67a467d719f4 100644
--- a/sound/soc/tegra/Makefile
+++ b/sound/soc/tegra/Makefile
@@ -37,6 +37,7 @@ snd-soc-tegra-alc5632-objs := tegra_alc5632.o
snd-soc-tegra-max98088-objs := tegra_max98088.o
snd-soc-tegra-aic326x-objs := tegra_aic326x.o
snd-soc-tegra-aic325x-objs := tegra_aic325x.o
+snd-soc-tegra-apalis-tk1-objs := apalis-tk1.o
snd-soc-tegra-rt5640-objs := tegra_rt5640.o
snd-soc-tegra-rt5645-objs := tegra_rt5645.o
snd-soc-tegra-rt5639-objs := tegra_rt5639.o
@@ -54,6 +55,7 @@ obj-$(CONFIG_SND_SOC_TEGRA_ALC5632) += snd-soc-tegra-alc5632.o
obj-$(CONFIG_SND_SOC_TEGRA_MAX98088) += snd-soc-tegra-max98088.o
obj-$(CONFIG_SND_SOC_TEGRA_TLV320AIC326X) += snd-soc-tegra-aic326x.o
obj-$(CONFIG_SND_SOC_TEGRA_TLV320AIC325X) += snd-soc-tegra-aic325x.o
+obj-$(CONFIG_SND_SOC_TEGRA_APALIS_TK1) += snd-soc-tegra-apalis-tk1.o
obj-$(CONFIG_SND_SOC_TEGRA_RT5640) += snd-soc-tegra-rt5640.o
obj-$(CONFIG_SND_SOC_TEGRA_RT5645) += snd-soc-tegra-rt5645.o
obj-$(CONFIG_SND_SOC_TEGRA_RT5639) += snd-soc-tegra-rt5639.o
diff --git a/sound/soc/tegra/apalis-tk1.c b/sound/soc/tegra/apalis-tk1.c
new file mode 100644
index 000000000000..4f08b1d32980
--- /dev/null
+++ b/sound/soc/tegra/apalis-tk1.c
@@ -0,0 +1,423 @@
+/*
+ * SoC audio driver for Toradex Apalis TK1
+ *
+ * Copyright (C) 2012-2016 Toradex Inc.
+ *
+ * 2012-02-12: Marcel Ziswiler <marcel.ziswiler@toradex.com>
+ * initial version for Apalis/Colibri T30
+ *
+ * Copied from tegra_wm8903.c
+ * Copyright (C) 2010-2011 - NVIDIA, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <asm/mach-types.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <mach/tegra_asoc_pdata.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "../codecs/sgtl5000.h"
+
+#include "tegra_pcm.h"
+#include "tegra_asoc_utils.h"
+
+#define DRV_NAME "tegra-snd-apalis-tk1-sgtl5000"
+
+#define DAI_LINK_HIFI 0
+#define DAI_LINK_SPDIF 1
+#define NUM_DAI_LINKS 2
+
+struct apalis_tk1_sgtl5000 {
+ struct tegra_asoc_utils_data util_data;
+ struct tegra_asoc_platform_data *pdata;
+ enum snd_soc_bias_level bias_level;
+};
+
+static int apalis_tk1_sgtl5000_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_card *card = codec->card;
+ struct apalis_tk1_sgtl5000 *machine = snd_soc_card_get_drvdata(card);
+ struct tegra_asoc_platform_data *pdata = machine->pdata;
+ int srate, mclk, i2s_daifmt;
+ int err;
+ int rate;
+
+ /* The SGTL5000 does not support 512*rate at a 96 kHz sample rate */
+ srate = params_rate(params);
+ switch (srate) {
+ case 96000:
+ mclk = 256 * srate;
+ break;
+ default:
+ mclk = 512 * srate;
+ break;
+ }
+
+ /* SGTL5000 sysclk must be >= 8 MHz and <= 27 MHz */
+ if (mclk < 8000000 || mclk > 27000000)
+ return -EINVAL;
+
+ if (pdata->i2s_param[HIFI_CODEC].is_i2s_master)
+ i2s_daifmt = SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS;
+ else
+ i2s_daifmt = SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM;
+
+ /* Use DSP mode for mono on Tegra20 */
+ if (params_channels(params) != 2) {
+ i2s_daifmt |= SND_SOC_DAIFMT_DSP_A;
+ } else {
+ switch (pdata->i2s_param[HIFI_CODEC].i2s_mode) {
+ case TEGRA_DAIFMT_I2S:
+ i2s_daifmt |= SND_SOC_DAIFMT_I2S;
+ break;
+ case TEGRA_DAIFMT_DSP_A:
+ i2s_daifmt |= SND_SOC_DAIFMT_DSP_A;
+ break;
+ case TEGRA_DAIFMT_DSP_B:
+ i2s_daifmt |= SND_SOC_DAIFMT_DSP_B;
+ break;
+ case TEGRA_DAIFMT_LEFT_J:
+ i2s_daifmt |= SND_SOC_DAIFMT_LEFT_J;
+ break;
+ case TEGRA_DAIFMT_RIGHT_J:
+ i2s_daifmt |= SND_SOC_DAIFMT_RIGHT_J;
+ break;
+ default:
+ dev_err(card->dev, "Can't configure i2s format\n");
+ return -EINVAL;
+ }
+ }
+
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ if (!(machine->util_data.set_mclk % mclk)) {
+ mclk = machine->util_data.set_mclk;
+ } else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+ }
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+ rate = clk_get_rate(machine->util_data.clk_cdev1);
+
+ err = snd_soc_dai_set_fmt(codec_dai, i2s_daifmt);
+ if (err < 0) {
+ dev_err(card->dev, "codec_dai fmt not set\n");
+ return err;
+ }
+
+ err = snd_soc_dai_set_fmt(cpu_dai, i2s_daifmt);
+ if (err < 0) {
+ dev_err(card->dev, "cpu_dai fmt not set\n");
+ return err;
+ }
+
+ if (pdata->i2s_param[HIFI_CODEC].is_i2s_master) {
+ /* Set SGTL5000's SYSCLK (provided by clk_out_1) */
+ err =
+ snd_soc_dai_set_sysclk(codec_dai, SGTL5000_SYSCLK, rate,
+ SND_SOC_CLOCK_IN);
+ if (err < 0) {
+ dev_err(card->dev, "codec_dai clock not set\n");
+ return err;
+ }
+ }
+ /* else TBD */
+
+ return 0;
+}
+
+static int tegra_spdif_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct apalis_tk1_sgtl5000 *machine = snd_soc_card_get_drvdata(card);
+ int srate, mclk, min_mclk;
+ int err;
+
+ srate = params_rate(params);
+ switch (srate) {
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ mclk = 11289600;
+ break;
+ case 8000:
+ case 16000:
+ case 32000:
+ case 48000:
+ case 64000:
+ case 96000:
+ mclk = 12288000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ min_mclk = 128 * srate;
+
+ err = tegra_asoc_utils_set_rate(&machine->util_data, srate, mclk);
+ if (err < 0) {
+ if (!(machine->util_data.set_mclk % min_mclk))
+ mclk = machine->util_data.set_mclk;
+ else {
+ dev_err(card->dev, "Can't configure clocks\n");
+ return err;
+ }
+ }
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 1);
+
+ return 0;
+}
+
+static int tegra_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct apalis_tk1_sgtl5000 *machine =
+ snd_soc_card_get_drvdata(rtd->card);
+
+ tegra_asoc_utils_lock_clk_rate(&machine->util_data, 0);
+
+ return 0;
+}
+
+static struct snd_soc_ops apalis_tk1_sgtl5000_ops = {
+ .hw_params = apalis_tk1_sgtl5000_hw_params,
+ .hw_free = tegra_hw_free,
+};
+
+static struct snd_soc_ops tegra_spdif_ops = {
+ .hw_params = tegra_spdif_hw_params,
+ .hw_free = tegra_hw_free,
+};
+
+/* Apalis TK1 machine DAPM widgets */
+static const struct snd_soc_dapm_widget apalis_tk1_sgtl5000_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_LINE("Line In Jack", NULL),
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+};
+
+/* Apalis TK1 machine audio map (connections to the codec pins) */
+static const struct snd_soc_dapm_route apalis_tk1_sgtl5000_dapm_route[] = {
+ /* Apalis MXM3 pin 306 (MIC)
+ Apalis Evaluation Board: Audio jack X26 bottom pink
+ Ixora: Audio jack X12 pin 4 */
+ /* TBD: mic bias GPIO handling */
+ {"Mic Jack", NULL, "MIC_IN"},
+
+ /* Apalis MXM3 pin 310 & 312 (LINEIN_L/R)
+ Apalis Evaluation Board: Audio jack X26 top blue
+ Ixora: Line IN – S/PDIF header X18 pin 6 & 7 */
+ {"Line In Jack", NULL, "LINE_IN"},
+
+ /* Apalis MXM3 pin 316 & 318 (HP_L/R)
+ Apalis Evaluation Board: Audio jack X26 middle green
+ Ixora: Audio jack X12 */
+ /* TBD: HP PGA handling */
+ {"Headphone Jack", NULL, "HP_OUT"},
+};
+
+static int apalis_tk1_sgtl5000_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+ struct snd_soc_card *card = codec->card;
+ struct apalis_tk1_sgtl5000 *machine = snd_soc_card_get_drvdata(card);
+ int ret;
+
+ machine->bias_level = SND_SOC_BIAS_STANDBY;
+
+ ret = tegra_asoc_utils_register_ctls(&machine->util_data);
+ if (ret < 0)
+ return ret;
+
+ snd_soc_dapm_nc_pin(dapm, "LINE_OUT");
+
+ snd_soc_dapm_sync(dapm);
+
+ return 0;
+}
+
+static struct snd_soc_dai_link apalis_tk1_sgtl5000_dai[NUM_DAI_LINKS] = {
+ [DAI_LINK_HIFI] = {
+ .name = "SGTL5000",
+ .stream_name = "SGTL5000 PCM",
+ .codec_name = "sgtl5000.4-000a",
+ .platform_name = "tegra30-i2s.2",
+ .cpu_dai_name = "tegra30-i2s.2",
+ .codec_dai_name = "sgtl5000",
+ .init = apalis_tk1_sgtl5000_init,
+ .ops = &apalis_tk1_sgtl5000_ops,
+ },
+ [DAI_LINK_SPDIF] = {
+ .name = "SPDIF",
+ .stream_name = "SPDIF PCM",
+ .codec_name = "spdif-dit.0",
+ .platform_name = "tegra30-spdif",
+ .cpu_dai_name = "tegra30-spdif",
+ .codec_dai_name = "dit-hifi",
+ .ops = &tegra_spdif_ops,
+ },
+};
+
+static struct snd_soc_card snd_soc_apalis_tk1_sgtl5000 = {
+ .name = "Toradex Apalis TK1 SGTL5000",
+ .owner = THIS_MODULE,
+ .dai_link = apalis_tk1_sgtl5000_dai,
+ .num_links = ARRAY_SIZE(apalis_tk1_sgtl5000_dai),
+ .dapm_widgets = apalis_tk1_sgtl5000_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(apalis_tk1_sgtl5000_dapm_widgets),
+ .dapm_routes = apalis_tk1_sgtl5000_dapm_route,
+ .num_dapm_routes = ARRAY_SIZE(apalis_tk1_sgtl5000_dapm_route),
+ .fully_routed = true,
+};
+
+static int apalis_tk1_sgtl5000_driver_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &snd_soc_apalis_tk1_sgtl5000;
+ struct apalis_tk1_sgtl5000 *machine;
+ struct tegra_asoc_platform_data *pdata;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data supplied\n");
+ return -EINVAL;
+ }
+
+ if (pdata->codec_name)
+ card->dai_link[DAI_LINK_HIFI].codec_name = pdata->codec_name;
+
+ if (pdata->codec_dai_name) {
+ card->dai_link[DAI_LINK_HIFI].codec_dai_name =
+ pdata->codec_dai_name;
+ }
+
+ machine = kzalloc(sizeof(struct apalis_tk1_sgtl5000), GFP_KERNEL);
+ if (!machine) {
+ dev_err(&pdev->dev,
+ "Can't allocate apalis_tk1_sgtl5000 struct\n");
+ return -ENOMEM;
+ }
+
+ machine->pdata = pdata;
+
+ ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev, card);
+ if (ret)
+ goto err_free_machine;
+
+ card->dev = &pdev->dev;
+ platform_set_drvdata(pdev, card);
+ snd_soc_card_set_drvdata(card, machine);
+
+ ret = snd_soc_register_card(card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
+ goto err_fini_utils;
+ }
+
+ if (!card->instantiated) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "sound card not instantiated (%d)\n", ret);
+ goto err_unregister_card;
+ }
+
+ ret = tegra_asoc_utils_set_parent(&machine->util_data,
+ pdata->i2s_param[HIFI_CODEC].
+ is_i2s_master);
+ if (ret) {
+ dev_err(&pdev->dev, "tegra_asoc_utils_set_parent failed (%d)\n",
+ ret);
+ goto err_unregister_card;
+ }
+
+ return 0;
+
+ err_unregister_card:
+ snd_soc_unregister_card(card);
+ err_fini_utils:
+ tegra_asoc_utils_fini(&machine->util_data);
+ err_free_machine:
+ kfree(machine);
+ return ret;
+}
+
+static int apalis_tk1_sgtl5000_driver_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct apalis_tk1_sgtl5000 *machine = snd_soc_card_get_drvdata(card);
+
+ snd_soc_unregister_card(card);
+
+ tegra_asoc_utils_fini(&machine->util_data);
+
+ kfree(machine);
+
+ return 0;
+}
+
+static struct platform_driver apalis_tk1_sgtl5000_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
+ },
+ .probe = apalis_tk1_sgtl5000_driver_probe,
+ .remove = apalis_tk1_sgtl5000_driver_remove,
+};
+
+static int __init apalis_tk1_sgtl5000_modinit(void)
+{
+ return platform_driver_register(&apalis_tk1_sgtl5000_driver);
+}
+module_init(apalis_tk1_sgtl5000_modinit);
+
+static void __exit apalis_tk1_sgtl5000_modexit(void)
+{
+ platform_driver_unregister(&apalis_tk1_sgtl5000_driver);
+}
+module_exit(apalis_tk1_sgtl5000_modexit);
+
+/* Module information */
+MODULE_AUTHOR("Marcel Ziswiler <marcel.ziswiler@toradex.com>");
+MODULE_DESCRIPTION("ALSA SoC SGTL5000 on Toradex Apalis TK1");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
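
The machine driver above binds via platform data rather than device tree, so a board file has to register a matching platform device. A hypothetical board-side sketch, using only the tegra_asoc_platform_data fields the hw_params() code actually reads; the device name must equal DRV_NAME:

/* Hypothetical registration snippet for a board file (not part of
 * this patch). */
static struct tegra_asoc_platform_data apalis_tk1_audio_pdata = {
	.i2s_param[HIFI_CODEC] = {
		.is_i2s_master	= 1,
		.i2s_mode	= TEGRA_DAIFMT_I2S,
	},
};

static struct platform_device apalis_tk1_audio_device = {
	.name	= "tegra-snd-apalis-tk1-sgtl5000",	/* matches DRV_NAME */
	.id	= 0,
	.dev	= {
		.platform_data = &apalis_tk1_audio_pdata,
	},
};

/* ...and in the board init code: */
/* platform_device_register(&apalis_tk1_audio_device); */
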