Diffstat (limited to 'drivers/net/igb/igb_ethtool.c')
-rw-r--r--  drivers/net/igb/igb_ethtool.c  2214
1 file changed, 1533 insertions(+), 681 deletions(-)
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index c9e3d65bd71d..dc3cacc1a9c9 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2011 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -27,18 +27,27 @@
/* ethtool support for igb */
-#include <linux/vmalloc.h>
#include <linux/netdevice.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
+#include <linux/vmalloc.h>
+
+#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+#include <linux/highmem.h>
#include "igb.h"
-
+#include "igb_regtest.h"
+#include <linux/if_vlan.h>
+#ifdef ETHTOOL_GEEE
+#include <linux/mdio.h>
+#endif
+
+#ifdef ETHTOOL_OPS_COMPAT
+#include "kcompat_ethtool.c"
+#endif
+#ifdef ETHTOOL_GSTATS
struct igb_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -50,6 +59,7 @@ struct igb_stats {
.sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
.stat_offset = offsetof(struct igb_adapter, _stat) \
}
+
static const struct igb_stats igb_gstrings_stats[] = {
IGB_STAT("rx_packets", stats.gprc),
IGB_STAT("tx_packets", stats.gptc),
@@ -83,6 +93,10 @@ static const struct igb_stats igb_gstrings_stats[] = {
IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
IGB_STAT("rx_long_byte_count", stats.gorc),
IGB_STAT("tx_dma_out_of_sync", stats.doosync),
+#ifndef IGB_NO_LRO
+ IGB_STAT("lro_aggregated", lro_stats.coal),
+ IGB_STAT("lro_flushed", lro_stats.flushed),
+#endif /* IGB_LRO */
IGB_STAT("tx_smbus", stats.mgptc),
IGB_STAT("rx_smbus", stats.mgprc),
IGB_STAT("dropped_smbus", stats.mgpdc),
@@ -90,13 +104,18 @@ static const struct igb_stats igb_gstrings_stats[] = {
IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
+#ifdef HAVE_PTP_1588_CLOCK
+ IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+#endif /* HAVE_PTP_1588_CLOCK */
};
#define IGB_NETDEV_STAT(_net_stat) { \
- .stat_string = __stringify(_net_stat), \
- .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
- .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
+ .stat_string = #_net_stat, \
+ .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+ .stat_offset = offsetof(struct net_device_stats, _net_stat) \
}
+
static const struct igb_stats igb_gstrings_net_stats[] = {
IGB_NETDEV_STAT(rx_errors),
IGB_NETDEV_STAT(tx_errors),
@@ -109,15 +128,12 @@ static const struct igb_stats igb_gstrings_net_stats[] = {
IGB_NETDEV_STAT(tx_heartbeat_errors)
};
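
Note: the IGB_STAT()/IGB_NETDEV_STAT() macros above record each counter's name, size, and byte offset so one generic loop can copy arbitrary structure members into the flat u64 array ethtool expects. A standalone sketch of that table-driven technique (toy struct and field names, not the driver's own):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_adapter {			/* stand-in for struct igb_adapter */
	uint64_t rx_packets;
	uint32_t restart_queue;
};

struct demo_stat {
	char name[32];
	int  size;			/* sizeof the member */
	int  offset;			/* offsetof the member */
};

#define DEMO_STAT(m) { #m, sizeof(((struct demo_adapter *)0)->m), \
		       offsetof(struct demo_adapter, m) }

static const struct demo_stat demo_stats[] = {
	DEMO_STAT(rx_packets),
	DEMO_STAT(restart_queue),
};

int main(void)
{
	struct demo_adapter a = { .rx_packets = 42, .restart_queue = 3 };
	uint64_t out[2];
	size_t i;

	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
		char *p = (char *)&a + demo_stats[i].offset;
		/* widen 32-bit counters to u64, copy 64-bit ones as-is */
		out[i] = (demo_stats[i].size == sizeof(uint64_t)) ?
			 *(uint64_t *)p : *(uint32_t *)p;
		printf("%s = %llu\n", demo_stats[i].name,
		       (unsigned long long)out[i]);
	}
	return 0;
}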
-#define IGB_GLOBAL_STATS_LEN \
- (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
-#define IGB_NETDEV_STATS_LEN \
- (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
+#define IGB_GLOBAL_STATS_LEN ARRAY_SIZE(igb_gstrings_stats)
+#define IGB_NETDEV_STATS_LEN ARRAY_SIZE(igb_gstrings_net_stats)
#define IGB_RX_QUEUE_STATS_LEN \
(sizeof(struct igb_rx_queue_stats) / sizeof(u64))
-
-#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */
-
+#define IGB_TX_QUEUE_STATS_LEN \
+ (sizeof(struct igb_tx_queue_stats) / sizeof(u64))
#define IGB_QUEUE_STATS_LEN \
((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
IGB_RX_QUEUE_STATS_LEN) + \
@@ -126,12 +142,16 @@ static const struct igb_stats igb_gstrings_net_stats[] = {
#define IGB_STATS_LEN \
(IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)", "Eeprom test (offline)",
"Interrupt test (offline)", "Loopback test (offline)",
"Link test (on/offline)"
};
+
#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
+#endif /* ETHTOOL_TEST */
static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
@@ -147,7 +167,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full|
SUPPORTED_Autoneg |
- SUPPORTED_TP);
+ SUPPORTED_TP |
+ SUPPORTED_Pause);
ecmd->advertising = ADVERTISED_TP;
if (hw->mac.autoneg == 1) {
@@ -158,43 +179,102 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->port = PORT_TP;
ecmd->phy_address = hw->phy.addr;
+ ecmd->transceiver = XCVR_INTERNAL;
+
} else {
- ecmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE |
- SUPPORTED_Autoneg);
+ ecmd->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause);
+ if (hw->mac.type == e1000_i354)
+ ecmd->supported |= (SUPPORTED_2500baseX_Full);
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg);
+ ecmd->advertising = ADVERTISED_FIBRE;
+
+ switch (adapter->link_speed) {
+ case SPEED_2500:
+ ecmd->advertising = ADVERTISED_2500baseX_Full;
+ break;
+ case SPEED_1000:
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+ break;
+ case SPEED_100:
+ ecmd->advertising = ADVERTISED_100baseT_Full;
+ break;
+ default:
+ break;
+ }
+
+ if (hw->mac.autoneg == 1)
+ ecmd->advertising |= ADVERTISED_Autoneg;
ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
}
- ecmd->transceiver = XCVR_INTERNAL;
+ if (hw->mac.autoneg != 1)
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+
+ if (hw->fc.requested_mode == e1000_fc_full)
+ ecmd->advertising |= ADVERTISED_Pause;
+ else if (hw->fc.requested_mode == e1000_fc_rx_pause)
+ ecmd->advertising |= (ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ else if (hw->fc.requested_mode == e1000_fc_tx_pause)
+ ecmd->advertising |= ADVERTISED_Asym_Pause;
+ else
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
- status = rd32(E1000_STATUS);
+ status = E1000_READ_REG(hw, E1000_STATUS);
if (status & E1000_STATUS_LU) {
-
- if ((status & E1000_STATUS_SPEED_1000) ||
- hw->phy.media_type != e1000_media_type_copper)
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
+ if ((hw->mac.type == e1000_i354) &&
+ (status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER))
+ ecmd->speed = SPEED_2500;
+ else if (status & E1000_STATUS_SPEED_1000)
+ ecmd->speed = SPEED_1000;
else if (status & E1000_STATUS_SPEED_100)
- ethtool_cmd_speed_set(ecmd, SPEED_100);
+ ecmd->speed = SPEED_100;
else
- ethtool_cmd_speed_set(ecmd, SPEED_10);
+ ecmd->speed = SPEED_10;
if ((status & E1000_STATUS_FD) ||
hw->phy.media_type != e1000_media_type_copper)
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
+
} else {
- ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->speed = -1;
ecmd->duplex = -1;
}
- ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ if ((hw->phy.media_type == e1000_media_type_fiber) ||
+ hw->mac.autoneg)
+ ecmd->autoneg = AUTONEG_ENABLE;
+ else
+ ecmd->autoneg = AUTONEG_DISABLE;
+#ifdef ETH_TP_MDI_X
+
+ /* MDI-X => 2; MDI =>1; Invalid =>0 */
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+ ETH_TP_MDI;
+ else
+ ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+#ifdef ETH_TP_MDI_AUTO
+ if (hw->phy.mdix == AUTO_ALL_MODES)
+ ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ else
+ ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
+#endif
+#endif /* ETH_TP_MDI_X */
return 0;
}
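
Note: the requested flow-control mode is folded into the advertised pause bits just above: full pause advertises Pause, rx-only advertises Pause|Asym_Pause, and tx-only advertises Asym_Pause alone. A minimal standalone sketch of that mapping (local enum and bit names, not the driver's):

#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

#define ADV_PAUSE	(1u << 0)
#define ADV_ASYM_PAUSE	(1u << 1)

static unsigned int fc_to_advertising(enum fc_mode mode)
{
	switch (mode) {
	case FC_FULL:		return ADV_PAUSE;
	case FC_RX_PAUSE:	return ADV_PAUSE | ADV_ASYM_PAUSE;
	case FC_TX_PAUSE:	return ADV_ASYM_PAUSE;
	default:		return 0;	/* no pause advertised */
	}
}

int main(void)
{
	for (int m = FC_NONE; m <= FC_FULL; m++)
		printf("fc mode %d -> adv bits 0x%x\n", m, fc_to_advertising(m));
	return 0;
}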
@@ -203,33 +283,95 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ if (ecmd->duplex == DUPLEX_HALF) {
+ if (!hw->dev_spec._82575.eee_disable)
+ dev_info(pci_dev_to_dev(adapter->pdev), "EEE disabled: not supported with half duplex\n");
+ hw->dev_spec._82575.eee_disable = true;
+ } else {
+ if (hw->dev_spec._82575.eee_disable)
+ dev_info(pci_dev_to_dev(adapter->pdev), "EEE enabled\n");
+ hw->dev_spec._82575.eee_disable = false;
+ }
+
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed */
- if (igb_check_reset_block(hw)) {
- dev_err(&adapter->pdev->dev, "Cannot change link "
+ if (e1000_check_reset_block(hw)) {
+ dev_err(pci_dev_to_dev(adapter->pdev), "Cannot change link "
"characteristics when SoL/IDER is active.\n");
return -EINVAL;
}
+#ifdef ETH_TP_MDI_AUTO
+ /*
+ * MDI setting is only allowed when autoneg enabled because
+ * some hardware doesn't allow MDI setting when speed or
+ * duplex is forced.
+ */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ if (hw->phy.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (ecmd->autoneg != AUTONEG_ENABLE)) {
+ dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+ return -EINVAL;
+ }
+ }
+
+#endif /* ETH_TP_MDI_AUTO */
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (ecmd->autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
- hw->phy.autoneg_advertised = ecmd->advertising |
- ADVERTISED_TP |
- ADVERTISED_Autoneg;
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ hw->phy.autoneg_advertised = ecmd->advertising |
+ ADVERTISED_FIBRE |
+ ADVERTISED_Autoneg;
+ switch (adapter->link_speed) {
+ case SPEED_2500:
+ hw->phy.autoneg_advertised =
+ ADVERTISED_2500baseX_Full;
+ break;
+ case SPEED_1000:
+ hw->phy.autoneg_advertised =
+ ADVERTISED_1000baseT_Full;
+ break;
+ case SPEED_100:
+ hw->phy.autoneg_advertised =
+ ADVERTISED_100baseT_Full;
+ break;
+ default:
+ break;
+ }
+ } else {
+ hw->phy.autoneg_advertised = ecmd->advertising |
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
+ }
ecmd->advertising = hw->phy.autoneg_advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = e1000_fc_default;
} else {
- u32 speed = ethtool_cmd_speed(ecmd);
- if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
+ if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
clear_bit(__IGB_RESETTING, &adapter->state);
return -EINVAL;
}
}
+#ifdef ETH_TP_MDI_AUTO
+ /* MDI-X => 2; MDI => 1; Auto => 3 */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ /* fix up the value for auto (3 => 0) as zero is mapped
+ * internally to auto
+ */
+ if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ hw->phy.mdix = AUTO_ALL_MODES;
+ else
+ hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+ }
+
+#endif /* ETH_TP_MDI_AUTO */
/* reset the link */
if (netif_running(adapter->netdev)) {
igb_down(adapter);
@@ -288,7 +430,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
adapter->fc_autoneg = pause->autoneg;
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
@@ -310,10 +452,18 @@ static int igb_set_pauseparam(struct net_device *netdev,
hw->fc.current_mode = hw->fc.requested_mode;
- retval = ((hw->phy.media_type == e1000_media_type_copper) ?
- igb_force_mac_fc(hw) : igb_setup_link(hw));
+ if (hw->phy.media_type == e1000_media_type_fiber) {
+ retval = hw->mac.ops.setup_link(hw);
+ /* implicit goto out */
+ } else {
+ retval = e1000_force_mac_fc(hw);
+ if (retval)
+ goto out;
+ e1000_set_fc_watermarks_generic(hw);
+ }
}
+out:
clear_bit(__IGB_RESETTING, &adapter->state);
return retval;
}
@@ -332,7 +482,7 @@ static void igb_set_msglevel(struct net_device *netdev, u32 data)
static int igb_get_regs_len(struct net_device *netdev)
{
-#define IGB_REGS_LEN 551
+#define IGB_REGS_LEN 555
return IGB_REGS_LEN * sizeof(u32);
}
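
Note: igb_get_regs_len() only reports the dump size; userspace allocates a buffer that large and then asks for the dump itself. A hedged userspace sketch of that two-step flow over the SIOCETHTOOL ioctl (the interface name "eth0" is an assumption and error handling is minimal):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ifreq ifr = { 0 };
	struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_regs *regs;

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&info;
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* regdump_len is filled in from the driver's get_regs_len() */
	regs = calloc(1, sizeof(*regs) + info.regdump_len);
	regs->cmd = ETHTOOL_GREGS;
	regs->len = info.regdump_len;
	ifr.ifr_data = (void *)regs;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("got %u register bytes, version 0x%08x\n",
		       regs->len, regs->version);
	free(regs);
	return 0;
}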
@@ -349,78 +499,78 @@ static void igb_get_regs(struct net_device *netdev,
regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
/* General Registers */
- regs_buff[0] = rd32(E1000_CTRL);
- regs_buff[1] = rd32(E1000_STATUS);
- regs_buff[2] = rd32(E1000_CTRL_EXT);
- regs_buff[3] = rd32(E1000_MDIC);
- regs_buff[4] = rd32(E1000_SCTL);
- regs_buff[5] = rd32(E1000_CONNSW);
- regs_buff[6] = rd32(E1000_VET);
- regs_buff[7] = rd32(E1000_LEDCTL);
- regs_buff[8] = rd32(E1000_PBA);
- regs_buff[9] = rd32(E1000_PBS);
- regs_buff[10] = rd32(E1000_FRTIMER);
- regs_buff[11] = rd32(E1000_TCPTIMER);
+ regs_buff[0] = E1000_READ_REG(hw, E1000_CTRL);
+ regs_buff[1] = E1000_READ_REG(hw, E1000_STATUS);
+ regs_buff[2] = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ regs_buff[3] = E1000_READ_REG(hw, E1000_MDIC);
+ regs_buff[4] = E1000_READ_REG(hw, E1000_SCTL);
+ regs_buff[5] = E1000_READ_REG(hw, E1000_CONNSW);
+ regs_buff[6] = E1000_READ_REG(hw, E1000_VET);
+ regs_buff[7] = E1000_READ_REG(hw, E1000_LEDCTL);
+ regs_buff[8] = E1000_READ_REG(hw, E1000_PBA);
+ regs_buff[9] = E1000_READ_REG(hw, E1000_PBS);
+ regs_buff[10] = E1000_READ_REG(hw, E1000_FRTIMER);
+ regs_buff[11] = E1000_READ_REG(hw, E1000_TCPTIMER);
/* NVM Register */
- regs_buff[12] = rd32(E1000_EECD);
+ regs_buff[12] = E1000_READ_REG(hw, E1000_EECD);
/* Interrupt */
/* Reading EICS for EICR because they read the
* same but EICS does not clear on read */
- regs_buff[13] = rd32(E1000_EICS);
- regs_buff[14] = rd32(E1000_EICS);
- regs_buff[15] = rd32(E1000_EIMS);
- regs_buff[16] = rd32(E1000_EIMC);
- regs_buff[17] = rd32(E1000_EIAC);
- regs_buff[18] = rd32(E1000_EIAM);
+ regs_buff[13] = E1000_READ_REG(hw, E1000_EICS);
+ regs_buff[14] = E1000_READ_REG(hw, E1000_EICS);
+ regs_buff[15] = E1000_READ_REG(hw, E1000_EIMS);
+ regs_buff[16] = E1000_READ_REG(hw, E1000_EIMC);
+ regs_buff[17] = E1000_READ_REG(hw, E1000_EIAC);
+ regs_buff[18] = E1000_READ_REG(hw, E1000_EIAM);
/* Reading ICS for ICR because they read the
* same but ICS does not clear on read */
- regs_buff[19] = rd32(E1000_ICS);
- regs_buff[20] = rd32(E1000_ICS);
- regs_buff[21] = rd32(E1000_IMS);
- regs_buff[22] = rd32(E1000_IMC);
- regs_buff[23] = rd32(E1000_IAC);
- regs_buff[24] = rd32(E1000_IAM);
- regs_buff[25] = rd32(E1000_IMIRVP);
+ regs_buff[19] = E1000_READ_REG(hw, E1000_ICS);
+ regs_buff[20] = E1000_READ_REG(hw, E1000_ICS);
+ regs_buff[21] = E1000_READ_REG(hw, E1000_IMS);
+ regs_buff[22] = E1000_READ_REG(hw, E1000_IMC);
+ regs_buff[23] = E1000_READ_REG(hw, E1000_IAC);
+ regs_buff[24] = E1000_READ_REG(hw, E1000_IAM);
+ regs_buff[25] = E1000_READ_REG(hw, E1000_IMIRVP);
/* Flow Control */
- regs_buff[26] = rd32(E1000_FCAL);
- regs_buff[27] = rd32(E1000_FCAH);
- regs_buff[28] = rd32(E1000_FCTTV);
- regs_buff[29] = rd32(E1000_FCRTL);
- regs_buff[30] = rd32(E1000_FCRTH);
- regs_buff[31] = rd32(E1000_FCRTV);
+ regs_buff[26] = E1000_READ_REG(hw, E1000_FCAL);
+ regs_buff[27] = E1000_READ_REG(hw, E1000_FCAH);
+ regs_buff[28] = E1000_READ_REG(hw, E1000_FCTTV);
+ regs_buff[29] = E1000_READ_REG(hw, E1000_FCRTL);
+ regs_buff[30] = E1000_READ_REG(hw, E1000_FCRTH);
+ regs_buff[31] = E1000_READ_REG(hw, E1000_FCRTV);
/* Receive */
- regs_buff[32] = rd32(E1000_RCTL);
- regs_buff[33] = rd32(E1000_RXCSUM);
- regs_buff[34] = rd32(E1000_RLPML);
- regs_buff[35] = rd32(E1000_RFCTL);
- regs_buff[36] = rd32(E1000_MRQC);
- regs_buff[37] = rd32(E1000_VT_CTL);
+ regs_buff[32] = E1000_READ_REG(hw, E1000_RCTL);
+ regs_buff[33] = E1000_READ_REG(hw, E1000_RXCSUM);
+ regs_buff[34] = E1000_READ_REG(hw, E1000_RLPML);
+ regs_buff[35] = E1000_READ_REG(hw, E1000_RFCTL);
+ regs_buff[36] = E1000_READ_REG(hw, E1000_MRQC);
+ regs_buff[37] = E1000_READ_REG(hw, E1000_VT_CTL);
/* Transmit */
- regs_buff[38] = rd32(E1000_TCTL);
- regs_buff[39] = rd32(E1000_TCTL_EXT);
- regs_buff[40] = rd32(E1000_TIPG);
- regs_buff[41] = rd32(E1000_DTXCTL);
+ regs_buff[38] = E1000_READ_REG(hw, E1000_TCTL);
+ regs_buff[39] = E1000_READ_REG(hw, E1000_TCTL_EXT);
+ regs_buff[40] = E1000_READ_REG(hw, E1000_TIPG);
+ regs_buff[41] = E1000_READ_REG(hw, E1000_DTXCTL);
/* Wake Up */
- regs_buff[42] = rd32(E1000_WUC);
- regs_buff[43] = rd32(E1000_WUFC);
- regs_buff[44] = rd32(E1000_WUS);
- regs_buff[45] = rd32(E1000_IPAV);
- regs_buff[46] = rd32(E1000_WUPL);
+ regs_buff[42] = E1000_READ_REG(hw, E1000_WUC);
+ regs_buff[43] = E1000_READ_REG(hw, E1000_WUFC);
+ regs_buff[44] = E1000_READ_REG(hw, E1000_WUS);
+ regs_buff[45] = E1000_READ_REG(hw, E1000_IPAV);
+ regs_buff[46] = E1000_READ_REG(hw, E1000_WUPL);
/* MAC */
- regs_buff[47] = rd32(E1000_PCS_CFG0);
- regs_buff[48] = rd32(E1000_PCS_LCTL);
- regs_buff[49] = rd32(E1000_PCS_LSTAT);
- regs_buff[50] = rd32(E1000_PCS_ANADV);
- regs_buff[51] = rd32(E1000_PCS_LPAB);
- regs_buff[52] = rd32(E1000_PCS_NPTX);
- regs_buff[53] = rd32(E1000_PCS_LPABNP);
+ regs_buff[47] = E1000_READ_REG(hw, E1000_PCS_CFG0);
+ regs_buff[48] = E1000_READ_REG(hw, E1000_PCS_LCTL);
+ regs_buff[49] = E1000_READ_REG(hw, E1000_PCS_LSTAT);
+ regs_buff[50] = E1000_READ_REG(hw, E1000_PCS_ANADV);
+ regs_buff[51] = E1000_READ_REG(hw, E1000_PCS_LPAB);
+ regs_buff[52] = E1000_READ_REG(hw, E1000_PCS_NPTX);
+ regs_buff[53] = E1000_READ_REG(hw, E1000_PCS_LPABNP);
/* Statistics */
regs_buff[54] = adapter->stats.crcerrs;
@@ -486,70 +636,69 @@ static void igb_get_regs(struct net_device *netdev,
regs_buff[120] = adapter->stats.hrmpc;
for (i = 0; i < 4; i++)
- regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
+ regs_buff[121 + i] = E1000_READ_REG(hw, E1000_SRRCTL(i));
for (i = 0; i < 4; i++)
- regs_buff[125 + i] = rd32(E1000_PSRTYPE(i));
+ regs_buff[125 + i] = E1000_READ_REG(hw, E1000_PSRTYPE(i));
for (i = 0; i < 4; i++)
- regs_buff[129 + i] = rd32(E1000_RDBAL(i));
+ regs_buff[129 + i] = E1000_READ_REG(hw, E1000_RDBAL(i));
for (i = 0; i < 4; i++)
- regs_buff[133 + i] = rd32(E1000_RDBAH(i));
+ regs_buff[133 + i] = E1000_READ_REG(hw, E1000_RDBAH(i));
for (i = 0; i < 4; i++)
- regs_buff[137 + i] = rd32(E1000_RDLEN(i));
+ regs_buff[137 + i] = E1000_READ_REG(hw, E1000_RDLEN(i));
for (i = 0; i < 4; i++)
- regs_buff[141 + i] = rd32(E1000_RDH(i));
+ regs_buff[141 + i] = E1000_READ_REG(hw, E1000_RDH(i));
for (i = 0; i < 4; i++)
- regs_buff[145 + i] = rd32(E1000_RDT(i));
+ regs_buff[145 + i] = E1000_READ_REG(hw, E1000_RDT(i));
for (i = 0; i < 4; i++)
- regs_buff[149 + i] = rd32(E1000_RXDCTL(i));
+ regs_buff[149 + i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
for (i = 0; i < 10; i++)
- regs_buff[153 + i] = rd32(E1000_EITR(i));
+ regs_buff[153 + i] = E1000_READ_REG(hw, E1000_EITR(i));
for (i = 0; i < 8; i++)
- regs_buff[163 + i] = rd32(E1000_IMIR(i));
+ regs_buff[163 + i] = E1000_READ_REG(hw, E1000_IMIR(i));
for (i = 0; i < 8; i++)
- regs_buff[171 + i] = rd32(E1000_IMIREXT(i));
+ regs_buff[171 + i] = E1000_READ_REG(hw, E1000_IMIREXT(i));
for (i = 0; i < 16; i++)
- regs_buff[179 + i] = rd32(E1000_RAL(i));
+ regs_buff[179 + i] = E1000_READ_REG(hw, E1000_RAL(i));
for (i = 0; i < 16; i++)
- regs_buff[195 + i] = rd32(E1000_RAH(i));
+ regs_buff[195 + i] = E1000_READ_REG(hw, E1000_RAH(i));
for (i = 0; i < 4; i++)
- regs_buff[211 + i] = rd32(E1000_TDBAL(i));
+ regs_buff[211 + i] = E1000_READ_REG(hw, E1000_TDBAL(i));
for (i = 0; i < 4; i++)
- regs_buff[215 + i] = rd32(E1000_TDBAH(i));
+ regs_buff[215 + i] = E1000_READ_REG(hw, E1000_TDBAH(i));
for (i = 0; i < 4; i++)
- regs_buff[219 + i] = rd32(E1000_TDLEN(i));
+ regs_buff[219 + i] = E1000_READ_REG(hw, E1000_TDLEN(i));
for (i = 0; i < 4; i++)
- regs_buff[223 + i] = rd32(E1000_TDH(i));
+ regs_buff[223 + i] = E1000_READ_REG(hw, E1000_TDH(i));
for (i = 0; i < 4; i++)
- regs_buff[227 + i] = rd32(E1000_TDT(i));
+ regs_buff[227 + i] = E1000_READ_REG(hw, E1000_TDT(i));
for (i = 0; i < 4; i++)
- regs_buff[231 + i] = rd32(E1000_TXDCTL(i));
+ regs_buff[231 + i] = E1000_READ_REG(hw, E1000_TXDCTL(i));
for (i = 0; i < 4; i++)
- regs_buff[235 + i] = rd32(E1000_TDWBAL(i));
+ regs_buff[235 + i] = E1000_READ_REG(hw, E1000_TDWBAL(i));
for (i = 0; i < 4; i++)
- regs_buff[239 + i] = rd32(E1000_TDWBAH(i));
+ regs_buff[239 + i] = E1000_READ_REG(hw, E1000_TDWBAH(i));
for (i = 0; i < 4; i++)
- regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i));
+ regs_buff[243 + i] = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
for (i = 0; i < 4; i++)
- regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i));
+ regs_buff[247 + i] = E1000_READ_REG(hw, E1000_IP4AT_REG(i));
for (i = 0; i < 4; i++)
- regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i));
+ regs_buff[251 + i] = E1000_READ_REG(hw, E1000_IP6AT_REG(i));
for (i = 0; i < 32; i++)
- regs_buff[255 + i] = rd32(E1000_WUPM_REG(i));
+ regs_buff[255 + i] = E1000_READ_REG(hw, E1000_WUPM_REG(i));
for (i = 0; i < 128; i++)
- regs_buff[287 + i] = rd32(E1000_FFMT_REG(i));
+ regs_buff[287 + i] = E1000_READ_REG(hw, E1000_FFMT_REG(i));
for (i = 0; i < 128; i++)
- regs_buff[415 + i] = rd32(E1000_FFVT_REG(i));
+ regs_buff[415 + i] = E1000_READ_REG(hw, E1000_FFVT_REG(i));
for (i = 0; i < 4; i++)
- regs_buff[543 + i] = rd32(E1000_FFLT_REG(i));
-
- regs_buff[547] = rd32(E1000_TDFH);
- regs_buff[548] = rd32(E1000_TDFT);
- regs_buff[549] = rd32(E1000_TDFHS);
- regs_buff[550] = rd32(E1000_TDFPC);
+ regs_buff[543 + i] = E1000_READ_REG(hw, E1000_FFLT_REG(i));
+ regs_buff[547] = E1000_READ_REG(hw, E1000_TDFH);
+ regs_buff[548] = E1000_READ_REG(hw, E1000_TDFT);
+ regs_buff[549] = E1000_READ_REG(hw, E1000_TDFHS);
+ regs_buff[550] = E1000_READ_REG(hw, E1000_TDFPC);
if (hw->mac.type > e1000_82580) {
regs_buff[551] = adapter->stats.o2bgptc;
regs_buff[552] = adapter->stats.b2ospc;
@@ -588,13 +737,13 @@ static int igb_get_eeprom(struct net_device *netdev,
return -ENOMEM;
if (hw->nvm.type == e1000_nvm_eeprom_spi)
- ret_val = hw->nvm.ops.read(hw, first_word,
- last_word - first_word + 1,
- eeprom_buff);
+ ret_val = e1000_read_nvm(hw, first_word,
+ last_word - first_word + 1,
+ eeprom_buff);
else {
for (i = 0; i < last_word - first_word + 1; i++) {
- ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
- &eeprom_buff[i]);
+ ret_val = e1000_read_nvm(hw, first_word + i, 1,
+ &eeprom_buff[i]);
if (ret_val)
break;
}
@@ -602,7 +751,7 @@ static int igb_get_eeprom(struct net_device *netdev,
/* Device's eeprom is always little-endian, word addressable */
for (i = 0; i < last_word - first_word + 1; i++)
- le16_to_cpus(&eeprom_buff[i]);
+ eeprom_buff[i] = le16_to_cpu(eeprom_buff[i]);
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
eeprom->len);
@@ -624,9 +773,6 @@ static int igb_set_eeprom(struct net_device *netdev,
if (eeprom->len == 0)
return -EOPNOTSUPP;
- if (hw->mac.type == e1000_i211)
- return -EOPNOTSUPP;
-
if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
return -EFAULT;
@@ -643,15 +789,15 @@ static int igb_set_eeprom(struct net_device *netdev,
if (eeprom->offset & 1) {
/* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */
- ret_val = hw->nvm.ops.read(hw, first_word, 1,
+ ret_val = e1000_read_nvm(hw, first_word, 1,
&eeprom_buff[0]);
ptr++;
}
if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
/* need read/modify/write of last changed EEPROM word */
/* only the first byte of the word is being modified */
- ret_val = hw->nvm.ops.read(hw, last_word, 1,
- &eeprom_buff[last_word - first_word]);
+ ret_val = e1000_read_nvm(hw, last_word, 1,
+ &eeprom_buff[last_word - first_word]);
}
/* Device's eeprom is always little-endian, word addressable */
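
Note: the two read-modify-write branches above exist because the NVM is word (16-bit) addressable while the ethtool request is byte granular: an odd offset or length means the first and/or last word must be read, partially overwritten, and written back. A standalone sketch of that splice (plain array instead of the NVM ops; the little-endian conversion is omitted):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t nvm[8] = { 0x1122, 0x3344, 0x5566, 0x7788 };

static void nvm_write_bytes(unsigned off, const uint8_t *src, unsigned len)
{
	unsigned first = off >> 1, last = (off + len - 1) >> 1;
	uint16_t buf[8];

	memcpy(buf, &nvm[first], (last - first + 1) * 2); /* read (RMW) */
	memcpy((uint8_t *)buf + (off & 1), src, len);     /* splice bytes */
	memcpy(&nvm[first], buf, (last - first + 1) * 2); /* write back */
}

int main(void)
{
	uint8_t bytes[3] = { 0xAA, 0xBB, 0xCC };

	nvm_write_bytes(1, bytes, 3);	/* odd offset and odd length */
	for (int i = 0; i < 4; i++)
		printf("word %d = 0x%04x\n", i, nvm[i]);
	return 0;
}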
@@ -661,15 +807,15 @@ static int igb_set_eeprom(struct net_device *netdev,
memcpy(ptr, bytes, eeprom->len);
for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+ cpu_to_le16s(&eeprom_buff[i]);
- ret_val = hw->nvm.ops.write(hw, first_word,
- last_word - first_word + 1, eeprom_buff);
+ ret_val = e1000_write_nvm(hw, first_word,
+ last_word - first_word + 1, eeprom_buff);
- /* Update the checksum over the first part of the EEPROM if needed
+ /* Update the checksum if write succeeded.
* and flush shadow RAM for 82573 controllers */
- if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
- hw->nvm.ops.update(hw);
+ if (ret_val == 0)
+ e1000_update_nvm_checksum(hw);
kfree(eeprom_buff);
return ret_val;
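
Note: e1000_update_nvm_checksum()/e1000_validate_nvm_checksum() follow the e1000-family convention that the first 0x40 NVM words sum to 0xBABA, with the word at offset 0x3F adjusted to make that true; the constants below come from that convention rather than from this file. A standalone model:

#include <stdint.h>
#include <stdio.h>

#define NVM_CHECKSUM_REG 0x3F
#define NVM_SUM 0xBABA

static uint16_t nvm[0x40];

static void update_checksum(void)
{
	uint16_t sum = 0;

	for (int i = 0; i < NVM_CHECKSUM_REG; i++)
		sum += nvm[i];
	nvm[NVM_CHECKSUM_REG] = (uint16_t)(NVM_SUM - sum);
}

static int validate_checksum(void)
{
	uint16_t sum = 0;

	for (int i = 0; i <= NVM_CHECKSUM_REG; i++)
		sum += nvm[i];
	return sum == NVM_SUM ? 0 : -1;
}

int main(void)
{
	nvm[5] = 0x1234;		/* pretend a word was rewritten */
	update_checksum();
	printf("checksum %s\n", validate_checksum() ? "bad" : "ok");
	return 0;
}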
@@ -679,25 +825,13 @@ static void igb_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
- u16 eeprom_data;
strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
- strncpy(drvinfo->version, igb_driver_version,
- sizeof(drvinfo->version) - 1);
-
- /* EEPROM image version # is reported as firmware version # for
- * 82575 controllers */
- adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
- sprintf(firmware_version, "%d.%d-%d",
- (eeprom_data & 0xF000) >> 12,
- (eeprom_data & 0x0FF0) >> 4,
- eeprom_data & 0x000F);
-
- strncpy(drvinfo->fw_version, firmware_version,
+ strncpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version) - 1);
+
+ strncpy(drvinfo->fw_version, adapter->fw_version,
sizeof(drvinfo->fw_version) - 1);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info) - 1);
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info) -1);
drvinfo->n_stats = IGB_STATS_LEN;
drvinfo->testinfo_len = IGB_TEST_LEN;
drvinfo->regdump_len = igb_get_regs_len(netdev);
@@ -730,12 +864,12 @@ static int igb_set_ringparam(struct net_device *netdev,
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
- new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
+ new_rx_count = min(ring->rx_pending, (u32)IGB_MAX_RXD);
+ new_rx_count = max(new_rx_count, (u16)IGB_MIN_RXD);
new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
- new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
- new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
+ new_tx_count = min(ring->tx_pending, (u32)IGB_MAX_TXD);
+ new_tx_count = max(new_tx_count, (u16)IGB_MIN_TXD);
new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
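
Note: the requested descriptor counts are clamped to the supported range and then rounded up to the hardware's required multiple, as shown above. A minimal sketch with illustrative bounds (the real IGB_MIN/MAX_TXD values and multiples live in the driver headers):

#include <stdio.h>

#define DEMO_MIN_TXD 80
#define DEMO_MAX_TXD 4096
#define DEMO_TXD_MULTIPLE 8

/* round up to the next multiple of a power-of-two 'a' */
#define DEMO_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static unsigned int demo_fix_ring_size(unsigned int requested)
{
	unsigned int n = requested;

	if (n > DEMO_MAX_TXD)
		n = DEMO_MAX_TXD;
	if (n < DEMO_MIN_TXD)
		n = DEMO_MIN_TXD;
	return DEMO_ALIGN(n, DEMO_TXD_MULTIPLE);
}

int main(void)
{
	printf("%u -> %u\n", 100u, demo_fix_ring_size(100));   /* 104 */
	printf("%u -> %u\n", 9000u, demo_fix_ring_size(9000)); /* 4096 */
	return 0;
}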
if ((new_tx_count == adapter->tx_ring_count) &&
@@ -745,7 +879,7 @@ static int igb_set_ringparam(struct net_device *netdev,
}
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -834,221 +968,6 @@ clear_reset:
return err;
}
-/* ethtool register test data */
-struct igb_reg_test {
- u16 reg;
- u16 reg_offset;
- u16 array_len;
- u16 test_type;
- u32 mask;
- u32 write;
-};
-
-/* In the hardware, registers are laid out either singly, in arrays
- * spaced 0x100 bytes apart, or in contiguous tables. We assume
- * most tests take place on arrays or single registers (handled
- * as a single-element array) and special-case the tables.
- * Table tests are always pattern tests.
- *
- * We also make provision for some required setup steps by specifying
- * registers to be written without any read-back testing.
- */
-
-#define PATTERN_TEST 1
-#define SET_READ_TEST 2
-#define WRITE_NO_TEST 3
-#define TABLE32_TEST 4
-#define TABLE64_TEST_LO 5
-#define TABLE64_TEST_HI 6
-
-/* i210 reg test */
-static struct igb_reg_test reg_test_i210[] = {
- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- /* RDH is read-only for i210, only test RDT. */
- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RA, 0, 16, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_HI,
- 0x900FFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128, TABLE32_TEST,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0, 0 }
-};
-
-/* i350 reg test */
-static struct igb_reg_test reg_test_i350[] = {
- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 },
- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- /* RDH is read-only for i350, only test RDT. */
- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RA, 0, 16, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_HI,
- 0xC3FFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 16, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 16, TABLE64_TEST_HI,
- 0xC3FFFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128, TABLE32_TEST,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0 }
-};
-
-/* 82580 reg test */
-static struct igb_reg_test reg_test_82580[] = {
- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- /* RDH is read-only for 82580, only test RDT. */
- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RA, 0, 16, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_HI,
- 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 8, TABLE64_TEST_LO,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 8, TABLE64_TEST_HI,
- 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128, TABLE32_TEST,
- 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0 }
-};
-
-/* 82576 reg test */
-static struct igb_reg_test reg_test_82576[] = {
- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- /* Enable all RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
- /* RDH is read-only for 82576, only test RDT. */
- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0 }
-};
-
-/* 82575 register test */
-static struct igb_reg_test reg_test_82575[] = {
- { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
- { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- /* Enable all four RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
- /* RDH is read-only for 82575, only test RDT. */
- { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
- { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
- { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
- { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
- { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
- { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RA, 0, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { 0, 0, 0, 0 }
-};
-
static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
int reg, u32 mask, u32 write)
{
@@ -1057,13 +976,13 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
static const u32 _test[] =
{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
- wr32(reg, (_test[pat] & write));
- val = rd32(reg) & mask;
+ E1000_WRITE_REG(hw, reg, (_test[pat] & write));
+ val = E1000_READ_REG(hw, reg) & mask;
if (val != (_test[pat] & write & mask)) {
- dev_err(&adapter->pdev->dev, "pattern test reg %04X "
+ dev_err(pci_dev_to_dev(adapter->pdev), "pattern test reg %04X "
"failed: got 0x%08X expected 0x%08X\n",
- reg, val, (_test[pat] & write & mask));
- *data = reg;
+ E1000_REGISTER(hw, reg), val, (_test[pat] & write & mask));
+ *data = E1000_REGISTER(hw, reg);
return 1;
}
}
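
Note: reg_pattern_test() writes each test pattern through the 'write' mask, reads the register back, and compares only the bits covered by 'mask', so reserved or read-only bits never cause false failures. A standalone sketch against a simulated register (no MMIO):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;	/* stands in for a writable device register */

static int pattern_test(uint32_t mask, uint32_t write)
{
	static const uint32_t pat[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
	};

	for (unsigned i = 0; i < sizeof(pat) / sizeof(pat[0]); i++) {
		fake_reg = pat[i] & write;		/* "wr32" */
		uint32_t val = fake_reg & mask;		/* "rd32" + mask */
		if (val != (pat[i] & write & mask)) {
			printf("pattern test failed: got 0x%08X expected 0x%08X\n",
			       val, pat[i] & write & mask);
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	/* e.g. an FCAH-style register: only the low 16 bits implemented */
	return pattern_test(0x0000FFFF, 0xFFFFFFFF);
}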
@@ -1076,13 +995,13 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
{
struct e1000_hw *hw = &adapter->hw;
u32 val;
- wr32(reg, write & mask);
- val = rd32(reg);
+ E1000_WRITE_REG(hw, reg, write & mask);
+ val = E1000_READ_REG(hw, reg);
if ((write & mask) != (val & mask)) {
- dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:"
+ dev_err(pci_dev_to_dev(adapter->pdev), "set/check reg %04X test failed:"
" got 0x%08X expected 0x%08X\n", reg,
(val & mask), (write & mask));
- *data = reg;
+ *data = E1000_REGISTER(hw, reg);
return 1;
}
@@ -1110,6 +1029,7 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
switch (adapter->hw.mac.type) {
case e1000_i350:
+ case e1000_i354:
test = reg_test_i350;
toggle = 0x7FEFF3FF;
break;
@@ -1137,18 +1057,18 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
* tests. Some bits are read-only, some toggle, and some
* are writable on newer MACs.
*/
- before = rd32(E1000_STATUS);
- value = (rd32(E1000_STATUS) & toggle);
- wr32(E1000_STATUS, toggle);
- after = rd32(E1000_STATUS) & toggle;
+ before = E1000_READ_REG(hw, E1000_STATUS);
+ value = (E1000_READ_REG(hw, E1000_STATUS) & toggle);
+ E1000_WRITE_REG(hw, E1000_STATUS, toggle);
+ after = E1000_READ_REG(hw, E1000_STATUS) & toggle;
if (value != after) {
- dev_err(&adapter->pdev->dev, "failed STATUS register test "
+ dev_err(pci_dev_to_dev(adapter->pdev), "failed STATUS register test "
"got: 0x%08X expected: 0x%08X\n", after, value);
*data = 1;
return 1;
}
/* restore previous status */
- wr32(E1000_STATUS, before);
+ E1000_WRITE_REG(hw, E1000_STATUS, before);
/* Perform the remainder of the register test, looping through
* the test table until we either fail or reach the null entry.
@@ -1170,7 +1090,7 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
break;
case WRITE_NO_TEST:
writel(test->write,
- (adapter->hw.hw_addr + test->reg)
+ (adapter->hw.hw_addr + test->reg)
+ (i * test->reg_offset));
break;
case TABLE32_TEST:
@@ -1201,11 +1121,9 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
{
*data = 0;
- /* Validate eeprom on all parts but i211 */
- if (adapter->hw.mac.type != e1000_i211) {
- if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
- *data = 2;
- }
+ /* Validate NVM checksum */
+ if (e1000_validate_nvm_checksum(&adapter->hw) < 0)
+ *data = 2;
return *data;
}
@@ -1215,7 +1133,7 @@ static irqreturn_t igb_test_intr(int irq, void *data)
struct igb_adapter *adapter = (struct igb_adapter *) data;
struct e1000_hw *hw = &adapter->hw;
- adapter->test_icr |= rd32(E1000_ICR);
+ adapter->test_icr |= E1000_READ_REG(hw, E1000_ICR);
return IRQ_HANDLED;
}
@@ -1224,7 +1142,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
{
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- u32 mask, ics_mask, i = 0, shared_int = true;
+ u32 mask, ics_mask, i = 0, shared_int = TRUE;
u32 irq = adapter->pdev->irq;
*data = 0;
@@ -1232,12 +1150,12 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Hook up test interrupt handler just for this test */
if (adapter->msix_entries) {
if (request_irq(adapter->msix_entries[0].vector,
- igb_test_intr, 0, netdev->name, adapter)) {
+ &igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
return -1;
}
} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
- shared_int = false;
+ shared_int = FALSE;
if (request_irq(irq,
igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
@@ -1245,19 +1163,19 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
}
} else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
netdev->name, adapter)) {
- shared_int = false;
- } else if (request_irq(irq, igb_test_intr, IRQF_SHARED,
+ shared_int = FALSE;
+ } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
netdev->name, adapter)) {
*data = 1;
return -1;
}
- dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
- (shared_int ? "shared" : "unshared"));
+ dev_info(pci_dev_to_dev(adapter->pdev), "testing %s interrupt\n",
+ (shared_int ? "shared" : "unshared"));
/* Disable all the interrupts */
- wr32(E1000_IMC, ~0);
- wrfl();
- msleep(10);
+ E1000_WRITE_REG(hw, E1000_IMC, ~0);
+ E1000_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
/* Define all writable bits for ICS */
switch (hw->mac.type) {
@@ -1271,9 +1189,12 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
ics_mask = 0x77DCFED5;
break;
case e1000_i350:
+ case e1000_i354:
+ ics_mask = 0x77DCFED5;
+ break;
case e1000_i210:
case e1000_i211:
- ics_mask = 0x77DCFED5;
+ ics_mask = 0x774CFED5;
break;
default:
ics_mask = 0x7FFFFFFF;
@@ -1298,12 +1219,12 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
adapter->test_icr = 0;
/* Flush any pending interrupts */
- wr32(E1000_ICR, ~0);
+ E1000_WRITE_REG(hw, E1000_ICR, ~0);
- wr32(E1000_IMC, mask);
- wr32(E1000_ICS, mask);
- wrfl();
- msleep(10);
+ E1000_WRITE_REG(hw, E1000_IMC, mask);
+ E1000_WRITE_REG(hw, E1000_ICS, mask);
+ E1000_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
if (adapter->test_icr & mask) {
*data = 3;
@@ -1320,12 +1241,12 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
adapter->test_icr = 0;
/* Flush any pending interrupts */
- wr32(E1000_ICR, ~0);
+ E1000_WRITE_REG(hw, E1000_ICR, ~0);
- wr32(E1000_IMS, mask);
- wr32(E1000_ICS, mask);
- wrfl();
- msleep(10);
+ E1000_WRITE_REG(hw, E1000_IMS, mask);
+ E1000_WRITE_REG(hw, E1000_ICS, mask);
+ E1000_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
if (!(adapter->test_icr & mask)) {
*data = 4;
@@ -1342,12 +1263,12 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
adapter->test_icr = 0;
/* Flush any pending interrupts */
- wr32(E1000_ICR, ~0);
+ E1000_WRITE_REG(hw, E1000_ICR, ~0);
- wr32(E1000_IMC, ~mask);
- wr32(E1000_ICS, ~mask);
- wrfl();
- msleep(10);
+ E1000_WRITE_REG(hw, E1000_IMC, ~mask);
+ E1000_WRITE_REG(hw, E1000_ICS, ~mask);
+ E1000_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
if (adapter->test_icr & mask) {
*data = 5;
@@ -1357,9 +1278,9 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
}
/* Disable all the interrupts */
- wr32(E1000_IMC, ~0);
- wrfl();
- msleep(10);
+ E1000_WRITE_REG(hw, E1000_IMC, ~0);
+ E1000_WRITE_FLUSH(hw);
+ usleep_range(10000, 20000);
/* Unhook test interrupt handler */
if (adapter->msix_entries)
@@ -1385,7 +1306,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
/* Setup Tx descriptor ring and Tx buffers */
tx_ring->count = IGB_DEFAULT_TXD;
- tx_ring->dev = &adapter->pdev->dev;
+ tx_ring->dev = pci_dev_to_dev(adapter->pdev);
tx_ring->netdev = adapter->netdev;
tx_ring->reg_idx = adapter->vfs_allocated_count;
@@ -1399,24 +1320,26 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
/* Setup Rx descriptor ring and Rx buffers */
rx_ring->count = IGB_DEFAULT_RXD;
- rx_ring->dev = &adapter->pdev->dev;
+ rx_ring->dev = pci_dev_to_dev(adapter->pdev);
rx_ring->netdev = adapter->netdev;
- rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ rx_ring->rx_buffer_len = IGB_RX_HDR_LEN;
+#endif
rx_ring->reg_idx = adapter->vfs_allocated_count;
if (igb_setup_rx_resources(rx_ring)) {
- ret_val = 3;
+ ret_val = 2;
goto err_nomem;
}
/* set the default queue to queue 0 of PF */
- wr32(E1000_MRQC, adapter->vfs_allocated_count << 3);
+ E1000_WRITE_REG(hw, E1000_MRQC, adapter->vfs_allocated_count << 3);
/* enable receive ring */
igb_setup_rctl(adapter);
igb_configure_rx_ring(adapter, rx_ring);
- igb_alloc_rx_buffers_adv(rx_ring, igb_desc_unused(rx_ring));
+ igb_alloc_rx_buffers(rx_ring, igb_desc_unused(rx_ring));
return 0;
@@ -1430,50 +1353,44 @@ static void igb_phy_disable_receiver(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
/* Write out to PHY registers 29 and 30 to disable the Receiver. */
- igb_write_phy_reg(hw, 29, 0x001F);
- igb_write_phy_reg(hw, 30, 0x8FFC);
- igb_write_phy_reg(hw, 29, 0x001A);
- igb_write_phy_reg(hw, 30, 0x8FF0);
+ e1000_write_phy_reg(hw, 29, 0x001F);
+ e1000_write_phy_reg(hw, 30, 0x8FFC);
+ e1000_write_phy_reg(hw, 29, 0x001A);
+ e1000_write_phy_reg(hw, 30, 0x8FF0);
}
+
static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_reg = 0;
- u16 phy_reg = 0;
-
- hw->mac.autoneg = false;
-
- switch (hw->phy.type) {
- case e1000_phy_m88:
- /* Auto-MDI/MDIX Off */
- igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
- /* reset to update Auto-MDI/MDIX */
- igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
- /* autoneg off */
- igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
- break;
- case e1000_phy_82580:
+
+ hw->mac.autoneg = FALSE;
+
+ if (hw->phy.type == e1000_phy_m88) {
+ if (hw->phy.id != I210_I_PHY_ID) {
+ /* Auto-MDI/MDIX Off */
+ e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
+ /* reset to update Auto-MDI/MDIX */
+ e1000_write_phy_reg(hw, PHY_CONTROL, 0x9140);
+ /* autoneg off */
+ e1000_write_phy_reg(hw, PHY_CONTROL, 0x8140);
+ } else {
+ /* force 1000, set loopback */
+ e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
+ e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
+ }
+ } else {
/* enable MII loopback */
- igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
- break;
- case e1000_phy_i210:
- /* set loopback speed in PHY */
- igb_read_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
- &phy_reg);
- phy_reg |= GS40G_MAC_SPEED_1G;
- igb_write_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2),
- phy_reg);
- ctrl_reg = rd32(E1000_CTRL_EXT);
- default:
- break;
+ if (hw->phy.type == e1000_phy_82580)
+ e1000_write_phy_reg(hw, I82577_PHY_LBK_CTRL, 0x8041);
}
- /* force 1000, set loopback */
- igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
+ /* force 1000, set loopback */
+ e1000_write_phy_reg(hw, PHY_CONTROL, 0x4140);
/* Now set up the MAC to the same speed/duplex as the PHY. */
- ctrl_reg = rd32(E1000_CTRL);
+ ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
@@ -1481,19 +1398,18 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
E1000_CTRL_FD | /* Force Duplex to FULL */
E1000_CTRL_SLU); /* Set link up enable bit */
- if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210))
+ if (hw->phy.type == e1000_phy_m88)
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
- wr32(E1000_CTRL, ctrl_reg);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
/* Disable the receiver on the PHY so when a cable is plugged in, the
* PHY does not begin to autoneg when a cable is reconnected to the NIC.
*/
- if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210))
+ if (hw->phy.type == e1000_phy_m88)
igb_phy_disable_receiver(adapter);
- udelay(500);
-
+ mdelay(500);
return 0;
}
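
Note: the 0x4140 value written to PHY_CONTROL above is a standard MII BMCR encoding: loopback, 1000 Mb/s speed select, and full duplex. A small decoder using the usual BMCR bit definitions (values as in linux/mii.h):

#include <stdio.h>

#define BMCR_SPEED1000	0x0040	/* speed select MSB */
#define BMCR_FULLDPLX	0x0100
#define BMCR_SPEED100	0x2000	/* speed select LSB */
#define BMCR_LOOPBACK	0x4000

int main(void)
{
	unsigned int v = 0x4140;

	printf("loopback=%d full_duplex=%d speed=%s\n",
	       !!(v & BMCR_LOOPBACK), !!(v & BMCR_FULLDPLX),
	       (v & BMCR_SPEED1000) ? "1000" :
	       (v & BMCR_SPEED100) ? "100" : "10");
	return 0;
}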
@@ -1507,54 +1423,64 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 reg;
- reg = rd32(E1000_CTRL_EXT);
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
/* use CTRL_EXT to identify link type as SGMII can appear as copper */
if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+ (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
/* Enable DH89xxCC MPHY for near end loopback */
- reg = rd32(E1000_MPHY_ADDR_CTL);
+ reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
- E1000_MPHY_PCS_CLK_REG_OFFSET;
- wr32(E1000_MPHY_ADDR_CTL, reg);
+ E1000_MPHY_PCS_CLK_REG_OFFSET;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);
- reg = rd32(E1000_MPHY_DATA);
+ reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
reg |= E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
- wr32(E1000_MPHY_DATA, reg);
+ E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
}
- reg = rd32(E1000_RCTL);
+ reg = E1000_READ_REG(hw, E1000_RCTL);
reg |= E1000_RCTL_LBM_TCVR;
- wr32(E1000_RCTL, reg);
+ E1000_WRITE_REG(hw, E1000_RCTL, reg);
- wr32(E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);
+ E1000_WRITE_REG(hw, E1000_SCTL, E1000_ENABLE_SERDES_LOOPBACK);
- reg = rd32(E1000_CTRL);
+ reg = E1000_READ_REG(hw, E1000_CTRL);
reg &= ~(E1000_CTRL_RFCE |
E1000_CTRL_TFCE |
E1000_CTRL_LRST);
reg |= E1000_CTRL_SLU |
E1000_CTRL_FD;
- wr32(E1000_CTRL, reg);
+ E1000_WRITE_REG(hw, E1000_CTRL, reg);
/* Unset switch control to serdes energy detect */
- reg = rd32(E1000_CONNSW);
+ reg = E1000_READ_REG(hw, E1000_CONNSW);
reg &= ~E1000_CONNSW_ENRGSRC;
- wr32(E1000_CONNSW, reg);
+ E1000_WRITE_REG(hw, E1000_CONNSW, reg);
+
+ /* Unset sigdetect for SERDES loopback on
+ * 82580 and newer devices
+ */
+ if (hw->mac.type >= e1000_82580) {
+ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+ reg |= E1000_PCS_CFG_IGN_SD;
+ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+ }
/* Set PCS register for forced speed */
- reg = rd32(E1000_PCS_LCTL);
+ reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/
reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */
E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
E1000_PCS_LCTL_FSD | /* Force Speed */
E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
- wr32(E1000_PCS_LCTL, reg);
+ E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
return 0;
}
@@ -1569,35 +1495,37 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
u16 phy_reg;
if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
- (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
+ (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+ (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
u32 reg;
/* Disable near end loopback on DH89xxCC */
- reg = rd32(E1000_MPHY_ADDR_CTL);
- reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
- E1000_MPHY_PCS_CLK_REG_OFFSET;
- wr32(E1000_MPHY_ADDR_CTL, reg);
+ reg = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTL);
+ reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK ) |
+ E1000_MPHY_PCS_CLK_REG_OFFSET;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTL, reg);
- reg = rd32(E1000_MPHY_DATA);
+ reg = E1000_READ_REG(hw, E1000_MPHY_DATA);
reg &= ~E1000_MPHY_PCS_CLK_REG_DIGINELBEN;
- wr32(E1000_MPHY_DATA, reg);
+ E1000_WRITE_REG(hw, E1000_MPHY_DATA, reg);
}
- rctl = rd32(E1000_RCTL);
+ rctl = E1000_READ_REG(hw, E1000_RCTL);
rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
- wr32(E1000_RCTL, rctl);
+ E1000_WRITE_REG(hw, E1000_RCTL, rctl);
- hw->mac.autoneg = true;
- igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
+ hw->mac.autoneg = TRUE;
+ e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
if (phy_reg & MII_CR_LOOPBACK) {
phy_reg &= ~MII_CR_LOOPBACK;
- igb_write_phy_reg(hw, PHY_CONTROL, phy_reg);
- igb_phy_sw_reset(hw);
+ if (hw->phy.type == I210_I_PHY_ID)
+ e1000_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
+ e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg);
+ e1000_phy_commit(hw);
}
}
-
static void igb_create_lbtest_frame(struct sk_buff *skb,
unsigned int frame_size)
{
@@ -1608,51 +1536,77 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
memset(&skb->data[frame_size + 12], 0xAF, 1);
}
-static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
+ unsigned int frame_size)
{
- frame_size /= 2;
- if (*(skb->data + 3) == 0xFF) {
- if ((*(skb->data + frame_size + 10) == 0xBE) &&
- (*(skb->data + frame_size + 12) == 0xAF)) {
- return 0;
- }
- }
- return 13;
+ unsigned char *data;
+ bool match = true;
+
+ frame_size >>= 1;
+
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ data = rx_buffer->skb->data;
+#else
+ data = kmap(rx_buffer->page);
+#endif
+
+ if (data[3] != 0xFF ||
+ data[frame_size + 10] != 0xBE ||
+ data[frame_size + 12] != 0xAF)
+ match = false;
+
+#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ kunmap(rx_buffer->page);
+
+#endif
+ return match;
}
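
Note: igb_create_lbtest_frame() stamps known bytes at fixed offsets and igb_check_lbtest_frame() verifies them after the packet has looped back. A standalone sketch of that marker scheme over a plain buffer (the create side is reconstructed from the offsets checked above; no skb or page mapping involved):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static void create_lbtest_frame(unsigned char *data, unsigned int frame_size)
{
	memset(data, 0xFF, frame_size);
	frame_size /= 2;
	memset(&data[frame_size], 0xAA, frame_size - 1);
	data[frame_size + 10] = 0xBE;	/* markers checked on receive */
	data[frame_size + 12] = 0xAF;
}

static bool check_lbtest_frame(const unsigned char *data,
			       unsigned int frame_size)
{
	frame_size >>= 1;
	return data[3] == 0xFF &&
	       data[frame_size + 10] == 0xBE &&
	       data[frame_size + 12] == 0xAF;
}

int main(void)
{
	unsigned char frame[1024];

	create_lbtest_frame(frame, sizeof(frame));
	printf("loopback frame %s\n",
	       check_lbtest_frame(frame, sizeof(frame)) ? "ok" : "corrupted");
	return 0;
}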
-static int igb_clean_test_rings(struct igb_ring *rx_ring,
- struct igb_ring *tx_ring,
- unsigned int size)
+static u16 igb_clean_test_rings(struct igb_ring *rx_ring,
+ struct igb_ring *tx_ring,
+ unsigned int size)
{
union e1000_adv_rx_desc *rx_desc;
- struct igb_buffer *buffer_info;
- int rx_ntc, tx_ntc, count = 0;
- u32 staterr;
+ struct igb_rx_buffer *rx_buffer_info;
+ struct igb_tx_buffer *tx_buffer_info;
+ u16 rx_ntc, tx_ntc, count = 0;
/* initialize next to clean and descriptor values */
rx_ntc = rx_ring->next_to_clean;
tx_ntc = tx_ring->next_to_clean;
- rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
- while (staterr & E1000_RXD_STAT_DD) {
+ while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
/* check rx buffer */
- buffer_info = &rx_ring->buffer_info[rx_ntc];
-
- /* unmap rx buffer, will be remapped by alloc_rx_buffers */
- dma_unmap_single(rx_ring->dev,
- buffer_info->dma,
- rx_ring->rx_buffer_len,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
+ rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
+
+ /* sync Rx buffer for CPU read */
+ dma_sync_single_for_cpu(rx_ring->dev,
+ rx_buffer_info->dma,
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ IGB_RX_HDR_LEN,
+#else
+ IGB_RX_BUFSZ,
+#endif
+ DMA_FROM_DEVICE);
/* verify contents of skb */
- if (!igb_check_lbtest_frame(buffer_info->skb, size))
+ if (igb_check_lbtest_frame(rx_buffer_info, size))
count++;
+ /* sync Rx buffer for device write */
+ dma_sync_single_for_device(rx_ring->dev,
+ rx_buffer_info->dma,
+#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
+ IGB_RX_HDR_LEN,
+#else
+ IGB_RX_BUFSZ,
+#endif
+ DMA_FROM_DEVICE);
+
/* unmap buffer on tx side */
- buffer_info = &tx_ring->buffer_info[tx_ntc];
- igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+ tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
+ igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
/* increment rx/tx next to clean counters */
rx_ntc++;
@@ -1663,12 +1617,11 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
tx_ntc = 0;
/* fetch next descriptor */
- rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
- staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
}
/* re-map buffers to ring, store next to clean values */
- igb_alloc_rx_buffers_adv(rx_ring, count);
+ igb_alloc_rx_buffers(rx_ring, count);
rx_ring->next_to_clean = rx_ntc;
tx_ring->next_to_clean = tx_ntc;
@@ -1679,8 +1632,9 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
{
struct igb_ring *tx_ring = &adapter->test_tx_ring;
struct igb_ring *rx_ring = &adapter->test_rx_ring;
- int i, j, lc, good_cnt, ret_val = 0;
- unsigned int size = 1024;
+ u16 i, j, lc, good_cnt;
+ int ret_val = 0;
+ unsigned int size = IGB_RX_HDR_LEN;
netdev_tx_t tx_ret_val;
struct sk_buff *skb;
@@ -1711,7 +1665,7 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
/* place 64 packets on the transmit queue*/
for (i = 0; i < 64; i++) {
skb_get(skb);
- tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring);
+ tx_ret_val = igb_xmit_frame_ring(skb, tx_ring);
if (tx_ret_val == NETDEV_TX_OK)
good_cnt++;
}
@@ -1741,21 +1695,14 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
{
/* PHY loopback cannot be performed if SoL/IDER
* sessions are active */
- if (igb_check_reset_block(&adapter->hw)) {
- dev_err(&adapter->pdev->dev,
+ if (e1000_check_reset_block(&adapter->hw)) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
"Cannot do PHY loopback test "
"when SoL/IDER is active.\n");
*data = 0;
goto out;
}
- if ((adapter->hw.mac.type == e1000_i210)
- || (adapter->hw.mac.type == e1000_i210)) {
- dev_err(&adapter->pdev->dev,
- "Loopback test not supported "
- "on this part at this time.\n");
- *data = 0;
- goto out;
- }
+
*data = igb_setup_desc_rings(adapter);
if (*data)
goto out;
@@ -1763,6 +1710,7 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
if (*data)
goto err_loopback;
*data = igb_run_loopback_test(adapter);
+
igb_loopback_cleanup(adapter);
err_loopback:
@@ -1773,31 +1721,40 @@ out:
static int igb_link_test(struct igb_adapter *adapter, u64 *data)
{
- struct e1000_hw *hw = &adapter->hw;
+	u32 link = 0;
+	int i;
+
	*data = 0;
-	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
+ if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
int i = 0;
- hw->mac.serdes_has_link = false;
+ adapter->hw.mac.serdes_has_link = FALSE;
/* On some blade server designs, link establishment
* could take as long as 2-3 minutes */
do {
- hw->mac.ops.check_for_link(&adapter->hw);
- if (hw->mac.serdes_has_link)
- return *data;
+ e1000_check_for_link(&adapter->hw);
+ if (adapter->hw.mac.serdes_has_link)
+ goto out;
msleep(20);
} while (i++ < 3750);
*data = 1;
} else {
- hw->mac.ops.check_for_link(&adapter->hw);
- if (hw->mac.autoneg)
- msleep(4000);
-
- if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
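+		/* poll for copper link for up to IGB_MAX_LINK_TRIES seconds */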
+		for (i = 0; i < IGB_MAX_LINK_TRIES; i++) {
+			link = igb_has_link(adapter);
+			if (link)
+				goto out;
+			msleep(1000);
+		}
+ if (!link)
*data = 1;
}
- return *data;
+ out:
+ return *data;
}
static void igb_diag_test(struct net_device *netdev,
@@ -1817,10 +1774,10 @@ static void igb_diag_test(struct net_device *netdev,
forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
autoneg = adapter->hw.mac.autoneg;
- dev_info(&adapter->pdev->dev, "offline testing starting\n");
+ dev_info(pci_dev_to_dev(adapter->pdev), "offline testing starting\n");
/* power up link for link test */
- igb_power_up_link(adapter);
+ igb_power_up_link(adapter);
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result */
@@ -1845,8 +1802,10 @@ static void igb_diag_test(struct net_device *netdev,
eth_test->flags |= ETH_TEST_FL_FAILED;
igb_reset(adapter);
+
/* power up link for loopback test */
igb_power_up_link(adapter);
+
if (igb_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1856,15 +1815,15 @@ static void igb_diag_test(struct net_device *netdev,
adapter->hw.mac.autoneg = autoneg;
/* force this routine to wait until autoneg complete/timeout */
- adapter->hw.phy.autoneg_wait_to_complete = true;
+ adapter->hw.phy.autoneg_wait_to_complete = TRUE;
igb_reset(adapter);
- adapter->hw.phy.autoneg_wait_to_complete = false;
+ adapter->hw.phy.autoneg_wait_to_complete = FALSE;
clear_bit(__IGB_TESTING, &adapter->state);
if (if_running)
dev_open(netdev);
} else {
- dev_info(&adapter->pdev->dev, "online testing starting\n");
+ dev_info(pci_dev_to_dev(adapter->pdev), "online testing starting\n");
/* PHY is powered down when interface is down */
if (if_running && igb_link_test(adapter, &data[4]))
@@ -1883,69 +1842,19 @@ static void igb_diag_test(struct net_device *netdev,
msleep_interruptible(4 * 1000);
}
-static int igb_wol_exclusion(struct igb_adapter *adapter,
- struct ethtool_wolinfo *wol)
-{
- struct e1000_hw *hw = &adapter->hw;
- int retval = 1; /* fail by default */
-
- switch (hw->device_id) {
- case E1000_DEV_ID_82575GB_QUAD_COPPER:
- /* WoL not supported */
- wol->supported = 0;
- break;
- case E1000_DEV_ID_82575EB_FIBER_SERDES:
- case E1000_DEV_ID_82576_FIBER:
- case E1000_DEV_ID_82576_SERDES:
- /* Wake events not supported on port B */
- if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) {
- wol->supported = 0;
- break;
- }
- /* return success for non excluded adapter ports */
- retval = 0;
- break;
- case E1000_DEV_ID_82576_QUAD_COPPER:
- case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
- /* quad port adapters only support WoL on port A */
- if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
- wol->supported = 0;
- break;
- }
- /* return success for non excluded adapter ports */
- retval = 0;
- break;
- default:
- /* dual port cards only support WoL on port A from now on
- * unless it was enabled in the eeprom for port B
- * so exclude FUNC_1 ports from having WoL enabled */
- if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
- !adapter->eeprom_wol) {
- wol->supported = 0;
- break;
- }
-
- retval = 0;
- }
-
- return retval;
-}
-
static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC |
- WAKE_PHY;
wol->wolopts = 0;
- /* this function will set ->supported = 0 and return 1 if wol is not
- * supported by this hardware */
- if (igb_wol_exclusion(adapter, wol) ||
- !device_can_wakeup(&adapter->pdev->dev))
+ if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
return;
+ wol->supported = WAKE_UCAST | WAKE_MCAST |
+ WAKE_BCAST | WAKE_MAGIC |
+ WAKE_PHY;
+
/* apply any specific unsupported masks here */
switch (adapter->hw.device_id) {
default:
@@ -1971,8 +1880,7 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
return -EOPNOTSUPP;
- if (igb_wol_exclusion(adapter, wol) ||
- !device_can_wakeup(&adapter->pdev->dev))
+ if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
return wol->wolopts ? -EOPNOTSUPP : 0;
/* these settings will always override what we currently have */
@@ -1994,8 +1902,7 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
}
/* bit defines for adapter->led_status */
-#define IGB_LED_ON 0
-
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
static int igb_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
@@ -2004,23 +1911,47 @@ static int igb_set_phys_id(struct net_device *netdev,
switch (state) {
case ETHTOOL_ID_ACTIVE:
- igb_blink_led(hw);
+ e1000_blink_led(hw);
return 2;
case ETHTOOL_ID_ON:
- igb_blink_led(hw);
+ e1000_led_on(hw);
break;
case ETHTOOL_ID_OFF:
- igb_led_off(hw);
+ e1000_led_off(hw);
break;
case ETHTOOL_ID_INACTIVE:
- igb_led_off(hw);
- clear_bit(IGB_LED_ON, &adapter->led_status);
- igb_cleanup_led(hw);
+ e1000_led_off(hw);
+ e1000_cleanup_led(hw);
break;
}
return 0;
}
+#else
+static int igb_phys_id(struct net_device *netdev, u32 data)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned long timeout;
+
+ timeout = data * 1000;
+
+ /*
+	 * msleep_interruptible() only accepts an unsigned int, so we are
+	 * limited in how long we can wait
+ */
+ if (!timeout || timeout > UINT_MAX)
+ timeout = UINT_MAX;
+
+ e1000_blink_led(hw);
+ msleep_interruptible(timeout);
+
+ e1000_led_off(hw);
+ e1000_cleanup_led(hw);
+
+ return 0;
+}
+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
static int igb_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
@@ -2032,7 +1963,10 @@ static int igb_set_coalesce(struct net_device *netdev,
((ec->rx_coalesce_usecs > 3) &&
(ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
(ec->rx_coalesce_usecs == 2))
+ {
+		printk(KERN_ERR "set_coalesce: invalid parameter\n");
return -EINVAL;
+ }
if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
((ec->tx_coalesce_usecs > 3) &&
@@ -2043,10 +1977,12 @@ static int igb_set_coalesce(struct net_device *netdev,
if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
return -EINVAL;
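+	/* tx_max_coalesced_frames_irq maps to the Tx cleanup work limit */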
+ if (ec->tx_max_coalesced_frames_irq)
+ adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
+
/* If ITR is disabled, disable DMAC */
if (ec->rx_coalesce_usecs == 0) {
- if (adapter->flags & IGB_FLAG_DMAC)
- adapter->flags &= ~IGB_FLAG_DMAC;
+ adapter->dmac = IGB_DMAC_DISABLE;
}
/* convert to rate of irq's per second */
@@ -2065,7 +2001,8 @@ static int igb_set_coalesce(struct net_device *netdev,
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
- if (q_vector->rx_ring)
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+ if (q_vector->rx.ring)
q_vector->itr_val = adapter->rx_itr_setting;
else
q_vector->itr_val = adapter->tx_itr_setting;
@@ -2087,6 +2024,8 @@ static int igb_get_coalesce(struct net_device *netdev,
else
ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+ ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
+
if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
if (adapter->tx_itr_setting <= 3)
ec->tx_coalesce_usecs = adapter->tx_itr_setting;
@@ -2105,6 +2044,7 @@ static int igb_nway_reset(struct net_device *netdev)
return 0;
}
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
static int igb_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
@@ -2116,19 +2056,32 @@ static int igb_get_sset_count(struct net_device *netdev, int sset)
return -ENOTSUPP;
}
}
+#else
+static int igb_get_stats_count(struct net_device *netdev)
+{
+ return IGB_STATS_LEN;
+}
+
+static int igb_diag_test_count(struct net_device *netdev)
+{
+ return IGB_TEST_LEN;
+}
+#endif
static void igb_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- struct rtnl_link_stats64 *net_stats = &adapter->stats64;
- unsigned int start;
- struct igb_ring *ring;
- int i, j;
+#ifdef HAVE_NETDEV_STATS_IN_NETDEV
+ struct net_device_stats *net_stats = &netdev->stats;
+#else
+ struct net_device_stats *net_stats = &adapter->net_stats;
+#endif
+ u64 *queue_stat;
+ int i, j, k;
char *p;
- spin_lock(&adapter->stats64_lock);
- igb_update_stats(adapter, net_stats);
+ igb_update_stats(adapter);
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
@@ -2141,36 +2094,15 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < adapter->num_tx_queues; j++) {
- u64 restart2;
-
- ring = adapter->tx_ring[j];
- do {
- start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
- data[i] = ring->tx_stats.packets;
- data[i+1] = ring->tx_stats.bytes;
- data[i+2] = ring->tx_stats.restart_queue;
- } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
- do {
- start = u64_stats_fetch_begin_bh(&ring->tx_syncp2);
- restart2 = ring->tx_stats.restart_queue2;
- } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start));
- data[i+2] += restart2;
-
- i += IGB_TX_QUEUE_STATS_LEN;
+ queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats;
+ for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
+ data[i] = queue_stat[k];
}
for (j = 0; j < adapter->num_rx_queues; j++) {
- ring = adapter->rx_ring[j];
- do {
- start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
- data[i] = ring->rx_stats.packets;
- data[i+1] = ring->rx_stats.bytes;
- data[i+2] = ring->rx_stats.drops;
- data[i+3] = ring->rx_stats.csum_err;
- data[i+4] = ring->rx_stats.alloc_failed;
- } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
- i += IGB_RX_QUEUE_STATS_LEN;
+ queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats;
+ for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
+ data[i] = queue_stat[k];
}
- spin_unlock(&adapter->stats64_lock);
}
static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
@@ -2220,6 +2152,834 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
}
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
+static int igb_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+ switch (adapter->hw.mac.type) {
+#ifdef HAVE_PTP_1588_CLOCK
+ case e1000_82575:
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;
+
+ /* 82576 does not support timestamping all packets. */
+ if (adapter->hw.mac.type >= e1000_82580)
+ info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
+ else
+ info->rx_filters |=
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+#endif /* HAVE_PTP_1588_CLOCK */
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
+
+#ifdef CONFIG_PM_RUNTIME
+static int igb_ethtool_begin(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
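+	/* keep the device resumed while the ethtool operation runs */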
+ pm_runtime_get_sync(&adapter->pdev->dev);
+
+ return 0;
+}
+
+static void igb_ethtool_complete(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ pm_runtime_put(&adapter->pdev->dev);
+}
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifndef HAVE_NDO_SET_FEATURES
+static u32 igb_get_rx_csum(struct net_device *netdev)
+{
+ return !!(netdev->features & NETIF_F_RXCSUM);
+}
+
+static int igb_set_rx_csum(struct net_device *netdev, u32 data)
+{
+ const u32 feature_list = NETIF_F_RXCSUM;
+
+ if (data)
+ netdev->features |= feature_list;
+ else
+ netdev->features &= ~feature_list;
+
+ return 0;
+}
+
+static int igb_set_tx_csum(struct net_device *netdev, u32 data)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+#ifdef NETIF_F_IPV6_CSUM
+ u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+#else
+ u32 feature_list = NETIF_F_IP_CSUM;
+#endif
+
+ if (adapter->hw.mac.type >= e1000_82576)
+ feature_list |= NETIF_F_SCTP_CSUM;
+
+ if (data)
+ netdev->features |= feature_list;
+ else
+ netdev->features &= ~feature_list;
+
+ return 0;
+}
+
+#ifdef NETIF_F_TSO
+static int igb_set_tso(struct net_device *netdev, u32 data)
+{
+#ifdef NETIF_F_TSO6
+ const u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6;
+#else
+ const u32 feature_list = NETIF_F_TSO;
+#endif
+
+ if (data)
+ netdev->features |= feature_list;
+ else
+ netdev->features &= ~feature_list;
+
+#ifndef HAVE_NETDEV_VLAN_FEATURES
+ if (!data) {
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct net_device *v_netdev;
+ int i;
+
+ /* disable TSO on all VLANs if they're present */
+ if (!adapter->vlgrp)
+ goto tso_out;
+
+ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+ v_netdev = vlan_group_get_device(adapter->vlgrp, i);
+ if (!v_netdev)
+ continue;
+
+ v_netdev->features &= ~feature_list;
+ vlan_group_set_device(adapter->vlgrp, i, v_netdev);
+ }
+ }
+
+tso_out:
+
+#endif /* HAVE_NETDEV_VLAN_FEATURES */
+ return 0;
+}
+
+#endif /* NETIF_F_TSO */
+#ifdef ETHTOOL_GFLAGS
+static int igb_set_flags(struct net_device *netdev, u32 data)
+{
+ u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
+ ETH_FLAG_RXHASH;
+#ifndef HAVE_VLAN_RX_REGISTER
+ u32 changed = netdev->features ^ data;
+#endif
+ int rc;
+#ifndef IGB_NO_LRO
+
+ supported_flags |= ETH_FLAG_LRO;
+#endif
+ /*
+	 * Since there is no support for enabling Tx VLAN accel separately,
+	 * make sure the Tx flag is cleared if the Rx flag is.
+ */
+ if (!(data & ETH_FLAG_RXVLAN))
+ data &= ~ETH_FLAG_TXVLAN;
+
+ rc = ethtool_op_set_flags(netdev, data, supported_flags);
+ if (rc)
+ return rc;
+#ifndef HAVE_VLAN_RX_REGISTER
+
+ if (changed & ETH_FLAG_RXVLAN)
+ igb_vlan_mode(netdev, data);
+#endif
+
+ return 0;
+}
+
+#endif /* ETHTOOL_GFLAGS */
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef ETHTOOL_SADV_COAL
+static int igb_set_adv_coal(struct net_device *netdev,
+			    struct ethtool_value *edata)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	switch (edata->data) {
+	case IGB_DMAC_DISABLE:
+	case IGB_DMAC_MIN:
+	case IGB_DMAC_500:
+	case IGB_DMAC_EN_DEFAULT:
+	case IGB_DMAC_2000:
+	case IGB_DMAC_3000:
+	case IGB_DMAC_4000:
+	case IGB_DMAC_5000:
+	case IGB_DMAC_6000:
+	case IGB_DMAC_7000:
+	case IGB_DMAC_8000:
+	case IGB_DMAC_9000:
+	case IGB_DMAC_MAX:
+		adapter->dmac = edata->data;
+		break;
+	default:
+		adapter->dmac = IGB_DMAC_DISABLE;
+		printk(KERN_ERR "set_dmac: invalid setting, setting DMAC to %d\n",
+		       adapter->dmac);
+	}
+	printk(KERN_INFO "%s: setting DMAC to %d\n", netdev->name, adapter->dmac);
+	return 0;
+}
+
+#endif /* ETHTOOL_SADV_COAL */
+#ifdef ETHTOOL_GADV_COAL
+static void igb_get_dmac(struct net_device *netdev,
+ struct ethtool_value *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ edata->data = adapter->dmac;
+}
+#endif
+
+#ifdef ETHTOOL_GEEE
+static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ret_val;
+ u16 phy_data;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ edata->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full);
+
+ if (!hw->dev_spec._82575.eee_disable)
+ edata->advertised =
+ mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+
+ /* The IPCNFG and EEER registers are not supported on I354. */
+ if (hw->mac.type == e1000_i354) {
+ e1000_get_eee_status_i354(hw, (bool *)&edata->eee_active);
+ } else {
+ u32 eeer;
+
+ eeer = E1000_READ_REG(hw, E1000_EEER);
+
+ /* EEE status on negotiated link */
+ if (eeer & E1000_EEER_EEE_NEG)
+ edata->eee_active = true;
+
+ if (eeer & E1000_EEER_TX_LPI_EN)
+ edata->tx_lpi_enabled = true;
+ }
+
+ /* EEE Link Partner Advertised */
+ switch (hw->mac.type) {
+ case e1000_i350:
+ ret_val = e1000_read_emi_reg(hw, E1000_EEE_LP_ADV_ADDR_I350,
+ &phy_data);
+ if (ret_val)
+ return -ENODATA;
+
+ edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+ break;
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
+ E1000_EEE_LP_ADV_DEV_I210,
+ &phy_data);
+ if (ret_val)
+ return -ENODATA;
+
+ edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+ break;
+ default:
+ break;
+ }
+
+ edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
+
+ if ((hw->mac.type == e1000_i354) &&
+ (edata->eee_enabled))
+ edata->tx_lpi_enabled = true;
+
+ /*
+ * report correct negotiated EEE status for devices that
+ * wrongly report EEE at half-duplex
+ */
+ if (adapter->link_duplex == HALF_DUPLEX) {
+ edata->eee_enabled = false;
+ edata->eee_active = false;
+ edata->tx_lpi_enabled = false;
+		edata->advertised = 0;
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef ETHTOOL_SEEE
+static int igb_set_eee(struct net_device *netdev,
+ struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct ethtool_eee eee_curr;
+ s32 ret_val;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ ret_val = igb_get_eee(netdev, &eee_curr);
+ if (ret_val)
+ return ret_val;
+
+ if (eee_curr.eee_enabled) {
+ if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "Setting EEE tx-lpi is not supported\n");
+ return -EINVAL;
+ }
+
+ /* Tx LPI time is not implemented currently */
+ if (edata->tx_lpi_timer) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ if (edata->advertised &
+ ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+				"EEE Advertisement supports only 100Tx and/or 1000T full duplex\n");
+ return -EINVAL;
+ }
+
+ } else if (!edata->eee_enabled) {
+ dev_err(pci_dev_to_dev(adapter->pdev),
+ "Setting EEE options is not supported with EEE disabled\n");
+ return -EINVAL;
+ }
+
+ adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+
+ if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
+ hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
+
+ /* reset link */
+ if (netif_running(netdev))
+ igb_reinit_locked(adapter);
+ else
+ igb_reset(adapter);
+ }
+
+ return 0;
+}
+#endif /* ETHTOOL_SEEE */
+#ifdef ETHTOOL_GRXFH
+#ifdef ETHTOOL_GRXFHINDIR
+
+static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* Report default options for RSS on igb */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
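+		/* fall through */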
+ case UDP_V4_FLOW:
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
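+		/* fall through */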
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
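+		/* fall through */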
+ case UDP_V6_FLOW:
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
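+		/* fall through */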
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#endif /* ETHTOOL_GRXFHINDIR */
+static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
+ void *rule_locs)
+#else
+ u32 *rule_locs)
+#endif
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ ret = 0;
+ break;
+#ifdef ETHTOOL_GRXFHINDIR
+ case ETHTOOL_GRXFHINDIR:
+ ret = igb_get_rss_hash_opts(adapter, cmd);
+ break;
+#endif /* ETHTOOL_GRXFHINDIR */
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
+ IGB_FLAG_RSS_FIELD_IPV6_UDP)
+static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ u32 flags = adapter->flags;
+
+ /*
+ * RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ !(nfc->data & RXH_L4_B_0_1) ||
+ !(nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ case UDP_V4_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ (nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* if we changed something we need to update flags */
+ if (flags != adapter->flags) {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc = E1000_READ_REG(hw, E1000_MRQC);
+
+ if ((flags & UDP_RSS_FLAGS) &&
+ !(adapter->flags & UDP_RSS_FLAGS))
+ DPRINTK(DRV, WARNING,
+ "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
+
+ adapter->flags = flags;
+
+ /* Perform hash on these packet types */
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
+ E1000_MRQC_RSS_FIELD_IPV4_TCP |
+ E1000_MRQC_RSS_FIELD_IPV6 |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP;
+
+ mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
+ E1000_MRQC_RSS_FIELD_IPV6_UDP);
+
+ if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+
+ if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+
+ E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+ }
+
+ return 0;
+}
+
+static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = igb_set_rss_hash_opt(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+#endif /* ETHTOOL_GRXFH */
+#ifdef ETHTOOL_GRXFHINDIR
+static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return IGB_RETA_SIZE;
+}
+
+static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+ indir[i] = adapter->rss_indir_tbl[i];
+
+ return 0;
+}
+
+#endif /* ETHTOOL_GRXFHINDIR */
+#ifdef ETHTOOL_SRXFHINDIR
+void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg = E1000_RETA(0);
+ u32 shift = 0;
+ int i = 0;
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ shift = 6;
+ break;
+ case e1000_82576:
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count)
+ shift = 3;
+ break;
+ default:
+ break;
+ }
+
+ while (i < IGB_RETA_SIZE) {
+ u32 val = 0;
+ int j;
+
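+		/* pack four 8-bit table entries into each 32-bit RETA register */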
+ for (j = 3; j >= 0; j--) {
+ val <<= 8;
+ val |= adapter->rss_indir_tbl[i + j];
+ }
+
+ E1000_WRITE_REG(hw, reg, val << shift);
+ reg += 4;
+ i += 4;
+ }
+}
+
+static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+ u32 num_queues;
+
+ num_queues = adapter->rss_queues;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count)
+ num_queues = 2;
+ break;
+ default:
+ break;
+ }
+
+ /* Verify user input. */
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+		if (indir[i] >= num_queues)
+			return -EINVAL;
+
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+ adapter->rss_indir_tbl[i] = indir[i];
+
+ igb_write_rss_indir_tbl(adapter);
+
+ return 0;
+}
+#endif /* ETHTOOL_SRXFHINDIR */
+#ifdef ETHTOOL_GCHANNELS
+
+static unsigned int igb_max_rss_queues(struct igb_adapter *adapter)
+{
+ unsigned int max_rss_queues;
+
+ /* Determine the maximum number of RSS queues supported. */
+ switch (adapter->hw.mac.type) {
+ case e1000_i211:
+ max_rss_queues = IGB_MAX_RX_QUEUES_I211;
+ break;
+ case e1000_82575:
+ case e1000_i210:
+ max_rss_queues = IGB_MAX_RX_QUEUES_82575;
+ break;
+ case e1000_i350:
+ /* I350 cannot do RSS and SR-IOV at the same time */
+ if (adapter->vfs_allocated_count) {
+ max_rss_queues = 1;
+ break;
+ }
+ /* fall through */
+ case e1000_82576:
+ if (adapter->vfs_allocated_count) {
+ max_rss_queues = 2;
+ break;
+ }
+ /* fall through */
+ case e1000_82580:
+ default:
+ max_rss_queues = IGB_MAX_RX_QUEUES;
+ break;
+ }
+
+ return max_rss_queues;
+}
+
+static void igb_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+ /* report maximum channels */
+ ch->max_combined = igb_max_rss_queues(adapter);
+ ch->max_rx = ch->max_combined;
+ if (adapter->vfs_allocated_count)
+ ch->max_tx = 1;
+ else
+ ch->max_tx = ch->max_combined;
+
+ /* report info for other vector */
+ if (adapter->msix_entries) {
+ ch->max_other = NON_Q_VECTORS;
+ ch->other_count = NON_Q_VECTORS;
+ }
+
+ /* record RSS/TSS queues */
+ if (adapter->flags & IGB_FLAG_QUEUE_PAIRS) {
+ if (adapter->num_rx_queues > adapter->num_tx_queues) {
+ ch->combined_count = adapter->num_tx_queues;
+ ch->rx_count = adapter->num_rx_queues -
+ adapter->num_tx_queues;
+ } else if (adapter->num_rx_queues < adapter->num_tx_queues) {
+ ch->combined_count = adapter->num_rx_queues;
+ ch->tx_count = adapter->num_tx_queues -
+ adapter->num_rx_queues;
+ } else {
+ ch->combined_count = adapter->num_rx_queues;
+ }
+ } else {
+ ch->rx_count = adapter->num_rx_queues;
+ ch->tx_count = adapter->num_tx_queues;
+ }
+}
+#endif /* ETHTOOL_GCHANNELS */
+#ifdef ETHTOOL_SCHANNELS
+
+static int igb_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ unsigned int max_rss_queues;
+
+ /* we cannot support combined, Rx, and Tx vectors simultaneously */
+ if (ch->combined_count && ch->rx_count && ch->tx_count)
+ return -EINVAL;
+
+ /* ignore other_count since it is not changeable */
+
+ /* verify we have at least one channel in each direction */
+ if (!ch->combined_count && (!ch->rx_count || !ch->tx_count))
+ return -EINVAL;
+
+ /* verify number of Tx queues does not exceed 1 if SR-IOV is enabled */
+ if (adapter->vfs_allocated_count &&
+ ((ch->combined_count + ch->tx_count) > 1))
+ return -EINVAL;
+
+ /* verify the number of channels does not exceed hardware limits */
+ max_rss_queues = igb_max_rss_queues(adapter);
+ if (((ch->combined_count + ch->rx_count) > max_rss_queues) ||
+ ((ch->combined_count + ch->tx_count) > max_rss_queues))
+ return -EINVAL;
+
+ /* Determine if we need to pair queues. */
+ switch (adapter->hw.mac.type) {
+ case e1000_82575:
+ case e1000_i211:
+ /* Device supports enough interrupts without queue pairing. */
+ break;
+ case e1000_i350:
+ /* The PF has 3 interrupts and 1 queue pair w/ SR-IOV */
+ if (adapter->vfs_allocated_count)
+ break;
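+		/* fall through */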
+ case e1000_82576:
+ /*
+ * The PF has access to 6 interrupt vectors if the number of
+ * VFs is less than 7. If that is the case we don't have
+ * to pair up the queues.
+ */
+ if ((adapter->vfs_allocated_count > 0) &&
+ (adapter->vfs_allocated_count < 7))
+ break;
+ /* fall through */
+ case e1000_82580:
+ case e1000_i210:
+ default:
+ /* verify we can support as many queues as requested */
+ if ((ch->combined_count +
+ ch->rx_count + ch->tx_count) > MAX_Q_VECTORS)
+ return -EINVAL;
+ break;
+ }
+
+ /* update configuration values */
+ adapter->rss_queues = ch->combined_count + ch->rx_count;
+ if (ch->rx_count == ch->tx_count || adapter->vfs_allocated_count)
+ adapter->tss_queues = 0;
+ else
+ adapter->tss_queues = ch->combined_count + ch->tx_count;
+
+ if (ch->combined_count)
+ adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+ else
+ adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
+
+ /* update queue configuration for adapter */
+ return igb_setup_queues(adapter);
+}
+
+#endif /* ETHTOOL_SCHANNELS */
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
@@ -2241,14 +3001,106 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_pauseparam = igb_set_pauseparam,
.self_test = igb_diag_test,
.get_strings = igb_get_strings,
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef HAVE_ETHTOOL_SET_PHYS_ID
.set_phys_id = igb_set_phys_id,
+#else
+ .phys_id = igb_phys_id,
+#endif /* HAVE_ETHTOOL_SET_PHYS_ID */
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#ifdef HAVE_ETHTOOL_GET_SSET_COUNT
.get_sset_count = igb_get_sset_count,
+#else
+ .get_stats_count = igb_get_stats_count,
+ .self_test_count = igb_diag_test_count,
+#endif
.get_ethtool_stats = igb_get_ethtool_stats,
+#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
+ .get_perm_addr = ethtool_op_get_perm_addr,
+#endif
.get_coalesce = igb_get_coalesce,
.set_coalesce = igb_set_coalesce,
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef HAVE_ETHTOOL_GET_TS_INFO
+ .get_ts_info = igb_get_ts_info,
+#endif /* HAVE_ETHTOOL_GET_TS_INFO */
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#ifdef CONFIG_PM_RUNTIME
+ .begin = igb_ethtool_begin,
+ .complete = igb_ethtool_complete,
+#endif /* CONFIG_PM_RUNTIME */
+#ifndef HAVE_NDO_SET_FEATURES
+ .get_rx_csum = igb_get_rx_csum,
+ .set_rx_csum = igb_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = igb_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = igb_set_tso,
+#endif
+#ifdef ETHTOOL_GFLAGS
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = igb_set_flags,
+#endif /* ETHTOOL_GFLAGS */
+#endif /* HAVE_NDO_SET_FEATURES */
+#ifdef ETHTOOL_GADV_COAL
+	.get_advcoal = igb_get_dmac,
+#endif /* ETHTOOL_GADV_COAL */
+#ifdef ETHTOOL_SADV_COAL
+	.set_advcoal = igb_set_adv_coal,
+#endif /* ETHTOOL_SADV_COAL */
+#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+#ifdef ETHTOOL_GEEE
+ .get_eee = igb_get_eee,
+#endif
+#ifdef ETHTOOL_SEEE
+ .set_eee = igb_set_eee,
+#endif
+#ifdef ETHTOOL_GRXFHINDIR
+ .get_rxfh_indir_size = igb_get_rxfh_indir_size,
+ .get_rxfh_indir = igb_get_rxfh_indir,
+#endif /* ETHTOOL_GRXFHINDIR */
+#ifdef ETHTOOL_SRXFHINDIR
+ .set_rxfh_indir = igb_set_rxfh_indir,
+#endif /* ETHTOOL_SRXFHINDIR */
+#ifdef ETHTOOL_GCHANNELS
+ .get_channels = igb_get_channels,
+#endif /* ETHTOOL_GCHANNELS */
+#ifdef ETHTOOL_SCHANNELS
+ .set_channels = igb_set_channels,
+#endif /* ETHTOOL_SCHANNELS */
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#ifdef ETHTOOL_GRXFH
+ .get_rxnfc = igb_get_rxnfc,
+ .set_rxnfc = igb_set_rxnfc,
+#endif
+};
+
+#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
+static const struct ethtool_ops_ext igb_ethtool_ops_ext = {
+ .size = sizeof(struct ethtool_ops_ext),
+ .get_ts_info = igb_get_ts_info,
+ .set_phys_id = igb_set_phys_id,
+ .get_eee = igb_get_eee,
+ .set_eee = igb_set_eee,
+ .get_rxfh_indir_size = igb_get_rxfh_indir_size,
+ .get_rxfh_indir = igb_get_rxfh_indir,
+ .set_rxfh_indir = igb_set_rxfh_indir,
+ .get_channels = igb_get_channels,
+ .set_channels = igb_set_channels,
};
void igb_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
+ set_ethtool_ops_ext(netdev, &igb_ethtool_ops_ext);
}
+#else
+void igb_set_ethtool_ops(struct net_device *netdev)
+{
+ /* have to "undeclare" const on this struct to remove warnings */
+ SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igb_ethtool_ops);
+}
+#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */
+#endif /* SIOCETHTOOL */
+