Diffstat (limited to 'drivers/net/sfc')
-rw-r--r--  drivers/net/sfc/Makefile          |   4
-rw-r--r--  drivers/net/sfc/bitfield.h        |   7
-rw-r--r--  drivers/net/sfc/boards.c          |   9
-rw-r--r--  drivers/net/sfc/boards.h          |   2
-rw-r--r--  drivers/net/sfc/efx.c             |  88
-rw-r--r--  drivers/net/sfc/enum.h            |  49
-rw-r--r--  drivers/net/sfc/ethtool.c         | 259
-rw-r--r--  drivers/net/sfc/falcon.c          |  97
-rw-r--r--  drivers/net/sfc/falcon.h          |   5
-rw-r--r--  drivers/net/sfc/falcon_hwdefs.h   |  20
-rw-r--r--  drivers/net/sfc/falcon_io.h       |  29
-rw-r--r--  drivers/net/sfc/falcon_xmac.c     |  92
-rw-r--r--  drivers/net/sfc/mdio_10g.c        |  78
-rw-r--r--  drivers/net/sfc/mdio_10g.h        |  24
-rw-r--r--  drivers/net/sfc/net_driver.h      |  72
-rw-r--r--  drivers/net/sfc/rx.c              |  59
-rw-r--r--  drivers/net/sfc/selftest.c        | 719
-rw-r--r--  drivers/net/sfc/selftest.h        |  50
-rw-r--r--  drivers/net/sfc/sfe4001.c         |  28
-rw-r--r--  drivers/net/sfc/tenxpress.c       |  93
-rw-r--r--  drivers/net/sfc/tx.c              | 669
-rw-r--r--  drivers/net/sfc/workarounds.h     |   2
-rw-r--r--  drivers/net/sfc/xfp_phy.c         |  38
23 files changed, 2300 insertions, 193 deletions
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index 0f023447eafd..1d2daeec7ac1 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,5 +1,5 @@
sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \
- i2c-direct.o ethtool.o xfp_phy.o mdio_10g.o \
- tenxpress.o boards.o sfe4001.o
+ i2c-direct.o selftest.o ethtool.o xfp_phy.o \
+ mdio_10g.o tenxpress.o boards.o sfe4001.o
obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index 2806201644cc..2c79d27404e0 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -483,7 +483,7 @@ typedef union efx_oword {
#endif
#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
- if (FALCON_REV(efx) >= FALCON_REV_B0) { \
+ if (falcon_rev(efx) >= FALCON_REV_B0) { \
EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
} else { \
EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
@@ -491,7 +491,7 @@ typedef union efx_oword {
} while (0)
#define EFX_QWORD_FIELD_VER(efx, qword, field) \
- (FALCON_REV(efx) >= FALCON_REV_B0 ? \
+ (falcon_rev(efx) >= FALCON_REV_B0 ? \
EFX_QWORD_FIELD((qword), field##_B0) : \
EFX_QWORD_FIELD((qword), field##_A1))
@@ -501,8 +501,5 @@ typedef union efx_oword {
#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
#define EFX_DMA_TYPE_WIDTH(width) \
(((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
-#define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \
- ~((u64) 0) : ~((u32) 0))
-#define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK)
#endif /* EFX_BITFIELD_H */
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
index eecaa6d58584..7fc0328dc055 100644
--- a/drivers/net/sfc/boards.c
+++ b/drivers/net/sfc/boards.c
@@ -27,10 +27,8 @@ static void blink_led_timer(unsigned long context)
struct efx_blinker *bl = &efx->board_info.blinker;
efx->board_info.set_fault_led(efx, bl->state);
bl->state = !bl->state;
- if (bl->resubmit) {
- bl->timer.expires = jiffies + BLINK_INTERVAL;
- add_timer(&bl->timer);
- }
+ if (bl->resubmit)
+ mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
}
static void board_blink(struct efx_nic *efx, int blink)
@@ -44,8 +42,7 @@ static void board_blink(struct efx_nic *efx, int blink)
blinker->state = 0;
setup_timer(&blinker->timer, blink_led_timer,
(unsigned long)efx);
- blinker->timer.expires = jiffies + BLINK_INTERVAL;
- add_timer(&blinker->timer);
+ mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
} else {
blinker->resubmit = 0;
if (blinker->timer.function)
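For reference (not part of the patch): the boards.c hunks above replace the open-coded "set ->expires, then add_timer()" sequence with mod_timer(), which updates the expiry and (re)activates the timer in one call and is safe whether or not the timer is already pending. A minimal sketch of the re-arm pattern, with hypothetical names and the timer API of this kernel generation:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static struct timer_list example_timer;      /* hypothetical timer   */
    #define EXAMPLE_INTERVAL (HZ / 3)            /* hypothetical period  */

    static void example_timer_fn(unsigned long data)
    {
            /* ... do the periodic work ... */

            /* Re-arm: equivalent to setting ->expires and calling
             * add_timer(), but also valid if the timer is still pending. */
            mod_timer(&example_timer, jiffies + EXAMPLE_INTERVAL);
    }

    static void example_start(void)
    {
            setup_timer(&example_timer, example_timer_fn, 0UL);
            mod_timer(&example_timer, jiffies + EXAMPLE_INTERVAL);
    }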
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
index f56341d428e1..695764dc2e64 100644
--- a/drivers/net/sfc/boards.h
+++ b/drivers/net/sfc/boards.h
@@ -22,5 +22,7 @@ enum efx_board_type {
extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
extern int sfe4001_poweron(struct efx_nic *efx);
extern void sfe4001_poweroff(struct efx_nic *efx);
+/* Are we putting the PHY into flash config mode? */
+extern unsigned int sfe4001_phy_flash_cfg;
#endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 59edcf793c19..449760642e31 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -199,11 +199,12 @@ static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
*/
static inline void efx_channel_processed(struct efx_channel *channel)
{
- /* Write to EVQ_RPTR_REG. If a new event arrived in a race
- * with finishing processing, a new interrupt will be raised.
- */
+ /* The interrupt handler for this channel may set work_pending
+ * as soon as we acknowledge the events we've seen. Make sure
+ * it's cleared before then. */
channel->work_pending = 0;
- smp_wmb(); /* Ensure channel updated before any new interrupt. */
+ smp_wmb();
+
falcon_eventq_read_ack(channel);
}
@@ -265,7 +266,7 @@ void efx_process_channel_now(struct efx_channel *channel)
napi_disable(&channel->napi_str);
/* Poll the channel */
- (void) efx_process_channel(channel, efx->type->evq_size);
+ efx_process_channel(channel, efx->type->evq_size);
/* Ack the eventq. This may cause an interrupt to be generated
* when they are reenabled */
@@ -317,26 +318,6 @@ static void efx_remove_eventq(struct efx_channel *channel)
*
*************************************************************************/
-/* Setup per-NIC RX buffer parameters.
- * Calculate the rx buffer allocation parameters required to support
- * the current MTU, including padding for header alignment and overruns.
- */
-static void efx_calc_rx_buffer_params(struct efx_nic *efx)
-{
- unsigned int order, len;
-
- len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
- EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
- efx->type->rx_buffer_padding);
-
- /* Calculate page-order */
- for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
- ;
-
- efx->rx_buffer_len = len;
- efx->rx_buffer_order = order;
-}
-
static int efx_probe_channel(struct efx_channel *channel)
{
struct efx_tx_queue *tx_queue;
@@ -387,7 +368,14 @@ static int efx_init_channels(struct efx_nic *efx)
struct efx_channel *channel;
int rc = 0;
- efx_calc_rx_buffer_params(efx);
+ /* Calculate the rx buffer allocation parameters required to
+ * support the current MTU, including padding for header
+ * alignment and overruns.
+ */
+ efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+ efx->type->rx_buffer_padding);
+ efx->rx_buffer_order = get_order(efx->rx_buffer_len);
/* Initialise the channels */
efx_for_each_channel(channel, efx) {
@@ -440,9 +428,12 @@ static void efx_start_channel(struct efx_channel *channel)
netif_napi_add(channel->napi_dev, &channel->napi_str,
efx_poll, napi_weight);
+ /* The interrupt handler for this channel may set work_pending
+ * as soon as we enable it. Make sure it's cleared before
+ * then. Similarly, make sure it sees the enabled flag set. */
channel->work_pending = 0;
channel->enabled = 1;
- smp_wmb(); /* ensure channel updated before first interrupt */
+ smp_wmb();
napi_enable(&channel->napi_str);
@@ -704,7 +695,7 @@ static void efx_stop_port(struct efx_nic *efx)
mutex_unlock(&efx->mac_lock);
/* Serialise against efx_set_multicast_list() */
- if (NET_DEV_REGISTERED(efx)) {
+ if (efx_dev_registered(efx)) {
netif_tx_lock_bh(efx->net_dev);
netif_tx_unlock_bh(efx->net_dev);
}
@@ -791,22 +782,23 @@ static int efx_init_io(struct efx_nic *efx)
efx->membase = ioremap_nocache(efx->membase_phys,
efx->type->mem_map_size);
if (!efx->membase) {
- EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n",
- efx->type->mem_bar, efx->membase_phys,
+ EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
+ efx->type->mem_bar,
+ (unsigned long long)efx->membase_phys,
efx->type->mem_map_size);
rc = -ENOMEM;
goto fail4;
}
- EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n",
- efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size,
- efx->membase);
+ EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
+ efx->type->mem_bar, (unsigned long long)efx->membase_phys,
+ efx->type->mem_map_size, efx->membase);
return 0;
fail4:
release_mem_region(efx->membase_phys, efx->type->mem_map_size);
fail3:
- efx->membase_phys = 0UL;
+ efx->membase_phys = 0;
fail2:
pci_disable_device(efx->pci_dev);
fail1:
@@ -824,7 +816,7 @@ static void efx_fini_io(struct efx_nic *efx)
if (efx->membase_phys) {
pci_release_region(efx->pci_dev, efx->type->mem_bar);
- efx->membase_phys = 0UL;
+ efx->membase_phys = 0;
}
pci_disable_device(efx->pci_dev);
@@ -1043,7 +1035,7 @@ static void efx_start_all(struct efx_nic *efx)
return;
if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
return;
- if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev))
+ if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
return;
/* Mark the port as enabled so port reconfigurations can start, then
@@ -1073,9 +1065,8 @@ static void efx_flush_all(struct efx_nic *efx)
cancel_delayed_work_sync(&efx->monitor_work);
/* Ensure that all RX slow refills are complete. */
- efx_for_each_rx_queue(rx_queue, efx) {
+ efx_for_each_rx_queue(rx_queue, efx)
cancel_delayed_work_sync(&rx_queue->work);
- }
/* Stop scheduled port reconfigurations */
cancel_work_sync(&efx->reconfigure_work);
@@ -1101,9 +1092,10 @@ static void efx_stop_all(struct efx_nic *efx)
falcon_disable_interrupts(efx);
if (efx->legacy_irq)
synchronize_irq(efx->legacy_irq);
- efx_for_each_channel_with_interrupt(channel, efx)
+ efx_for_each_channel_with_interrupt(channel, efx) {
if (channel->irq)
synchronize_irq(channel->irq);
+ }
/* Stop all NAPI processing and synchronous rx refills */
efx_for_each_channel(channel, efx)
@@ -1125,7 +1117,7 @@ static void efx_stop_all(struct efx_nic *efx)
/* Stop the kernel transmit interface late, so the watchdog
* timer isn't ticking over the flush */
efx_stop_queue(efx);
- if (NET_DEV_REGISTERED(efx)) {
+ if (efx_dev_registered(efx)) {
netif_tx_lock_bh(efx->net_dev);
netif_tx_unlock_bh(efx->net_dev);
}
@@ -1344,13 +1336,17 @@ static int efx_net_stop(struct net_device *net_dev)
return 0;
}
-/* Context: process, dev_base_lock held, non-blocking. */
+/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
struct efx_nic *efx = net_dev->priv;
struct efx_mac_stats *mac_stats = &efx->mac_stats;
struct net_device_stats *stats = &net_dev->stats;
+ /* Update stats if possible, but do not wait if another thread
+ * is updating them (or resetting the NIC); slightly stale
+ * stats are acceptable.
+ */
if (!spin_trylock(&efx->stats_lock))
return stats;
if (efx->state == STATE_RUNNING) {
@@ -1494,7 +1490,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
static int efx_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
- struct net_device *net_dev = (struct net_device *)ptr;
+ struct net_device *net_dev = ptr;
if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
struct efx_nic *efx = net_dev->priv;
@@ -1563,7 +1559,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
efx_for_each_tx_queue(tx_queue, efx)
efx_release_tx_buffers(tx_queue);
- if (NET_DEV_REGISTERED(efx)) {
+ if (efx_dev_registered(efx)) {
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
unregister_netdev(efx->net_dev);
}
@@ -1688,7 +1684,7 @@ static int efx_reset(struct efx_nic *efx)
if (method == RESET_TYPE_DISABLE) {
/* Reinitialise the device anyway so the driver unload sequence
* can talk to the external SRAM */
- (void) falcon_init_nic(efx);
+ falcon_init_nic(efx);
rc = -EIO;
goto fail4;
}
@@ -1873,6 +1869,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
tx_queue->queue = i;
tx_queue->buffer = NULL;
tx_queue->channel = &efx->channel[0]; /* for safety */
+ tx_queue->tso_headers_free = NULL;
}
for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
rx_queue = &efx->rx_queue[i];
@@ -2071,7 +2068,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
net_dev = alloc_etherdev(sizeof(*efx));
if (!net_dev)
return -ENOMEM;
- net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
+ net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_TSO);
if (lro)
net_dev->features |= NETIF_F_LRO;
efx = net_dev->priv;
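For reference (illustrative, not quoted from the driver): get_order() returns the smallest page order whose span covers a given byte count, which is exactly what the removed efx_calc_rx_buffer_params() loop computed by hand. A minimal sketch with hypothetical buffer lengths:

    #include <asm/page.h>   /* PAGE_SIZE, get_order() */

    static unsigned int example_rx_buffer_order(unsigned int rx_buffer_len)
    {
            /* With 4 KiB pages: get_order(1600) == 0 (one page suffices),
             * while get_order(9000) == 2 (9000 > 2 * PAGE_SIZE, and orders
             * are rounded up to a power-of-two number of pages). */
            return get_order(rx_buffer_len);
    }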
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
index 43663a4619da..c53290d08e2b 100644
--- a/drivers/net/sfc/enum.h
+++ b/drivers/net/sfc/enum.h
@@ -10,6 +10,55 @@
#ifndef EFX_ENUM_H
#define EFX_ENUM_H
+/**
+ * enum efx_loopback_mode - loopback modes
+ * @LOOPBACK_NONE: no loopback
+ * @LOOPBACK_XGMII: loopback within MAC at XGMII level
+ * @LOOPBACK_XGXS: loopback within MAC at XGXS level
+ * @LOOPBACK_XAUI: loopback within MAC at XAUI level
+ * @LOOPBACK_PHYXS: loopback within PHY at PHYXS level
+ * @LOOPBACK_PCS: loopback within PHY at PCS level
+ * @LOOPBACK_PMAPMD: loopback within PHY at PMAPMD level
+ * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!)
+ */
+/* Please keep in order and up-to-date w.r.t the following two #defines */
+enum efx_loopback_mode {
+ LOOPBACK_NONE = 0,
+ LOOPBACK_MAC = 1,
+ LOOPBACK_XGMII = 2,
+ LOOPBACK_XGXS = 3,
+ LOOPBACK_XAUI = 4,
+ LOOPBACK_PHY = 5,
+ LOOPBACK_PHYXS = 6,
+ LOOPBACK_PCS = 7,
+ LOOPBACK_PMAPMD = 8,
+ LOOPBACK_NETWORK = 9,
+ LOOPBACK_MAX
+};
+
+#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
+
+extern const char *efx_loopback_mode_names[];
+#define LOOPBACK_MODE_NAME(mode) \
+ STRING_TABLE_LOOKUP(mode, efx_loopback_mode)
+#define LOOPBACK_MODE(efx) \
+ LOOPBACK_MODE_NAME(efx->loopback_mode)
+
+/* These loopbacks occur within the controller */
+#define LOOPBACKS_10G_INTERNAL ((1 << LOOPBACK_XGMII)| \
+ (1 << LOOPBACK_XGXS) | \
+ (1 << LOOPBACK_XAUI))
+
+#define LOOPBACK_MASK(_efx) \
+ (1 << (_efx)->loopback_mode)
+
+#define LOOPBACK_INTERNAL(_efx) \
+ ((LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)) ? 1 : 0)
+
+#define LOOPBACK_OUT_OF(_from, _to, _mask) \
+ (((LOOPBACK_MASK(_from) & (_mask)) && \
+ ((LOOPBACK_MASK(_to) & (_mask)) == 0)) ? 1 : 0)
+
/*****************************************************************************/
/**
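As a worked example of the new loopback mask macros (hypothetical helper, not in the patch): LOOPBACK_MASK() turns the current mode into a single-bit mask, and LOOPBACK_INTERNAL() tests it against the set of loopbacks implemented inside the controller.

    /* LOOPBACKS_10G_INTERNAL = XGMII | XGXS | XAUI = bits 2..4 = 0x1c */
    static int example_is_internal_loopback(enum efx_loopback_mode mode)
    {
            unsigned int mask = 1 << mode;          /* LOOPBACK_MASK()      */

            /* 1 for LOOPBACK_XGMII/XGXS/XAUI, 0 for e.g. LOOPBACK_PMAPMD
             * (bit 8), mirroring LOOPBACK_INTERNAL(). */
            return (mask & LOOPBACKS_10G_INTERNAL) ? 1 : 0;
    }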
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index ad541badbd98..e2c75d101610 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -12,12 +12,26 @@
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include "net_driver.h"
+#include "selftest.h"
#include "efx.h"
#include "ethtool.h"
#include "falcon.h"
#include "gmii.h"
#include "mac.h"
+const char *efx_loopback_mode_names[] = {
+ [LOOPBACK_NONE] = "NONE",
+ [LOOPBACK_MAC] = "MAC",
+ [LOOPBACK_XGMII] = "XGMII",
+ [LOOPBACK_XGXS] = "XGXS",
+ [LOOPBACK_XAUI] = "XAUI",
+ [LOOPBACK_PHY] = "PHY",
+ [LOOPBACK_PHYXS] = "PHY(XS)",
+ [LOOPBACK_PCS] = "PHY(PCS)",
+ [LOOPBACK_PMAPMD] = "PHY(PMAPMD)",
+ [LOOPBACK_NETWORK] = "NETWORK",
+};
+
static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable);
struct ethtool_string {
@@ -217,23 +231,179 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
+/**
+ * efx_fill_test - fill in an individual self-test entry
+ * @test_index: Index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ * @test: Pointer to test result (used only if data != %NULL)
+ * @unit_format: Unit name format (e.g. "channel\%d")
+ * @unit_id: Unit id (e.g. 0 for "channel0")
+ * @test_format: Test name format (e.g. "loopback.\%s.tx_sent")
+ * @test_id: Test id (e.g. "PHY" for "loopback.PHY.tx_sent")
+ *
+ * Fill in an individual self-test entry.
+ */
+static void efx_fill_test(unsigned int test_index,
+ struct ethtool_string *strings, u64 *data,
+ int *test, const char *unit_format, int unit_id,
+ const char *test_format, const char *test_id)
+{
+ struct ethtool_string unit_str, test_str;
+
+ /* Fill data value, if applicable */
+ if (data)
+ data[test_index] = *test;
+
+ /* Fill string, if applicable */
+ if (strings) {
+ snprintf(unit_str.name, sizeof(unit_str.name),
+ unit_format, unit_id);
+ snprintf(test_str.name, sizeof(test_str.name),
+ test_format, test_id);
+ snprintf(strings[test_index].name,
+ sizeof(strings[test_index].name),
+ "%-9s%-17s", unit_str.name, test_str.name);
+ }
+}
+
+#define EFX_PORT_NAME "port%d", 0
+#define EFX_CHANNEL_NAME(_channel) "channel%d", _channel->channel
+#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
+#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
+#define EFX_LOOPBACK_NAME(_mode, _counter) \
+ "loopback.%s." _counter, LOOPBACK_MODE_NAME(mode)
+
+/**
+ * efx_fill_loopback_test - fill in a block of loopback self-test entries
+ * @efx: Efx NIC
+ * @lb_tests: Efx loopback self-test results structure
+ * @mode: Loopback test mode
+ * @test_index: Starting index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ */
+static int efx_fill_loopback_test(struct efx_nic *efx,
+ struct efx_loopback_self_tests *lb_tests,
+ enum efx_loopback_mode mode,
+ unsigned int test_index,
+ struct ethtool_string *strings, u64 *data)
+{
+ struct efx_tx_queue *tx_queue;
+
+ efx_for_each_tx_queue(tx_queue, efx) {
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_sent[tx_queue->queue],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_sent"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->tx_done[tx_queue->queue],
+ EFX_TX_QUEUE_NAME(tx_queue),
+ EFX_LOOPBACK_NAME(mode, "tx_done"));
+ }
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_good,
+ EFX_PORT_NAME,
+ EFX_LOOPBACK_NAME(mode, "rx_good"));
+ efx_fill_test(test_index++, strings, data,
+ &lb_tests->rx_bad,
+ EFX_PORT_NAME,
+ EFX_LOOPBACK_NAME(mode, "rx_bad"));
+
+ return test_index;
+}
+
+/**
+ * efx_ethtool_fill_self_tests - get self-test details
+ * @efx: Efx NIC
+ * @tests: Efx self-test results structure, or %NULL
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ */
+static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
+ struct efx_self_tests *tests,
+ struct ethtool_string *strings,
+ u64 *data)
+{
+ struct efx_channel *channel;
+ unsigned int n = 0;
+ enum efx_loopback_mode mode;
+
+ /* Interrupt */
+ efx_fill_test(n++, strings, data, &tests->interrupt,
+ "core", 0, "interrupt", NULL);
+
+ /* Event queues */
+ efx_for_each_channel(channel, efx) {
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_dma[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.dma", NULL);
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_int[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.int", NULL);
+ efx_fill_test(n++, strings, data,
+ &tests->eventq_poll[channel->channel],
+ EFX_CHANNEL_NAME(channel),
+ "eventq.poll", NULL);
+ }
+
+ /* PHY presence */
+ efx_fill_test(n++, strings, data, &tests->phy_ok,
+ EFX_PORT_NAME, "phy_ok", NULL);
+
+ /* Loopback tests */
+ efx_fill_test(n++, strings, data, &tests->loopback_speed,
+ EFX_PORT_NAME, "loopback.speed", NULL);
+ efx_fill_test(n++, strings, data, &tests->loopback_full_duplex,
+ EFX_PORT_NAME, "loopback.full_duplex", NULL);
+ for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
+ if (!(efx->loopback_modes & (1 << mode)))
+ continue;
+ n = efx_fill_loopback_test(efx,
+ &tests->loopback[mode], mode, n,
+ strings, data);
+ }
+
+ return n;
+}
+
static int efx_ethtool_get_stats_count(struct net_device *net_dev)
{
return EFX_ETHTOOL_NUM_STATS;
}
+static int efx_ethtool_self_test_count(struct net_device *net_dev)
+{
+ struct efx_nic *efx = net_dev->priv;
+
+ return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
+}
+
static void efx_ethtool_get_strings(struct net_device *net_dev,
u32 string_set, u8 *strings)
{
+ struct efx_nic *efx = net_dev->priv;
struct ethtool_string *ethtool_strings =
(struct ethtool_string *)strings;
int i;
- if (string_set == ETH_SS_STATS)
+ switch (string_set) {
+ case ETH_SS_STATS:
for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
strncpy(ethtool_strings[i].name,
efx_ethtool_stats[i].name,
sizeof(ethtool_strings[i].name));
+ break;
+ case ETH_SS_TEST:
+ efx_ethtool_fill_self_tests(efx, NULL,
+ ethtool_strings, NULL);
+ break;
+ default:
+ /* No other string sets */
+ break;
+ }
}
static void efx_ethtool_get_stats(struct net_device *net_dev,
@@ -272,6 +442,22 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
}
}
+static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
+{
+ int rc;
+
+ /* Our TSO requires TX checksumming, so force TX checksumming
+ * on when TSO is enabled.
+ */
+ if (enable) {
+ rc = efx_ethtool_set_tx_csum(net_dev, 1);
+ if (rc)
+ return rc;
+ }
+
+ return ethtool_op_set_tso(net_dev, enable);
+}
+
static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
{
struct efx_nic *efx = net_dev->priv;
@@ -283,6 +469,15 @@ static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
efx_flush_queues(efx);
+ /* Our TSO requires TX checksumming, so disable TSO when
+ * checksumming is disabled
+ */
+ if (!enable) {
+ rc = efx_ethtool_set_tso(net_dev, 0);
+ if (rc)
+ return rc;
+ }
+
return 0;
}
@@ -305,6 +500,64 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
return efx->rx_checksum_enabled;
}
+static void efx_ethtool_self_test(struct net_device *net_dev,
+ struct ethtool_test *test, u64 *data)
+{
+ struct efx_nic *efx = net_dev->priv;
+ struct efx_self_tests efx_tests;
+ int offline, already_up;
+ int rc;
+
+ ASSERT_RTNL();
+ if (efx->state != STATE_RUNNING) {
+ rc = -EIO;
+ goto fail1;
+ }
+
+ /* We need rx buffers and interrupts. */
+ already_up = (efx->net_dev->flags & IFF_UP);
+ if (!already_up) {
+ rc = dev_open(efx->net_dev);
+ if (rc) {
+ EFX_ERR(efx, "failed opening device.\n");
+ goto fail2;
+ }
+ }
+
+ memset(&efx_tests, 0, sizeof(efx_tests));
+ offline = (test->flags & ETH_TEST_FL_OFFLINE);
+
+ /* Perform online self tests first */
+ rc = efx_online_test(efx, &efx_tests);
+ if (rc)
+ goto out;
+
+ /* Perform offline tests only if online tests passed */
+ if (offline) {
+ /* Stop the kernel from sending packets during the test. */
+ efx_stop_queue(efx);
+ rc = efx_flush_queues(efx);
+ if (!rc)
+ rc = efx_offline_test(efx, &efx_tests,
+ efx->loopback_modes);
+ efx_wake_queue(efx);
+ }
+
+ out:
+ if (!already_up)
+ dev_close(efx->net_dev);
+
+ EFX_LOG(efx, "%s all %sline self-tests\n",
+ rc == 0 ? "passed" : "failed", offline ? "off" : "on");
+
+ fail2:
+ fail1:
+ /* Fill ethtool results structures */
+ efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data);
+ if (rc)
+ test->flags |= ETH_TEST_FL_FAILED;
+}
+
/* Restart autonegotiation */
static int efx_ethtool_nway_reset(struct net_device *net_dev)
{
@@ -451,8 +704,12 @@ struct ethtool_ops efx_ethtool_ops = {
.set_tx_csum = efx_ethtool_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = efx_ethtool_set_tso,
.get_flags = ethtool_op_get_flags,
.set_flags = ethtool_op_set_flags,
+ .self_test_count = efx_ethtool_self_test_count,
+ .self_test = efx_ethtool_self_test,
.get_strings = efx_ethtool_get_strings,
.phys_id = efx_ethtool_phys_id,
.get_stats_count = efx_ethtool_get_stats_count,
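For illustration only (hypothetical output, not captured from a device): with the "%-9s%-17s" format used by efx_fill_test(), the ETH_SS_TEST strings for a single channel, one TX queue and an XGMII loopback pass come out along the lines of:

    core     interrupt
    channel0 eventq.dma
    channel0 eventq.int
    channel0 eventq.poll
    port0    phy_ok
    port0    loopback.speed
    port0    loopback.full_duplex
    txq0     loopback.XGMII.tx_sent
    txq0     loopback.XGMII.tx_done
    port0    loopback.XGMII.rx_good
    port0    loopback.XGMII.rx_bad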
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 46db549ce580..790db89db345 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -116,17 +116,8 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
**************************************************************************
*/
-/* DMA address mask (up to 46-bit, avoiding compiler warnings)
- *
- * Note that it is possible to have a platform with 64-bit longs and
- * 32-bit DMA addresses, or vice versa. EFX_DMA_MASK takes care of the
- * platform DMA mask.
- */
-#if BITS_PER_LONG == 64
-#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL)
-#else
-#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL)
-#endif
+/* DMA address mask */
+#define FALCON_DMA_MASK DMA_BIT_MASK(46)
/* TX DMA length mask (13-bit) */
#define FALCON_TX_DMA_MASK (4096 - 1)
@@ -145,7 +136,7 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
#define PCI_EXP_LNKSTA_LNK_WID_LBN 4
#define FALCON_IS_DUAL_FUNC(efx) \
- (FALCON_REV(efx) < FALCON_REV_B0)
+ (falcon_rev(efx) < FALCON_REV_B0)
/**************************************************************************
*
@@ -465,7 +456,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
TX_DESCQ_TYPE, 0,
TX_NON_IP_DROP_DIS_B0, 1);
- if (FALCON_REV(efx) >= FALCON_REV_B0) {
+ if (falcon_rev(efx) >= FALCON_REV_B0) {
int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
@@ -474,7 +465,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
tx_queue->queue);
- if (FALCON_REV(efx) < FALCON_REV_B0) {
+ if (falcon_rev(efx) < FALCON_REV_B0) {
efx_oword_t reg;
BUG_ON(tx_queue->queue >= 128); /* HW limit */
@@ -635,7 +626,7 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue)
efx_oword_t rx_desc_ptr;
struct efx_nic *efx = rx_queue->efx;
int rc;
- int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0;
+ int is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
int iscsi_digest_en = is_b0;
EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
@@ -742,8 +733,10 @@ void falcon_fini_rx(struct efx_rx_queue *rx_queue)
continue;
break;
}
- if (rc)
+ if (rc) {
EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
+ efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
+ }
/* Remove RX descriptor ring from card */
EFX_ZERO_OWORD(rx_desc_ptr);
@@ -822,10 +815,10 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel,
tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
tx_queue = &efx->tx_queue[tx_ev_q_label];
- if (NET_DEV_REGISTERED(efx))
+ if (efx_dev_registered(efx))
netif_tx_lock(efx->net_dev);
falcon_notify_tx_desc(tx_queue);
- if (NET_DEV_REGISTERED(efx))
+ if (efx_dev_registered(efx))
netif_tx_unlock(efx->net_dev);
} else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
EFX_WORKAROUND_10727(efx)) {
@@ -884,7 +877,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
RX_EV_TCP_UDP_CHKSUM_ERR);
rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
- rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ?
+ rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
@@ -1065,7 +1058,7 @@ static void falcon_handle_global_event(struct efx_channel *channel,
EFX_QWORD_FIELD(*event, XG_PHY_INTR))
is_phy_event = 1;
- if ((FALCON_REV(efx) >= FALCON_REV_B0) &&
+ if ((falcon_rev(efx) >= FALCON_REV_B0) &&
EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
is_phy_event = 1;
@@ -1129,6 +1122,7 @@ static void falcon_handle_driver_event(struct efx_channel *channel,
case RX_RECOVERY_EV_DECODE:
EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
"Resetting.\n", channel->channel);
+ atomic_inc(&efx->rx_reset);
efx_schedule_reset(efx,
EFX_WORKAROUND_6555(efx) ?
RESET_TYPE_RX_RECOVERY :
@@ -1404,7 +1398,7 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+ efx_oword_t *int_ker = efx->irq_status.addr;
efx_oword_t fatal_intr;
int error, mem_perr;
static int n_int_errors;
@@ -1450,8 +1444,8 @@ out:
*/
static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
{
- struct efx_nic *efx = (struct efx_nic *)dev_id;
- efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+ struct efx_nic *efx = dev_id;
+ efx_oword_t *int_ker = efx->irq_status.addr;
struct efx_channel *channel;
efx_dword_t reg;
u32 queues;
@@ -1488,8 +1482,8 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
- struct efx_nic *efx = (struct efx_nic *)dev_id;
- efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+ struct efx_nic *efx = dev_id;
+ efx_oword_t *int_ker = efx->irq_status.addr;
struct efx_channel *channel;
int syserr;
int queues;
@@ -1541,9 +1535,9 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
*/
static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
{
- struct efx_channel *channel = (struct efx_channel *)dev_id;
+ struct efx_channel *channel = dev_id;
struct efx_nic *efx = channel->efx;
- efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+ efx_oword_t *int_ker = efx->irq_status.addr;
int syserr;
efx->last_irq_cpu = raw_smp_processor_id();
@@ -1571,7 +1565,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
unsigned long offset;
efx_dword_t dword;
- if (FALCON_REV(efx) < FALCON_REV_B0)
+ if (falcon_rev(efx) < FALCON_REV_B0)
return;
for (offset = RX_RSS_INDIR_TBL_B0;
@@ -1594,7 +1588,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
if (!EFX_INT_MODE_USE_MSI(efx)) {
irq_handler_t handler;
- if (FALCON_REV(efx) >= FALCON_REV_B0)
+ if (falcon_rev(efx) >= FALCON_REV_B0)
handler = falcon_legacy_interrupt_b0;
else
handler = falcon_legacy_interrupt_a1;
@@ -1635,12 +1629,13 @@ void falcon_fini_interrupt(struct efx_nic *efx)
efx_oword_t reg;
/* Disable MSI/MSI-X interrupts */
- efx_for_each_channel_with_interrupt(channel, efx)
+ efx_for_each_channel_with_interrupt(channel, efx) {
if (channel->irq)
free_irq(channel->irq, channel);
+ }
/* ACK legacy interrupt */
- if (FALCON_REV(efx) >= FALCON_REV_B0)
+ if (falcon_rev(efx) >= FALCON_REV_B0)
falcon_read(efx, &reg, INT_ISR0_B0);
else
falcon_irq_ack_a1(efx);
@@ -1731,7 +1726,8 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
efx_oword_t temp;
int count;
- if (FALCON_REV(efx) < FALCON_REV_B0)
+ if ((falcon_rev(efx) < FALCON_REV_B0) ||
+ (efx->loopback_mode != LOOPBACK_NONE))
return;
falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
@@ -1783,7 +1779,7 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
{
efx_oword_t temp;
- if (FALCON_REV(efx) < FALCON_REV_B0)
+ if (falcon_rev(efx) < FALCON_REV_B0)
return;
/* Isolate the MAC -> RX */
@@ -1821,7 +1817,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
MAC_SPEED, link_speed);
/* On B0, MAC backpressure can be disabled and packets get
* discarded. */
- if (FALCON_REV(efx) >= FALCON_REV_B0) {
+ if (falcon_rev(efx) >= FALCON_REV_B0) {
EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
!efx->link_up);
}
@@ -1839,7 +1835,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
/* Unisolate the MAC -> RX */
- if (FALCON_REV(efx) >= FALCON_REV_B0)
+ if (falcon_rev(efx) >= FALCON_REV_B0)
EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
falcon_write(efx, &reg, RX_CFG_REG_KER);
}
@@ -1854,7 +1850,7 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
return 0;
/* Statistics fetch will fail if the MAC is in TX drain */
- if (FALCON_REV(efx) >= FALCON_REV_B0) {
+ if (falcon_rev(efx) >= FALCON_REV_B0) {
efx_oword_t temp;
falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
@@ -1938,7 +1934,7 @@ static int falcon_gmii_wait(struct efx_nic *efx)
static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
int addr, int value)
{
- struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
+ struct efx_nic *efx = net_dev->priv;
unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
efx_oword_t reg;
@@ -2006,7 +2002,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
* could be read, -1 will be returned. */
static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
{
- struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
+ struct efx_nic *efx = net_dev->priv;
unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
efx_oword_t reg;
int value = -1;
@@ -2091,6 +2087,8 @@ static int falcon_probe_phy(struct efx_nic *efx)
efx->phy_type);
return -1;
}
+
+ efx->loopback_modes = LOOPBACKS_10G_INTERNAL | efx->phy_op->loopbacks;
return 0;
}
@@ -2109,7 +2107,7 @@ int falcon_probe_port(struct efx_nic *efx)
falcon_init_mdio(&efx->mii);
/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
- if (FALCON_REV(efx) >= FALCON_REV_B0)
+ if (falcon_rev(efx) >= FALCON_REV_B0)
efx->flow_control = EFX_FC_RX | EFX_FC_TX;
else
efx->flow_control = EFX_FC_RX;
@@ -2369,7 +2367,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
return -ENODEV;
}
- switch (FALCON_REV(efx)) {
+ switch (falcon_rev(efx)) {
case FALCON_REV_A0:
case 0xff:
EFX_ERR(efx, "Falcon rev A0 not supported\n");
@@ -2395,7 +2393,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
break;
default:
- EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx));
+ EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
return -ENODEV;
}
@@ -2415,7 +2413,7 @@ int falcon_probe_nic(struct efx_nic *efx)
/* Allocate storage for hardware specific data */
nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
- efx->nic_data = (void *) nic_data;
+ efx->nic_data = nic_data;
/* Determine number of ports etc. */
rc = falcon_probe_nic_variant(efx);
@@ -2468,14 +2466,12 @@ int falcon_probe_nic(struct efx_nic *efx)
fail5:
falcon_free_buffer(efx, &efx->irq_status);
fail4:
- /* fall-thru */
fail3:
if (nic_data->pci_dev2) {
pci_dev_put(nic_data->pci_dev2);
nic_data->pci_dev2 = NULL;
}
fail2:
- /* fall-thru */
fail1:
kfree(efx->nic_data);
return rc;
@@ -2487,13 +2483,10 @@ int falcon_probe_nic(struct efx_nic *efx)
*/
int falcon_init_nic(struct efx_nic *efx)
{
- struct falcon_nic_data *data;
efx_oword_t temp;
unsigned thresh;
int rc;
- data = (struct falcon_nic_data *)efx->nic_data;
-
/* Set up the address region register. This is only needed
* for the B0 FPGA, but since we are just pushing in the
* reset defaults this may as well be unconditional. */
@@ -2560,7 +2553,7 @@ int falcon_init_nic(struct efx_nic *efx)
/* Set number of RSS queues for receive path. */
falcon_read(efx, &temp, RX_FILTER_CTL_REG);
- if (FALCON_REV(efx) >= FALCON_REV_B0)
+ if (falcon_rev(efx) >= FALCON_REV_B0)
EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
else
EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
@@ -2598,7 +2591,7 @@ int falcon_init_nic(struct efx_nic *efx)
/* Prefetch threshold 2 => fetch when descriptor cache half empty */
EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
/* Squash TX of packets of 16 bytes or less */
- if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
+ if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
falcon_write(efx, &temp, TX_CFG2_REG_KER);
@@ -2615,7 +2608,7 @@ int falcon_init_nic(struct efx_nic *efx)
if (EFX_WORKAROUND_7575(efx))
EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
(3 * 4096) / 32);
- if (FALCON_REV(efx) >= FALCON_REV_B0)
+ if (falcon_rev(efx) >= FALCON_REV_B0)
EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
/* RX FIFO flow control thresholds */
@@ -2631,7 +2624,7 @@ int falcon_init_nic(struct efx_nic *efx)
falcon_write(efx, &temp, RX_CFG_REG_KER);
/* Set destination of both TX and RX Flush events */
- if (FALCON_REV(efx) >= FALCON_REV_B0) {
+ if (falcon_rev(efx) >= FALCON_REV_B0) {
EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
falcon_write(efx, &temp, DP_CTRL_REG);
}
@@ -2645,7 +2638,7 @@ void falcon_remove_nic(struct efx_nic *efx)
falcon_free_buffer(efx, &efx->irq_status);
- (void) falcon_reset_hw(efx, RESET_TYPE_ALL);
+ falcon_reset_hw(efx, RESET_TYPE_ALL);
/* Release the second function after the reset */
if (nic_data->pci_dev2) {
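For reference: DMA_BIT_MASK(46) expands to ((1ULL << 46) - 1), i.e. 0x3fffffffffffULL, the same value the removed EFX_DMA_MASK()/BITS_PER_LONG construction produced, without the 32/64-bit special-casing. A generic sketch of how such a mask is typically applied (illustrative only, not quoted from falcon.c):

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static int example_set_dma_mask(struct pci_dev *pci_dev)
    {
            /* Ask for the 46-bit mask the NIC supports; fall back to a
             * 32-bit mask if the platform cannot provide it. */
            int rc = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(46));

            if (rc)
                    rc = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
            return rc;
    }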
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index 6117403b0c03..492f9bc28840 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -23,7 +23,10 @@ enum falcon_revision {
FALCON_REV_B0 = 2,
};
-#define FALCON_REV(efx) ((efx)->pci_dev->revision)
+static inline int falcon_rev(struct efx_nic *efx)
+{
+ return efx->pci_dev->revision;
+}
extern struct efx_nic_type falcon_a_nic_type;
extern struct efx_nic_type falcon_b_nic_type;
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
index 0485a63eaff6..6d003114eeab 100644
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ b/drivers/net/sfc/falcon_hwdefs.h
@@ -636,6 +636,14 @@
#define XX_HIDRVA_WIDTH 1
#define XX_LODRVA_LBN 8
#define XX_LODRVA_WIDTH 1
+#define XX_LPBKD_LBN 3
+#define XX_LPBKD_WIDTH 1
+#define XX_LPBKC_LBN 2
+#define XX_LPBKC_WIDTH 1
+#define XX_LPBKB_LBN 1
+#define XX_LPBKB_WIDTH 1
+#define XX_LPBKA_LBN 0
+#define XX_LPBKA_WIDTH 1
#define XX_TXDRV_CTL_REG_MAC 0x12
#define XX_DEQD_LBN 28
@@ -656,8 +664,14 @@
#define XX_DTXA_WIDTH 4
/* XAUI XGXS core status register */
-#define XX_FORCE_SIG_DECODE_FORCED 0xff
#define XX_CORE_STAT_REG_MAC 0x16
+#define XX_FORCE_SIG_LBN 24
+#define XX_FORCE_SIG_WIDTH 8
+#define XX_FORCE_SIG_DECODE_FORCED 0xff
+#define XX_XGXS_LB_EN_LBN 23
+#define XX_XGXS_LB_EN_WIDTH 1
+#define XX_XGMII_LB_EN_LBN 22
+#define XX_XGMII_LB_EN_WIDTH 1
#define XX_ALIGN_DONE_LBN 20
#define XX_ALIGN_DONE_WIDTH 1
#define XX_SYNC_STAT_LBN 16
@@ -1111,7 +1125,7 @@ struct falcon_nvconfig_board_v2 {
u8 port1_phy_type;
__le16 asic_sub_revision;
__le16 board_revision;
-} __attribute__ ((packed));
+} __packed;
#define NVCONFIG_BASE 0x300
#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
@@ -1130,6 +1144,6 @@ struct falcon_nvconfig {
__le16 board_struct_ver;
__le16 board_checksum;
struct falcon_nvconfig_board_v2 board_v2;
-} __attribute__ ((packed));
+} __packed;
#endif /* EFX_FALCON_HWDEFS_H */
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
index ea08184ddfa9..6670cdfc41ab 100644
--- a/drivers/net/sfc/falcon_io.h
+++ b/drivers/net/sfc/falcon_io.h
@@ -56,14 +56,27 @@
#define FALCON_USE_QWORD_IO 1
#endif
-#define _falcon_writeq(efx, value, reg) \
- __raw_writeq((__force u64) (value), (efx)->membase + (reg))
-#define _falcon_writel(efx, value, reg) \
- __raw_writel((__force u32) (value), (efx)->membase + (reg))
-#define _falcon_readq(efx, reg) \
- ((__force __le64) __raw_readq((efx)->membase + (reg)))
-#define _falcon_readl(efx, reg) \
- ((__force __le32) __raw_readl((efx)->membase + (reg)))
+#ifdef FALCON_USE_QWORD_IO
+static inline void _falcon_writeq(struct efx_nic *efx, __le64 value,
+ unsigned int reg)
+{
+ __raw_writeq((__force u64)value, efx->membase + reg);
+}
+static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg)
+{
+ return (__force __le64)__raw_readq(efx->membase + reg);
+}
+#endif
+
+static inline void _falcon_writel(struct efx_nic *efx, __le32 value,
+ unsigned int reg)
+{
+ __raw_writel((__force u32)value, efx->membase + reg);
+}
+static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg)
+{
+ return (__force __le32)__raw_readl(efx->membase + reg);
+}
/* Writes to a normal 16-byte Falcon register, locking as appropriate. */
static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
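For background, the move from macros to static inline accessors above makes the byte-order types explicit so sparse can check them; the __force casts are the usual idiom for handing an annotated __le32/__le64 value to the raw MMIO writers, which take plain integers. A generic sketch of the idiom (hypothetical helper):

    static inline void example_write_le32(void __iomem *addr, __le32 value)
    {
            /* __raw_writel() takes a plain u32; the __force cast tells
             * sparse the endianness conversion is intentional. */
            __raw_writel((__force u32)value, addr);
    }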
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index aa7521b24a5d..55c0d9760be8 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -32,7 +32,7 @@
(FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE))
void falcon_xmac_writel(struct efx_nic *efx,
- efx_dword_t *value, unsigned int mac_reg)
+ efx_dword_t *value, unsigned int mac_reg)
{
efx_oword_t temp;
@@ -69,6 +69,10 @@ static int falcon_reset_xmac(struct efx_nic *efx)
udelay(10);
}
+ /* This often fails when DSP is disabled, ignore it */
+ if (sfe4001_phy_flash_cfg != 0)
+ return 0;
+
EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
return -ETIMEDOUT;
}
@@ -217,13 +221,13 @@ static int falcon_xgmii_status(struct efx_nic *efx)
{
efx_dword_t reg;
- if (FALCON_REV(efx) < FALCON_REV_B0)
+ if (falcon_rev(efx) < FALCON_REV_B0)
return 1;
/* The ISR latches, so clear it and re-read */
falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
-
+
if (EFX_DWORD_FIELD(reg, XM_LCLFLT) ||
EFX_DWORD_FIELD(reg, XM_RMTFLT)) {
EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg));
@@ -237,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
{
efx_dword_t reg;
- if (FALCON_REV(efx) < FALCON_REV_B0)
+ if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
return;
/* Flush the ISR */
@@ -284,6 +288,9 @@ int falcon_xaui_link_ok(struct efx_nic *efx)
efx_dword_t reg;
int align_done, sync_status, link_ok = 0;
+ if (LOOPBACK_INTERNAL(efx))
+ return 1;
+
/* Read link status */
falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
@@ -374,6 +381,61 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
falcon_xmac_writel(efx, &reg, XM_ADR_HI_REG_MAC);
}
+static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
+{
+ efx_dword_t reg;
+ int xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS) ? 1 : 0;
+ int xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI) ? 1 : 0;
+ int xgmii_loopback =
+ (efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0;
+
+ /* XGXS block is flaky and will need to be reset if moving
+ * into or out of XGMII, XGXS or XAUI loopbacks. */
+ if (EFX_WORKAROUND_5147(efx)) {
+ int old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
+ int reset_xgxs;
+
+ falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
+ old_xgxs_loopback = EFX_DWORD_FIELD(reg, XX_XGXS_LB_EN);
+ old_xgmii_loopback = EFX_DWORD_FIELD(reg, XX_XGMII_LB_EN);
+
+ falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC);
+ old_xaui_loopback = EFX_DWORD_FIELD(reg, XX_LPBKA);
+
+ /* The PHY driver may have turned XAUI off */
+ reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
+ (xaui_loopback != old_xaui_loopback) ||
+ (xgmii_loopback != old_xgmii_loopback));
+ if (reset_xgxs) {
+ falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
+ EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
+ EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
+ falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+ udelay(1);
+ EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 0);
+ EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 0);
+ falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+ udelay(1);
+ }
+ }
+
+ falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
+ EFX_SET_DWORD_FIELD(reg, XX_FORCE_SIG,
+ (xgxs_loopback || xaui_loopback) ?
+ XX_FORCE_SIG_DECODE_FORCED : 0);
+ EFX_SET_DWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback);
+ EFX_SET_DWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback);
+ falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);
+
+ falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC);
+ EFX_SET_DWORD_FIELD(reg, XX_LPBKD, xaui_loopback);
+ EFX_SET_DWORD_FIELD(reg, XX_LPBKC, xaui_loopback);
+ EFX_SET_DWORD_FIELD(reg, XX_LPBKB, xaui_loopback);
+ EFX_SET_DWORD_FIELD(reg, XX_LPBKA, xaui_loopback);
+ falcon_xmac_writel(efx, &reg, XX_SD_CTL_REG_MAC);
+}
+
+
/* Try to bring the Falcon side of the Falcon-Phy XAUI link back up if it
* fails to come back up on its own. Bash it until it comes back up */
static int falcon_check_xaui_link_up(struct efx_nic *efx)
@@ -382,7 +444,8 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx)
tries = EFX_WORKAROUND_5147(efx) ? 5 : 1;
max_tries = tries;
- if (efx->phy_type == PHY_TYPE_NONE)
+ if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
+ (efx->phy_type == PHY_TYPE_NONE))
return 0;
while (tries) {
@@ -391,12 +454,12 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx)
EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
__func__, tries);
- (void) falcon_reset_xaui(efx);
+ falcon_reset_xaui(efx);
udelay(200);
tries--;
}
- EFX_ERR(efx, "Failed to bring XAUI link back up in %d tries!\n",
+ EFX_LOG(efx, "Failed to bring XAUI link back up in %d tries!\n",
max_tries);
return 0;
}
@@ -408,8 +471,13 @@ void falcon_reconfigure_xmac(struct efx_nic *efx)
falcon_mask_status_intr(efx, 0);
falcon_deconfigure_mac_wrapper(efx);
+
+ efx->tx_disabled = LOOPBACK_INTERNAL(efx);
efx->phy_op->reconfigure(efx);
+
+ falcon_reconfigure_xgxs_core(efx);
falcon_reconfigure_xmac_core(efx);
+
falcon_reconfigure_mac_wrapper(efx);
/* Ensure XAUI link is up */
@@ -491,18 +559,20 @@ void falcon_update_stats_xmac(struct efx_nic *efx)
(mac_stats->rx_bytes - mac_stats->rx_good_bytes);
}
-#define EFX_XAUI_RETRAIN_MAX 8
-
int falcon_check_xmac(struct efx_nic *efx)
{
unsigned xaui_link_ok;
int rc;
+ if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
+ (efx->phy_type == PHY_TYPE_NONE))
+ return 0;
+
falcon_mask_status_intr(efx, 0);
xaui_link_ok = falcon_xaui_link_ok(efx);
if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok)
- (void) falcon_reset_xaui(efx);
+ falcon_reset_xaui(efx);
/* Call the PHY check_hw routine */
rc = efx->phy_op->check_hw(efx);
@@ -569,7 +639,7 @@ int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control)
reset = ((flow_control & EFX_FC_TX) &&
!(efx->flow_control & EFX_FC_TX));
if (EFX_WORKAROUND_11482(efx) && reset) {
- if (FALCON_REV(efx) >= FALCON_REV_B0) {
+ if (falcon_rev(efx) >= FALCON_REV_B0) {
/* Recover by resetting the EM block */
if (efx->link_up)
falcon_drain_tx_fifo(efx);
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index dc06bb0aa575..c4f540e93b79 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -44,6 +44,9 @@ static int mdio_clause45_check_mmd(struct efx_nic *efx, int mmd,
int status;
int phy_id = efx->mii.phy_id;
+ if (LOOPBACK_INTERNAL(efx))
+ return 0;
+
/* Read MMD STATUS2 to check it is responding. */
status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2);
if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) &
@@ -164,6 +167,22 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
int mmd = 0;
int good;
+ /* If the port is in loopback, then we should only consider a subset
+ * of mmd's */
+ if (LOOPBACK_INTERNAL(efx))
+ return 1;
+ else if (efx->loopback_mode == LOOPBACK_NETWORK)
+ return 0;
+ else if (efx->loopback_mode == LOOPBACK_PHYXS)
+ mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS |
+ MDIO_MMDREG_DEVS0_PCS |
+ MDIO_MMDREG_DEVS0_PMAPMD);
+ else if (efx->loopback_mode == LOOPBACK_PCS)
+ mmd_mask &= ~(MDIO_MMDREG_DEVS0_PCS |
+ MDIO_MMDREG_DEVS0_PMAPMD);
+ else if (efx->loopback_mode == LOOPBACK_PMAPMD)
+ mmd_mask &= ~MDIO_MMDREG_DEVS0_PMAPMD;
+
while (mmd_mask) {
if (mmd_mask & 1) {
/* Double reads because link state is latched, and a
@@ -182,6 +201,65 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
return ok;
}
+void mdio_clause45_transmit_disable(struct efx_nic *efx)
+{
+ int phy_id = efx->mii.phy_id;
+ int ctrl1, ctrl2;
+
+ ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
+ MDIO_MMDREG_TXDIS);
+ if (efx->tx_disabled)
+ ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
+ else
+ ctrl1 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
+ if (ctrl1 != ctrl2)
+ mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
+ MDIO_MMDREG_TXDIS, ctrl2);
+}
+
+void mdio_clause45_phy_reconfigure(struct efx_nic *efx)
+{
+ int phy_id = efx->mii.phy_id;
+ int ctrl1, ctrl2;
+
+ /* Handle (with debouncing) PMA/PMD loopback */
+ ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
+ MDIO_MMDREG_CTRL1);
+
+ if (efx->loopback_mode == LOOPBACK_PMAPMD)
+ ctrl2 |= (1 << MDIO_PMAPMD_CTRL1_LBACK_LBN);
+ else
+ ctrl2 &= ~(1 << MDIO_PMAPMD_CTRL1_LBACK_LBN);
+
+ if (ctrl1 != ctrl2)
+ mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
+ MDIO_MMDREG_CTRL1, ctrl2);
+
+ /* Handle (with debouncing) PCS loopback */
+ ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS,
+ MDIO_MMDREG_CTRL1);
+ if (efx->loopback_mode == LOOPBACK_PCS)
+ ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
+ else
+ ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
+
+ if (ctrl1 != ctrl2)
+ mdio_clause45_write(efx, phy_id, MDIO_MMD_PCS,
+ MDIO_MMDREG_CTRL1, ctrl2);
+
+ /* Handle (with debouncing) PHYXS network loopback */
+ ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
+ MDIO_MMDREG_CTRL1);
+ if (efx->loopback_mode == LOOPBACK_NETWORK)
+ ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
+ else
+ ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
+
+ if (ctrl1 != ctrl2)
+ mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS,
+ MDIO_MMDREG_CTRL1, ctrl2);
+}
+
/**
* mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO.
* @efx: Efx NIC
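The reconfigure helpers added above all follow a debounced read-modify-write pattern: read the register, compute the new value, and only issue the MDIO write when it actually differs, avoiding needless bus traffic and PHY state churn. A generic sketch of that pattern (hypothetical helper, not in the patch):

    static void example_mdio_set_bit(struct efx_nic *efx, int mmd, int reg,
                                     int bit_lbn, int set)
    {
            int old, new;

            old = new = mdio_clause45_read(efx, efx->mii.phy_id, mmd, reg);
            if (set)
                    new |= (1 << bit_lbn);
            else
                    new &= ~(1 << bit_lbn);
            if (new != old)         /* debounce: skip no-op writes */
                    mdio_clause45_write(efx, efx->mii.phy_id, mmd, reg, new);
    }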
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 2214b6d820a7..cb99f3f4491c 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -44,11 +44,16 @@
#define MDIO_MMDREG_DEVS1 (6)
#define MDIO_MMDREG_CTRL2 (7)
#define MDIO_MMDREG_STAT2 (8)
+#define MDIO_MMDREG_TXDIS (9)
/* Bits in MMDREG_CTRL1 */
/* Reset */
#define MDIO_MMDREG_CTRL1_RESET_LBN (15)
#define MDIO_MMDREG_CTRL1_RESET_WIDTH (1)
+/* Loopback */
+/* Loopback bit for WIS, PCS, PHYXS and DTEXS */
+#define MDIO_MMDREG_CTRL1_LBACK_LBN (14)
+#define MDIO_MMDREG_CTRL1_LBACK_WIDTH (1)
/* Bits in MMDREG_STAT1 */
#define MDIO_MMDREG_STAT1_FAULT_LBN (7)
@@ -56,6 +61,9 @@
/* Link state */
#define MDIO_MMDREG_STAT1_LINK_LBN (2)
#define MDIO_MMDREG_STAT1_LINK_WIDTH (1)
+/* Low power ability */
+#define MDIO_MMDREG_STAT1_LPABLE_LBN (1)
+#define MDIO_MMDREG_STAT1_LPABLE_WIDTH (1)
/* Bits in ID reg */
#define MDIO_ID_REV(_id32) (_id32 & 0xf)
@@ -76,6 +84,14 @@
#define MDIO_MMDREG_STAT2_PRESENT_LBN (14)
#define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2)
+/* Bits in MMDREG_TXDIS */
+#define MDIO_MMDREG_TXDIS_GLOBAL_LBN (0)
+#define MDIO_MMDREG_TXDIS_GLOBAL_WIDTH (1)
+
+/* MMD-specific bits, ordered by MMD, then register */
+#define MDIO_PMAPMD_CTRL1_LBACK_LBN (0)
+#define MDIO_PMAPMD_CTRL1_LBACK_WIDTH (1)
+
/* PMA type (4 bits) */
#define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0)
#define MDIO_PMAPMD_CTRL2_10G_EW (0x1)
@@ -95,7 +111,7 @@
#define MDIO_PMAPMD_CTRL2_10_BT (0xf)
#define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf)
-/* /\* PHY XGXS lane state *\/ */
+/* PHY XGXS lane state */
#define MDIO_PHYXS_LANE_STATE (0x18)
#define MDIO_PHYXS_LANE_ALIGNED_LBN (12)
@@ -217,6 +233,12 @@ int mdio_clause45_check_mmds(struct efx_nic *efx,
extern int mdio_clause45_links_ok(struct efx_nic *efx,
unsigned int mmd_mask);
+/* Generic transmit disable support through PMAPMD */
+extern void mdio_clause45_transmit_disable(struct efx_nic *efx);
+
+/* Generic part of reconfigure: set/clear loopback bits */
+extern void mdio_clause45_phy_reconfigure(struct efx_nic *efx);
+
/* Read (some of) the PHY settings over MDIO */
extern void mdio_clause45_get_settings(struct efx_nic *efx,
struct ethtool_cmd *ecmd);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index c505482c2520..5e20e7551dae 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -42,7 +42,7 @@
#ifndef EFX_DRIVER_NAME
#define EFX_DRIVER_NAME "sfc"
#endif
-#define EFX_DRIVER_VERSION "2.2.0136"
+#define EFX_DRIVER_VERSION "2.2"
#ifdef EFX_ENABLE_DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -52,28 +52,19 @@
#define EFX_WARN_ON_PARANOID(x) do {} while (0)
#endif
-#define NET_DEV_REGISTERED(efx) \
- ((efx)->net_dev->reg_state == NETREG_REGISTERED)
-
-/* Include net device name in log messages if it has been registered.
- * Use efx->name not efx->net_dev->name so that races with (un)registration
- * are harmless.
- */
-#define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "")
-
/* Un-rate-limited logging */
#define EFX_ERR(efx, fmt, args...) \
-dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args)
+dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args)
#define EFX_INFO(efx, fmt, args...) \
-dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args)
+dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args)
#ifdef EFX_ENABLE_DEBUG
#define EFX_LOG(efx, fmt, args...) \
-dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
+dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
#else
#define EFX_LOG(efx, fmt, args...) \
-dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
+dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
#endif
#define EFX_TRACE(efx, fmt, args...) do {} while (0)
@@ -90,11 +81,6 @@ do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0)
#define EFX_LOG_RL(efx, fmt, args...) \
do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
-/* Kernel headers may redefine inline anyway */
-#ifndef inline
-#define inline inline __attribute__ ((always_inline))
-#endif
-
/**************************************************************************
*
* Efx data structures
@@ -134,6 +120,8 @@ struct efx_special_buffer {
* Set only on the final fragment of a packet; %NULL for all other
* fragments. When this fragment completes, then we can free this
* skb.
+ * @tsoh: The associated TSO header structure, or %NULL if this
+ * buffer is not a TSO header.
* @dma_addr: DMA address of the fragment.
* @len: Length of this fragment.
* This field is zero when the queue slot is empty.
@@ -144,6 +132,7 @@ struct efx_special_buffer {
*/
struct efx_tx_buffer {
const struct sk_buff *skb;
+ struct efx_tso_header *tsoh;
dma_addr_t dma_addr;
unsigned short len;
unsigned char continuation;
@@ -187,6 +176,13 @@ struct efx_tx_buffer {
* variable indicates that the queue is full. This is to
* avoid cache-line ping-pong between the xmit path and the
* completion path.
+ * @tso_headers_free: A list of TSO headers allocated for this TX queue
+ * that are not in use, and so available for new TSO sends. The list
+ * is protected by the TX queue lock.
+ * @tso_bursts: Number of times TSO xmit invoked by kernel
+ * @tso_long_headers: Number of packets with headers too long for standard
+ * blocks
+ * @tso_packets: Number of packets via the TSO xmit path
*/
struct efx_tx_queue {
/* Members which don't change on the fast path */
@@ -206,6 +202,10 @@ struct efx_tx_queue {
unsigned int insert_count ____cacheline_aligned_in_smp;
unsigned int write_count;
unsigned int old_read_count;
+ struct efx_tso_header *tso_headers_free;
+ unsigned int tso_bursts;
+ unsigned int tso_long_headers;
+ unsigned int tso_packets;
};
/**
@@ -434,6 +434,9 @@ struct efx_board {
struct efx_blinker blinker;
};
+#define STRING_TABLE_LOOKUP(val, member) \
+ member ## _names[val]
+
enum efx_int_mode {
/* Be careful if altering to correct macro below */
EFX_INT_MODE_MSIX = 0,
@@ -506,6 +509,7 @@ enum efx_fc_type {
* @check_hw: Check hardware
* @reset_xaui: Reset XAUI side of PHY for (software sequenced reset)
* @mmds: MMD presence mask
+ * @loopbacks: Supported loopback modes mask
*/
struct efx_phy_operations {
int (*init) (struct efx_nic *efx);
@@ -515,6 +519,7 @@ struct efx_phy_operations {
int (*check_hw) (struct efx_nic *efx);
void (*reset_xaui) (struct efx_nic *efx);
int mmds;
+ unsigned loopbacks;
};
/*
@@ -653,7 +658,6 @@ union efx_multicast_hash {
* @phy_op: PHY interface
* @phy_data: PHY private data (including PHY-specific stats)
* @mii: PHY interface
- * @phy_powered: PHY power state
* @tx_disabled: PHY transmitter turned off
* @link_up: Link status
* @link_options: Link options (MII/GMII format)
@@ -662,6 +666,9 @@ union efx_multicast_hash {
* @multicast_hash: Multicast hash table
* @flow_control: Flow control flags - separate RX/TX so can't use link_options
* @reconfigure_work: work item for dealing with PHY events
+ * @loopback_mode: Loopback status
+ * @loopback_modes: Supported loopback mode bitmask
+ * @loopback_selftest: Offline self-test private state
*
* The @priv field of the corresponding &struct net_device points to
* this.
@@ -674,7 +681,7 @@ struct efx_nic {
struct workqueue_struct *workqueue;
struct work_struct reset_work;
struct delayed_work monitor_work;
- unsigned long membase_phys;
+ resource_size_t membase_phys;
void __iomem *membase;
spinlock_t biu_lock;
enum efx_int_mode interrupt_mode;
@@ -698,7 +705,7 @@ struct efx_nic {
unsigned n_rx_nodesc_drop_cnt;
- void *nic_data;
+ struct falcon_nic_data *nic_data;
struct mutex mac_lock;
int port_enabled;
@@ -721,6 +728,7 @@ struct efx_nic {
struct efx_phy_operations *phy_op;
void *phy_data;
struct mii_if_info mii;
+ unsigned tx_disabled;
int link_up;
unsigned int link_options;
@@ -732,8 +740,26 @@ struct efx_nic {
struct work_struct reconfigure_work;
atomic_t rx_reset;
+ enum efx_loopback_mode loopback_mode;
+ unsigned int loopback_modes;
+
+ void *loopback_selftest;
};
+static inline int efx_dev_registered(struct efx_nic *efx)
+{
+ return efx->net_dev->reg_state == NETREG_REGISTERED;
+}
+
+/* Net device name, for inclusion in log messages if it has been registered.
+ * Use efx->name not efx->net_dev->name so that races with (un)registration
+ * are harmless.
+ */
+static inline const char *efx_dev_name(struct efx_nic *efx)
+{
+ return efx_dev_registered(efx) ? efx->name : "";
+}
+
/**
* struct efx_nic_type - Efx device type definition
* @mem_bar: Memory BAR number
@@ -769,7 +795,7 @@ struct efx_nic_type {
unsigned int txd_ring_mask;
unsigned int rxd_ring_mask;
unsigned int evq_size;
- dma_addr_t max_dma_mask;
+ u64 max_dma_mask;
unsigned int tx_dma_mask;
unsigned bug5391_mask;
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 551299b462ae..601b001437c0 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -19,6 +19,7 @@
#include "rx.h"
#include "efx.h"
#include "falcon.h"
+#include "selftest.h"
#include "workarounds.h"
/* Number of RX descriptors pushed at once. */
@@ -85,14 +86,17 @@ static unsigned int rx_refill_limit = 95;
*/
#define EFX_RXD_HEAD_ROOM 2
-/* Macros for zero-order pages (potentially) containing multiple RX buffers */
-#define RX_DATA_OFFSET(_data) \
- (((unsigned long) (_data)) & (PAGE_SIZE-1))
-#define RX_BUF_OFFSET(_rx_buf) \
- RX_DATA_OFFSET((_rx_buf)->data)
-
-#define RX_PAGE_SIZE(_efx) \
- (PAGE_SIZE * (1u << (_efx)->rx_buffer_order))
+static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
+{
+ /* Offset is always within one page, so we don't need to consider
+ * the page order.
+ */
+ return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
+}
+static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
+{
+ return PAGE_SIZE << efx->rx_buffer_order;
+}
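For order-0 pages the driver packs several RX buffers into one page, rounding each buffer's start up to the next 512-byte boundary, which is what the (bytes + 0x1ff) & ~0x1ff expression further down in efx_init_rx_buffer_page() implements. A minimal user-space sketch of that packing arithmetic (the page size and buffer length are example values only):

/* Sketch of the order-0 RX page packing arithmetic; values are
 * illustrative only. */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u

int main(void)
{
	unsigned int bytes = 1700;	/* example RX buffer length */
	unsigned int offset = 0;	/* current position within the page */
	int n = 0;

	/* Carve buffers out of the page while a whole one still fits */
	while (EXAMPLE_PAGE_SIZE - offset >= bytes) {
		printf("buffer %d at offset %u\n", n++, offset);
		/* Round the next start up to a 512-byte boundary */
		offset += (bytes + 0x1ff) & ~0x1ffu;
	}
	return 0;
}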
/**************************************************************************
@@ -105,7 +109,7 @@ static unsigned int rx_refill_limit = 95;
static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
void **tcpudp_hdr, u64 *hdr_flags, void *priv)
{
- struct efx_channel *channel = (struct efx_channel *)priv;
+ struct efx_channel *channel = priv;
struct iphdr *iph;
struct tcphdr *th;
@@ -130,12 +134,12 @@ static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
void *priv)
{
- struct efx_channel *channel = (struct efx_channel *)priv;
+ struct efx_channel *channel = priv;
struct ethhdr *eh;
struct iphdr *iph;
/* We support EtherII and VLAN encapsulated IPv4 */
- eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset);
+ eh = page_address(frag->page) + frag->page_offset;
*mac_hdr = eh;
if (eh->h_proto == htons(ETH_P_IP)) {
@@ -268,7 +272,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
return -ENOMEM;
dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
- 0, RX_PAGE_SIZE(efx),
+ 0, efx_rx_buf_size(efx),
PCI_DMA_FROMDEVICE);
if (unlikely(pci_dma_mapping_error(dma_addr))) {
@@ -279,14 +283,14 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
rx_queue->buf_page = rx_buf->page;
rx_queue->buf_dma_addr = dma_addr;
- rx_queue->buf_data = ((char *) page_address(rx_buf->page) +
+ rx_queue->buf_data = (page_address(rx_buf->page) +
EFX_PAGE_IP_ALIGN);
}
- offset = RX_DATA_OFFSET(rx_queue->buf_data);
rx_buf->len = bytes;
- rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
rx_buf->data = rx_queue->buf_data;
+ offset = efx_rx_buf_offset(rx_buf);
+ rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
/* Try to pack multiple buffers per page */
if (efx->rx_buffer_order == 0) {
@@ -294,7 +298,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
offset += ((bytes + 0x1ff) & ~0x1ff);
- space = RX_PAGE_SIZE(efx) - offset;
+ space = efx_rx_buf_size(efx) - offset;
if (space >= bytes) {
/* Refs dropped on kernel releasing each skb */
get_page(rx_queue->buf_page);
@@ -343,7 +347,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
EFX_BUG_ON_PARANOID(rx_buf->skb);
if (rx_buf->unmap_addr) {
pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
- RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE);
+ efx_rx_buf_size(efx),
+ PCI_DMA_FROMDEVICE);
rx_buf->unmap_addr = 0;
}
} else if (likely(rx_buf->skb)) {
@@ -399,9 +404,10 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
return 0;
/* Record minimum fill level */
- if (unlikely(fill_level < rx_queue->min_fill))
+ if (unlikely(fill_level < rx_queue->min_fill)) {
if (fill_level)
rx_queue->min_fill = fill_level;
+ }
/* Acquire RX add lock. If this lock is contended, then a fast
* fill must already be in progress (e.g. in the refill
@@ -551,7 +557,7 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel,
struct skb_frag_struct frags;
frags.page = rx_buf->page;
- frags.page_offset = RX_BUF_OFFSET(rx_buf);
+ frags.page_offset = efx_rx_buf_offset(rx_buf);
frags.size = rx_buf->len;
lro_receive_frags(lro_mgr, &frags, rx_buf->len,
@@ -596,7 +602,7 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
if (unlikely(rx_buf->len > hdr_len)) {
struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
frag->page = rx_buf->page;
- frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len;
+ frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
frag->size = skb->len - hdr_len;
skb_shinfo(skb)->nr_frags = 1;
skb->data_len = frag->size;
@@ -683,6 +689,15 @@ void __efx_rx_packet(struct efx_channel *channel,
struct sk_buff *skb;
int lro = efx->net_dev->features & NETIF_F_LRO;
+ /* If we're in loopback test, then pass the packet directly to the
+ * loopback layer, and free the rx_buf here
+ */
+ if (unlikely(efx->loopback_selftest)) {
+ efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
+ efx_free_rx_buffer(efx, rx_buf);
+ goto done;
+ }
+
if (rx_buf->skb) {
prefetch(skb_shinfo(rx_buf->skb));
@@ -736,7 +751,6 @@ void __efx_rx_packet(struct efx_channel *channel,
/* Update allocation strategy method */
channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
- /* fall-thru */
done:
efx->net_dev->last_rx = jiffies;
}
@@ -842,7 +856,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
/* For a page that is part-way through splitting into RX buffers */
if (rx_queue->buf_page != NULL) {
pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
- RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE);
+ efx_rx_buf_size(rx_queue->efx),
+ PCI_DMA_FROMDEVICE);
__free_pages(rx_queue->buf_page,
rx_queue->efx->rx_buffer_order);
rx_queue->buf_page = NULL;
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
new file mode 100644
index 000000000000..3b2de9fe7f27
--- /dev/null
+++ b/drivers/net/sfc/selftest.c
@@ -0,0 +1,719 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kernel_stat.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/rtnetlink.h>
+#include <asm/io.h>
+#include "net_driver.h"
+#include "ethtool.h"
+#include "efx.h"
+#include "falcon.h"
+#include "selftest.h"
+#include "boards.h"
+#include "workarounds.h"
+#include "mac.h"
+
+/*
+ * Loopback test packet structure
+ *
+ * The self-test should stress every RSS vector, and unfortunately
+ * Falcon only performs RSS on TCP/UDP packets.
+ */
+struct efx_loopback_payload {
+ struct ethhdr header;
+ struct iphdr ip;
+ struct udphdr udp;
+ __be16 iteration;
+ const char msg[64];
+} __attribute__ ((packed));
+
+/* Loopback test source MAC address */
+static const unsigned char payload_source[ETH_ALEN] = {
+ 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
+};
+
+static const char *payload_msg =
+ "Hello world! This is an Efx loopback test in progress!";
+
+/**
+ * efx_selftest_state - persistent state during a selftest
+ * @flush: Drop all packets in efx_loopback_rx_packet
+ * @packet_count: Number of packets being used in this test
+ * @skbs: An array of skbs transmitted
+ * @rx_good: RX good packet count
+ * @rx_bad: RX bad packet count
+ * @payload: Payload used in tests
+ */
+struct efx_selftest_state {
+ int flush;
+ int packet_count;
+ struct sk_buff **skbs;
+ atomic_t rx_good;
+ atomic_t rx_bad;
+ struct efx_loopback_payload payload;
+};
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ **************************************************************************/
+
+/* Level of loopback testing
+ *
+ * The maximum packet burst length is 16**(n-1), i.e.
+ *
+ * - Level 0 : no packets
+ * - Level 1 : 1 packet
+ * - Level 2 : 17 packets (1 * 1 packet, 1 * 16 packets)
+ * - Level 3 : 273 packets (1 * 1 packet, 1 * 16 packets, 1 * 256 packets)
+ *
+ */
+static unsigned int loopback_test_level = 3;
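In other words, iteration i of a run sends min(16^i, txd_ring/3) packets, which is where the level totals above come from. A standalone sketch of the per-iteration counts (the ring size is only an example; the real limit comes from efx->type->txd_ring_mask):

/* Sketch of the burst sizes implied by loopback_test_level; the ring
 * size is an example value only. */
#include <stdio.h>

int main(void)
{
	unsigned int level = 3;			/* loopback_test_level */
	unsigned int ring_limit = 4096 / 3;	/* example TX ring size / 3 */
	unsigned int i, burst, total = 0;

	for (i = 0; i < level; i++) {
		burst = 1u << (i << 2);		/* 16^i */
		if (burst > ring_limit)
			burst = ring_limit;
		total += burst;
		printf("iteration %u: %u packets\n", i, burst);
	}
	printf("total: %u packets\n", total);	/* 273 for level 3 */
	return 0;
}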
+
+/**************************************************************************
+ *
+ * Interrupt and event queue testing
+ *
+ **************************************************************************/
+
+/* Test generation and receipt of interrupts */
+static int efx_test_interrupts(struct efx_nic *efx,
+ struct efx_self_tests *tests)
+{
+ struct efx_channel *channel;
+
+ EFX_LOG(efx, "testing interrupts\n");
+ tests->interrupt = -1;
+
+ /* Reset interrupt flag */
+ efx->last_irq_cpu = -1;
+ smp_wmb();
+
+ /* ACK each interrupting event queue. Receiving an interrupt due to
+ * traffic before a test event is raised is considered a pass */
+ efx_for_each_channel_with_interrupt(channel, efx) {
+ if (channel->work_pending)
+ efx_process_channel_now(channel);
+ if (efx->last_irq_cpu >= 0)
+ goto success;
+ }
+
+ falcon_generate_interrupt(efx);
+
+ /* Wait for arrival of test interrupt. */
+ EFX_LOG(efx, "waiting for test interrupt\n");
+ schedule_timeout_uninterruptible(HZ / 10);
+ if (efx->last_irq_cpu >= 0)
+ goto success;
+
+ EFX_ERR(efx, "timed out waiting for interrupt\n");
+ return -ETIMEDOUT;
+
+ success:
+ EFX_LOG(efx, "test interrupt (mode %d) seen on CPU%d\n",
+ efx->interrupt_mode, efx->last_irq_cpu);
+ tests->interrupt = 1;
+ return 0;
+}
+
+/* Test generation and receipt of non-interrupting events */
+static int efx_test_eventq(struct efx_channel *channel,
+ struct efx_self_tests *tests)
+{
+ unsigned int magic;
+
+ /* Channel specific code, limited to 20 bits */
+ magic = (0x00010150 + channel->channel);
+ EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
+ channel->channel, magic);
+
+ tests->eventq_dma[channel->channel] = -1;
+ tests->eventq_int[channel->channel] = 1; /* fake pass */
+ tests->eventq_poll[channel->channel] = 1; /* fake pass */
+
+ /* Reset flag and zero magic word */
+ channel->efx->last_irq_cpu = -1;
+ channel->eventq_magic = 0;
+ smp_wmb();
+
+ falcon_generate_test_event(channel, magic);
+ udelay(1);
+
+ efx_process_channel_now(channel);
+ if (channel->eventq_magic != magic) {
+ EFX_ERR(channel->efx, "channel %d failed to see test event\n",
+ channel->channel);
+ return -ETIMEDOUT;
+ } else {
+ tests->eventq_dma[channel->channel] = 1;
+ }
+
+ return 0;
+}
+
+/* Test generation and receipt of interrupting events */
+static int efx_test_eventq_irq(struct efx_channel *channel,
+ struct efx_self_tests *tests)
+{
+ unsigned int magic, count;
+
+ /* Channel specific code, limited to 20 bits */
+ magic = (0x00010150 + channel->channel);
+ EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
+ channel->channel, magic);
+
+ tests->eventq_dma[channel->channel] = -1;
+ tests->eventq_int[channel->channel] = -1;
+ tests->eventq_poll[channel->channel] = -1;
+
+ /* Reset flag and zero magic word */
+ channel->efx->last_irq_cpu = -1;
+ channel->eventq_magic = 0;
+ smp_wmb();
+
+ falcon_generate_test_event(channel, magic);
+
+ /* Wait for arrival of interrupt */
+ count = 0;
+ do {
+ schedule_timeout_uninterruptible(HZ / 100);
+
+ if (channel->work_pending)
+ efx_process_channel_now(channel);
+
+ if (channel->eventq_magic == magic)
+ goto eventq_ok;
+ } while (++count < 2);
+
+ EFX_ERR(channel->efx, "channel %d timed out waiting for event queue\n",
+ channel->channel);
+
+ /* See if interrupt arrived */
+ if (channel->efx->last_irq_cpu >= 0) {
+ EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d "
+ "during event queue test\n", channel->channel,
+ raw_smp_processor_id());
+ tests->eventq_int[channel->channel] = 1;
+ }
+
+ /* Check to see if event was received even if interrupt wasn't */
+ efx_process_channel_now(channel);
+ if (channel->eventq_magic == magic) {
+ EFX_ERR(channel->efx, "channel %d event was generated, but "
+ "failed to trigger an interrupt\n", channel->channel);
+ tests->eventq_dma[channel->channel] = 1;
+ }
+
+ return -ETIMEDOUT;
+ eventq_ok:
+ EFX_LOG(channel->efx, "channel %d event queue passed\n",
+ channel->channel);
+ tests->eventq_dma[channel->channel] = 1;
+ tests->eventq_int[channel->channel] = 1;
+ tests->eventq_poll[channel->channel] = 1;
+ return 0;
+}
+
+/**************************************************************************
+ *
+ * PHY testing
+ *
+ **************************************************************************/
+
+/* Check PHY presence by reading the PHY ID registers */
+static int efx_test_phy(struct efx_nic *efx,
+ struct efx_self_tests *tests)
+{
+ u16 physid1, physid2;
+ struct mii_if_info *mii = &efx->mii;
+ struct net_device *net_dev = efx->net_dev;
+
+ if (efx->phy_type == PHY_TYPE_NONE)
+ return 0;
+
+ EFX_LOG(efx, "testing PHY presence\n");
+ tests->phy_ok = -1;
+
+ physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1);
+ physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2);
+
+ if ((physid1 != 0x0000) && (physid1 != 0xffff) &&
+ (physid2 != 0x0000) && (physid2 != 0xffff)) {
+ EFX_LOG(efx, "found MII PHY %d ID 0x%x:%x\n",
+ mii->phy_id, physid1, physid2);
+ tests->phy_ok = 1;
+ return 0;
+ }
+
+ EFX_ERR(efx, "no MII PHY present with ID %d\n", mii->phy_id);
+ return -ENODEV;
+}
+
+/**************************************************************************
+ *
+ * Loopback testing
+ * NB Only one loopback test can be executing concurrently.
+ *
+ **************************************************************************/
+
+/* Loopback test RX callback
+ * This is called for each received packet during loopback testing.
+ */
+void efx_loopback_rx_packet(struct efx_nic *efx,
+ const char *buf_ptr, int pkt_len)
+{
+ struct efx_selftest_state *state = efx->loopback_selftest;
+ struct efx_loopback_payload *received;
+ struct efx_loopback_payload *payload;
+
+ BUG_ON(!buf_ptr);
+
+ /* If we are just flushing, then drop the packet */
+ if ((state == NULL) || state->flush)
+ return;
+
+ payload = &state->payload;
+
+ received = (struct efx_loopback_payload *) buf_ptr;
+ received->ip.saddr = payload->ip.saddr;
+ received->ip.check = payload->ip.check;
+
+ /* Check that header exists */
+ if (pkt_len < sizeof(received->header)) {
+ EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback "
+ "test\n", pkt_len, LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that the ethernet header exists */
+ if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
+ EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check packet length */
+ if (pkt_len != sizeof(*payload)) {
+ EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in "
+ "%s loopback test\n", pkt_len, (int)sizeof(*payload),
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that IP header matches */
+ if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
+ EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that msg and padding matches */
+ if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
+ EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Check that iteration matches */
+ if (received->iteration != payload->iteration) {
+ EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in "
+ "%s loopback test\n", ntohs(received->iteration),
+ ntohs(payload->iteration), LOOPBACK_MODE(efx));
+ goto err;
+ }
+
+ /* Increase correct RX count */
+ EFX_TRACE(efx, "got loopback RX in %s loopback test\n",
+ LOOPBACK_MODE(efx));
+
+ atomic_inc(&state->rx_good);
+ return;
+
+ err:
+#ifdef EFX_ENABLE_DEBUG
+ if (atomic_read(&state->rx_bad) == 0) {
+ EFX_ERR(efx, "received packet:\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+ buf_ptr, pkt_len, 0);
+ EFX_ERR(efx, "expected packet:\n");
+ print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+ &state->payload, sizeof(state->payload), 0);
+ }
+#endif
+ atomic_inc(&state->rx_bad);
+}
+
+/* Initialise an efx_selftest_state for a new iteration */
+static void efx_iterate_state(struct efx_nic *efx)
+{
+ struct efx_selftest_state *state = efx->loopback_selftest;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_loopback_payload *payload = &state->payload;
+
+ /* Initialise the layer II header */
+ memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN);
+ memcpy(&payload->header.h_source, &payload_source, ETH_ALEN);
+ payload->header.h_proto = htons(ETH_P_IP);
+
+ /* saddr set later and used as incrementing count */
+ payload->ip.daddr = htonl(INADDR_LOOPBACK);
+ payload->ip.ihl = 5;
+ payload->ip.check = htons(0xdead);
+ payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
+ payload->ip.version = IPVERSION;
+ payload->ip.protocol = IPPROTO_UDP;
+
+ /* Initialise udp header */
+ payload->udp.source = 0;
+ payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
+ sizeof(struct iphdr));
+ payload->udp.check = 0; /* checksum ignored */
+
+ /* Fill out payload */
+ payload->iteration = htons(ntohs(payload->iteration) + 1);
+ memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
+
+ /* Fill out remaining state members */
+ atomic_set(&state->rx_good, 0);
+ atomic_set(&state->rx_bad, 0);
+ smp_wmb();
+}
+
+static int efx_tx_loopback(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_selftest_state *state = efx->loopback_selftest;
+ struct efx_loopback_payload *payload;
+ struct sk_buff *skb;
+ int i, rc;
+
+ /* Transmit N copies of buffer */
+ for (i = 0; i < state->packet_count; i++) {
+ /* Allocate an skb, holding an extra reference for
+ * transmit completion counting */
+ skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+ state->skbs[i] = skb;
+ skb_get(skb);
+
+ /* Copy the payload in, incrementing the source address to
+ * exercise the RSS vectors */
+ payload = ((struct efx_loopback_payload *)
+ skb_put(skb, sizeof(state->payload)));
+ memcpy(payload, &state->payload, sizeof(state->payload));
+ payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
+
+ /* Ensure everything we've written is visible to the
+ * interrupt handler. */
+ smp_wmb();
+
+ if (efx_dev_registered(efx))
+ netif_tx_lock_bh(efx->net_dev);
+ rc = efx_xmit(efx, tx_queue, skb);
+ if (efx_dev_registered(efx))
+ netif_tx_unlock_bh(efx->net_dev);
+
+ if (rc != NETDEV_TX_OK) {
+ EFX_ERR(efx, "TX queue %d could not transmit packet %d "
+ "of %d in %s loopback test\n", tx_queue->queue,
+ i + 1, state->packet_count, LOOPBACK_MODE(efx));
+
+ /* Defer cleaning up the other skbs for the caller */
+ kfree_skb(skb);
+ return -EPIPE;
+ }
+ }
+
+ return 0;
+}
+
+static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
+ struct efx_loopback_self_tests *lb_tests)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_selftest_state *state = efx->loopback_selftest;
+ struct sk_buff *skb;
+ int tx_done = 0, rx_good, rx_bad;
+ int i, rc = 0;
+
+ if (efx_dev_registered(efx))
+ netif_tx_lock_bh(efx->net_dev);
+
+ /* Count the number of TX completions and decrement the refcnt. Any
+ * skbs not already completed will be freed when the queue is flushed */
+ for (i = 0; i < state->packet_count; i++) {
+ skb = state->skbs[i];
+ if (skb && !skb_shared(skb))
+ ++tx_done;
+ dev_kfree_skb_any(skb);
+ }
+
+ if (efx_dev_registered(efx))
+ netif_tx_unlock_bh(efx->net_dev);
+
+ /* Check TX completion and received packet counts */
+ rx_good = atomic_read(&state->rx_good);
+ rx_bad = atomic_read(&state->rx_bad);
+ if (tx_done != state->packet_count) {
+ /* Don't free the skbs; they will be picked up on TX
+ * overflow or channel teardown.
+ */
+ EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d "
+ "TX completion events in %s loopback test\n",
+ tx_queue->queue, tx_done, state->packet_count,
+ LOOPBACK_MODE(efx));
+ rc = -ETIMEDOUT;
+ /* Fall through so we see the RX errors as well */
+ }
+
+ /* We may always be up to a flush away from our desired packet total */
+ if (rx_good != state->packet_count) {
+ EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d "
+ "received packets in %s loopback test\n",
+ tx_queue->queue, rx_good, state->packet_count,
+ LOOPBACK_MODE(efx));
+ rc = -ETIMEDOUT;
+ /* Fall through */
+ }
+
+ /* Update loopback test structure */
+ lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
+ lb_tests->tx_done[tx_queue->queue] += tx_done;
+ lb_tests->rx_good += rx_good;
+ lb_tests->rx_bad += rx_bad;
+
+ return rc;
+}
+
+static int
+efx_test_loopback(struct efx_tx_queue *tx_queue,
+ struct efx_loopback_self_tests *lb_tests)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ struct efx_selftest_state *state = efx->loopback_selftest;
+ struct efx_channel *channel;
+ int i, rc = 0;
+
+ for (i = 0; i < loopback_test_level; i++) {
+ /* Determine how many packets to send */
+ state->packet_count = (efx->type->txd_ring_mask + 1) / 3;
+ state->packet_count = min(1 << (i << 2), state->packet_count);
+ state->skbs = kzalloc(sizeof(state->skbs[0]) *
+ state->packet_count, GFP_KERNEL);
+ if (!state->skbs)
+ return -ENOMEM;
+ state->flush = 0;
+
+ EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
+ "packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
+ state->packet_count);
+
+ efx_iterate_state(efx);
+ rc = efx_tx_loopback(tx_queue);
+
+ /* NAPI polling is not enabled, so process channels synchronously */
+ schedule_timeout_uninterruptible(HZ / 50);
+ efx_for_each_channel_with_interrupt(channel, efx) {
+ if (channel->work_pending)
+ efx_process_channel_now(channel);
+ }
+
+ rc |= efx_rx_loopback(tx_queue, lb_tests);
+ kfree(state->skbs);
+
+ if (rc) {
+ /* Wait a while to ensure there are no packets
+ * floating around after a failure. */
+ schedule_timeout_uninterruptible(HZ / 10);
+ return rc;
+ }
+ }
+
+ EFX_LOG(efx, "TX queue %d passed %s loopback test with a burst length "
+ "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
+ state->packet_count);
+
+ return rc;
+}
+
+static int efx_test_loopbacks(struct efx_nic *efx,
+ struct efx_self_tests *tests,
+ unsigned int loopback_modes)
+{
+ struct efx_selftest_state *state = efx->loopback_selftest;
+ struct ethtool_cmd ecmd, ecmd_loopback;
+ struct efx_tx_queue *tx_queue;
+ enum efx_loopback_mode old_mode, mode;
+ int count, rc = 0, link_up;
+
+ rc = efx_ethtool_get_settings(efx->net_dev, &ecmd);
+ if (rc) {
+ EFX_ERR(efx, "could not get GMII settings\n");
+ return rc;
+ }
+ old_mode = efx->loopback_mode;
+
+ /* Disable autonegotiation for the purposes of loopback */
+ memcpy(&ecmd_loopback, &ecmd, sizeof(ecmd_loopback));
+ if (ecmd_loopback.autoneg == AUTONEG_ENABLE) {
+ ecmd_loopback.autoneg = AUTONEG_DISABLE;
+ ecmd_loopback.duplex = DUPLEX_FULL;
+ ecmd_loopback.speed = SPEED_10000;
+ }
+
+ rc = efx_ethtool_set_settings(efx->net_dev, &ecmd_loopback);
+ if (rc) {
+ EFX_ERR(efx, "could not disable autonegotiation\n");
+ goto out;
+ }
+ tests->loopback_speed = ecmd_loopback.speed;
+ tests->loopback_full_duplex = ecmd_loopback.duplex;
+
+ /* Test all supported loopback modes */
+ for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
+ if (!(loopback_modes & (1 << mode)))
+ continue;
+
+ /* Move the port into the specified loopback mode. */
+ state->flush = 1;
+ efx->loopback_mode = mode;
+ efx_reconfigure_port(efx);
+
+ /* Wait for the PHY to signal the link is up */
+ count = 0;
+ do {
+ struct efx_channel *channel = &efx->channel[0];
+
+ falcon_check_xmac(efx);
+ schedule_timeout_uninterruptible(HZ / 10);
+ if (channel->work_pending)
+ efx_process_channel_now(channel);
+ /* Wait for PHY events to be processed */
+ flush_workqueue(efx->workqueue);
+ rmb();
+
+ /* efx->link_up can be 1 even if the XAUI link is down
+ * (bug 5762). Usually it's not worth bothering with the
+ * difference, but for selftests, we need that extra
+ * guarantee that the link is really, really, up.
+ */
+ link_up = efx->link_up;
+ if (!falcon_xaui_link_ok(efx))
+ link_up = 0;
+
+ } while ((++count < 20) && !link_up);
+
+ /* The link should now be up. If it isn't, there is no point
+ * in attempting a loopback test */
+ if (!link_up) {
+ EFX_ERR(efx, "loopback %s never came up\n",
+ LOOPBACK_MODE(efx));
+ rc = -EIO;
+ goto out;
+ }
+
+ EFX_LOG(efx, "link came up in %s loopback in %d iterations\n",
+ LOOPBACK_MODE(efx), count);
+
+ /* Test every TX queue */
+ efx_for_each_tx_queue(tx_queue, efx) {
+ rc |= efx_test_loopback(tx_queue,
+ &tests->loopback[mode]);
+ if (rc)
+ goto out;
+ }
+ }
+
+ out:
+ /* Take out of loopback and restore PHY settings */
+ state->flush = 1;
+ efx->loopback_mode = old_mode;
+ efx_ethtool_set_settings(efx->net_dev, &ecmd);
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Entry points
+ *
+ *************************************************************************/
+
+/* Online (i.e. non-disruptive) testing
+ * This checks interrupt generation, event delivery and PHY presence. */
+int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+ struct efx_channel *channel;
+ int rc = 0;
+
+ EFX_LOG(efx, "performing online self-tests\n");
+
+ rc |= efx_test_interrupts(efx, tests);
+ efx_for_each_channel(channel, efx) {
+ if (channel->has_interrupt)
+ rc |= efx_test_eventq_irq(channel, tests);
+ else
+ rc |= efx_test_eventq(channel, tests);
+ }
+ rc |= efx_test_phy(efx, tests);
+
+ if (rc)
+ EFX_ERR(efx, "failed online self-tests\n");
+
+ return rc;
+}
+
+/* Offline (i.e. disruptive) testing
+ * This checks MAC and PHY loopback on the specified port. */
+int efx_offline_test(struct efx_nic *efx,
+ struct efx_self_tests *tests, unsigned int loopback_modes)
+{
+ struct efx_selftest_state *state;
+ int rc = 0;
+
+ EFX_LOG(efx, "performing offline self-tests\n");
+
+ /* Create a selftest_state structure to hold state for the test */
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Set the port loopback_selftest member. From this point on
+ * all received packets will be dropped. Mark the state as
+ * "flushing" so all inflight packets are dropped */
+ BUG_ON(efx->loopback_selftest);
+ state->flush = 1;
+ efx->loopback_selftest = state;
+
+ rc = efx_test_loopbacks(efx, tests, loopback_modes);
+
+ efx->loopback_selftest = NULL;
+ wmb();
+ kfree(state);
+
+ out:
+ if (rc)
+ EFX_ERR(efx, "failed offline self-tests\n");
+
+ return rc;
+}
+
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
new file mode 100644
index 000000000000..f6999c2b622d
--- /dev/null
+++ b/drivers/net/sfc/selftest.h
@@ -0,0 +1,50 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_SELFTEST_H
+#define EFX_SELFTEST_H
+
+#include "net_driver.h"
+
+/*
+ * Self tests
+ */
+
+struct efx_loopback_self_tests {
+ int tx_sent[EFX_MAX_TX_QUEUES];
+ int tx_done[EFX_MAX_TX_QUEUES];
+ int rx_good;
+ int rx_bad;
+};
+
+/* Efx self test results
+ * For fields which are not counters, 1 indicates success and -1
+ * indicates failure.
+ */
+struct efx_self_tests {
+ int interrupt;
+ int eventq_dma[EFX_MAX_CHANNELS];
+ int eventq_int[EFX_MAX_CHANNELS];
+ int eventq_poll[EFX_MAX_CHANNELS];
+ int phy_ok;
+ int loopback_speed;
+ int loopback_full_duplex;
+ struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX];
+};
+
+extern void efx_loopback_rx_packet(struct efx_nic *efx,
+ const char *buf_ptr, int pkt_len);
+extern int efx_online_test(struct efx_nic *efx,
+ struct efx_self_tests *tests);
+extern int efx_offline_test(struct efx_nic *efx,
+ struct efx_self_tests *tests,
+ unsigned int loopback_modes);
+
+#endif /* EFX_SELFTEST_H */
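For orientation, a hedged sketch of how a caller might drive these two entry points; the function name and the decision to skip the offline tests when the online tests fail are illustrative, not the driver's actual ethtool hook:

/* Hypothetical caller of the selftest entry points; illustrative only. */
#include <linux/string.h>
#include "net_driver.h"
#include "selftest.h"

static int example_run_selftests(struct efx_nic *efx)
{
	struct efx_self_tests tests;
	int rc;

	memset(&tests, 0, sizeof(tests));

	/* Non-disruptive checks first */
	rc = efx_online_test(efx, &tests);
	if (rc)
		return rc;

	/* Disruptive loopback checks over every supported mode */
	return efx_offline_test(efx, &tests, efx->loopback_modes);
}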
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index 11fa9fb8f48b..66a0d1442aba 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -116,20 +116,29 @@ void sfe4001_poweroff(struct efx_nic *efx)
/* Turn off all power rails */
out = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
+ efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
/* Disable port 1 outputs on IO expander */
cfg = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
+ efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
/* Disable port 0 outputs on IO expander */
cfg = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
+ efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
/* Clear any over-temperature alert */
- (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
+ efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
}
+/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected
+ * to the FLASH_CFG_1 input on the DSP. We must keep it high at power-
+ * up to allow writing the flash (done through MDIO from userland).
+ */
+unsigned int sfe4001_phy_flash_cfg;
+module_param_named(phy_flash_cfg, sfe4001_phy_flash_cfg, uint, 0444);
+MODULE_PARM_DESC(phy_flash_cfg,
+ "Force PHY to enter flash configuration mode");
+
/* This board uses an I2C expander to provide power to the PHY, which needs to
* be turned on before the PHY can be used.
* Context: Process context, rtnl lock held
@@ -203,6 +212,8 @@ int sfe4001_poweron(struct efx_nic *efx)
out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
(1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
(1 << P0_X_TRST_LBN));
+ if (sfe4001_phy_flash_cfg)
+ out |= 1 << P0_EN_3V3X_LBN;
rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
if (rc)
@@ -226,6 +237,9 @@ int sfe4001_poweron(struct efx_nic *efx)
if (in & (1 << P1_AFE_PWD_LBN))
goto done;
+ /* DSP doesn't look powered in flash config mode */
+ if (sfe4001_phy_flash_cfg)
+ goto done;
} while (++count < 20);
EFX_INFO(efx, "timed out waiting for power\n");
@@ -239,14 +253,14 @@ done:
fail3:
/* Turn off all power rails */
out = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
+ efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
/* Disable port 1 outputs on IO expander */
out = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
+ efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
fail2:
/* Disable port 0 outputs on IO expander */
out = 0xff;
- (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
+ efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
fail1:
return rc;
}
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index a2e9f79e47b1..c0146061c326 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -24,6 +24,11 @@
MDIO_MMDREG_DEVS0_PCS | \
MDIO_MMDREG_DEVS0_PHYXS)
+#define TENXPRESS_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \
+ (1 << LOOPBACK_PCS) | \
+ (1 << LOOPBACK_PMAPMD) | \
+ (1 << LOOPBACK_NETWORK))
+
/* We complain if we fail to see the link partner as 10G capable this many
* times in a row (must be > 1 as sampling the autoneg. registers is racy)
*/
@@ -72,6 +77,10 @@
#define PMA_PMD_BIST_RXD_LBN (1)
#define PMA_PMD_BIST_AFE_LBN (0)
+/* Special Software reset register */
+#define PMA_PMD_EXT_CTRL_REG 49152
+#define PMA_PMD_EXT_SSR_LBN 15
+
#define BIST_MAX_DELAY (1000)
#define BIST_POLL_DELAY (10)
@@ -86,6 +95,11 @@
#define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */
#define CLK312_EN_LBN 3
+/* PHYXS registers */
+#define PHYXS_TEST1 (49162)
+#define LOOPBACK_NEAR_LBN (8)
+#define LOOPBACK_NEAR_WIDTH (1)
+
/* Boot status register */
#define PCS_BOOT_STATUS_REG (0xd000)
#define PCS_BOOT_FATAL_ERR_LBN (0)
@@ -106,7 +120,9 @@ MODULE_PARM_DESC(crc_error_reset_threshold,
struct tenxpress_phy_data {
enum tenxpress_state state;
+ enum efx_loopback_mode loopback_mode;
atomic_t bad_crc_count;
+ int tx_disabled;
int bad_lp_tries;
};
@@ -195,14 +211,18 @@ static int tenxpress_phy_init(struct efx_nic *efx)
int rc = 0;
phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
efx->phy_data = phy_data;
tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);
- rc = mdio_clause45_wait_reset_mmds(efx,
- TENXPRESS_REQUIRED_DEVS);
- if (rc < 0)
- goto fail;
+ if (!sfe4001_phy_flash_cfg) {
+ rc = mdio_clause45_wait_reset_mmds(efx,
+ TENXPRESS_REQUIRED_DEVS);
+ if (rc < 0)
+ goto fail;
+ }
rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
if (rc < 0)
@@ -225,6 +245,35 @@ static int tenxpress_phy_init(struct efx_nic *efx)
return rc;
}
+static int tenxpress_special_reset(struct efx_nic *efx)
+{
+ int rc, reg;
+
+ EFX_TRACE(efx, "%s\n", __func__);
+
+ /* Initiate reset */
+ reg = mdio_clause45_read(efx, efx->mii.phy_id,
+ MDIO_MMD_PMAPMD, PMA_PMD_EXT_CTRL_REG);
+ reg |= (1 << PMA_PMD_EXT_SSR_LBN);
+ mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
+ PMA_PMD_EXT_CTRL_REG, reg);
+
+ msleep(200);
+
+ /* Wait for the blocks to come out of reset */
+ rc = mdio_clause45_wait_reset_mmds(efx,
+ TENXPRESS_REQUIRED_DEVS);
+ if (rc < 0)
+ return rc;
+
+ /* Try to reconfigure the device */
+ rc = tenxpress_init(efx);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp)
{
struct tenxpress_phy_data *pd = efx->phy_data;
@@ -299,11 +348,46 @@ static int tenxpress_link_ok(struct efx_nic *efx, int check_lp)
return ok;
}
+static void tenxpress_phyxs_loopback(struct efx_nic *efx)
+{
+ int phy_id = efx->mii.phy_id;
+ int ctrl1, ctrl2;
+
+ ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
+ PHYXS_TEST1);
+ if (efx->loopback_mode == LOOPBACK_PHYXS)
+ ctrl2 |= (1 << LOOPBACK_NEAR_LBN);
+ else
+ ctrl2 &= ~(1 << LOOPBACK_NEAR_LBN);
+ if (ctrl1 != ctrl2)
+ mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS,
+ PHYXS_TEST1, ctrl2);
+}
+
static void tenxpress_phy_reconfigure(struct efx_nic *efx)
{
+ struct tenxpress_phy_data *phy_data = efx->phy_data;
+ int loop_change = LOOPBACK_OUT_OF(phy_data, efx,
+ TENXPRESS_LOOPBACKS);
+
if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL))
return;
+ /* When coming out of transmit disable, coming out of low power
+ * mode, or moving out of any PHY internal loopback mode,
+ * perform a special software reset */
+ if ((phy_data->tx_disabled && !efx->tx_disabled) ||
+ loop_change) {
+ tenxpress_special_reset(efx);
+ falcon_reset_xaui(efx);
+ }
+
+ mdio_clause45_transmit_disable(efx);
+ mdio_clause45_phy_reconfigure(efx);
+ tenxpress_phyxs_loopback(efx);
+
+ phy_data->tx_disabled = efx->tx_disabled;
+ phy_data->loopback_mode = efx->loopback_mode;
efx->link_up = tenxpress_link_ok(efx, 0);
efx->link_options = GM_LPA_10000FULL;
}
@@ -431,4 +515,5 @@ struct efx_phy_operations falcon_tenxpress_phy_ops = {
.clear_interrupt = tenxpress_phy_clear_interrupt,
.reset_xaui = tenxpress_reset_xaui,
.mmds = TENXPRESS_REQUIRED_DEVS,
+ .loopbacks = TENXPRESS_LOOPBACKS,
};
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index fbb866b2185e..5cdd082ab8f6 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -82,6 +82,46 @@ static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
}
}
+/**
+ * struct efx_tso_header - a DMA mapped buffer for packet headers
+ * @next: Linked list of free ones.
+ * The list is protected by the TX queue lock.
+ * @dma_unmap_len: Length to unmap for an oversize buffer, or 0.
+ * @dma_addr: The DMA address of the header below.
+ *
+ * This controls the memory used for a TSO header. Use TSOH_DATA()
+ * to find the packet header data. Use TSOH_SIZE() to calculate the
+ * total size required for a given packet header length. TSO headers
+ * in the free list are exactly %TSOH_STD_SIZE bytes in size.
+ */
+struct efx_tso_header {
+ union {
+ struct efx_tso_header *next;
+ size_t unmap_len;
+ };
+ dma_addr_t dma_addr;
+};
+
+static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb);
+static void efx_fini_tso(struct efx_tx_queue *tx_queue);
+static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
+ struct efx_tso_header *tsoh);
+
+static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer)
+{
+ if (buffer->tsoh) {
+ if (likely(!buffer->tsoh->unmap_len)) {
+ buffer->tsoh->next = tx_queue->tso_headers_free;
+ tx_queue->tso_headers_free = buffer->tsoh;
+ } else {
+ efx_tsoh_heap_free(tx_queue, buffer->tsoh);
+ }
+ buffer->tsoh = NULL;
+ }
+}
+
/*
* Add a socket buffer to a TX queue
@@ -114,6 +154,9 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
+ if (skb_shinfo((struct sk_buff *)skb)->gso_size)
+ return efx_enqueue_skb_tso(tx_queue, skb);
+
/* Get size of the initial fragment */
len = skb_headlen(skb);
@@ -166,6 +209,8 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
insert_ptr = (tx_queue->insert_count &
efx->type->txd_ring_mask);
buffer = &tx_queue->buffer[insert_ptr];
+ efx_tsoh_free(tx_queue, buffer);
+ EFX_BUG_ON_PARANOID(buffer->tsoh);
EFX_BUG_ON_PARANOID(buffer->skb);
EFX_BUG_ON_PARANOID(buffer->len);
EFX_BUG_ON_PARANOID(buffer->continuation != 1);
@@ -342,7 +387,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
if (unlikely(tx_queue->stopped)) {
fill_level = tx_queue->insert_count - tx_queue->read_count;
if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
- EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx));
+ EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
/* Do this under netif_tx_lock(), to avoid racing
* with efx_xmit(). */
@@ -432,6 +477,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
efx_release_tx_buffers(tx_queue);
+ /* Free up TSO header cache */
+ efx_fini_tso(tx_queue);
+
/* Release queue's stop on port, if any */
if (tx_queue->stopped) {
tx_queue->stopped = 0;
@@ -450,3 +498,622 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
}
+/* Efx TCP segmentation acceleration.
+ *
+ * Why? Because by doing it here in the driver we can go significantly
+ * faster than the GSO.
+ *
+ * Requires TX checksum offload support.
+ */
+
+/* Number of bytes inserted at the start of a TSO header buffer,
+ * similar to NET_IP_ALIGN.
+ */
+#if defined(__i386__) || defined(__x86_64__)
+#define TSOH_OFFSET 0
+#else
+#define TSOH_OFFSET NET_IP_ALIGN
+#endif
+
+#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET)
+
+/* Total size of struct efx_tso_header, buffer and padding */
+#define TSOH_SIZE(hdr_len) \
+ (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
+
+/* Size of blocks on free list. Larger blocks must be allocated from
+ * the heap.
+ */
+#define TSOH_STD_SIZE 128
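To make the sizing concrete: TSOH_SIZE() adds the bookkeeping structure and alignment padding on top of the header bytes, and any result that still fits in a 128-byte standard block is served from the per-page free list, while longer headers take the heap path in efx_tsoh_heap_alloc(). A standalone sketch (the struct only mirrors the layout of efx_tso_header; the header lengths are examples):

/* Sketch of TSO header block sizing; header lengths are examples. */
#include <stdio.h>
#include <stddef.h>

struct example_tso_header {
	union {				/* anonymous union, as in the driver */
		struct example_tso_header *next;
		size_t unmap_len;
	};
	unsigned long long dma_addr;	/* stands in for dma_addr_t */
};

#define EX_TSOH_OFFSET	0		/* as on x86 */
#define EX_TSOH_STD_SIZE 128
#define EX_TSOH_SIZE(hdr_len) \
	(sizeof(struct example_tso_header) + EX_TSOH_OFFSET + (hdr_len))

int main(void)
{
	unsigned int lens[] = { 54, 66, 120, 200 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("header %3u bytes -> block %3zu bytes -> %s\n",
		       lens[i], EX_TSOH_SIZE(lens[i]),
		       EX_TSOH_SIZE(lens[i]) <= EX_TSOH_STD_SIZE ?
		       "free-list block" : "heap allocation");
	return 0;
}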
+
+#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
+#define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data)
+#define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data)
+#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
+
+/**
+ * struct tso_state - TSO state for an SKB
+ * @remaining_len: Bytes of data we've yet to segment
+ * @seqnum: Current sequence number
+ * @packet_space: Remaining space in current packet
+ * @ifc: Input fragment cursor.
+ * Where we are in the current fragment of the incoming SKB. These
+ * values get updated in place when we split a fragment over
+ * multiple packets.
+ * @p: Parameters.
+ * These values are set once at the start of the TSO send and do
+ * not get changed as the routine progresses.
+ *
+ * The state used during segmentation. It is put into this data structure
+ * just to make it easy to pass into inline functions.
+ */
+struct tso_state {
+ unsigned remaining_len;
+ unsigned seqnum;
+ unsigned packet_space;
+
+ struct {
+ /* DMA address of current position */
+ dma_addr_t dma_addr;
+ /* Remaining length */
+ unsigned int len;
+ /* DMA address and length of the whole fragment */
+ unsigned int unmap_len;
+ dma_addr_t unmap_addr;
+ struct page *page;
+ unsigned page_off;
+ } ifc;
+
+ struct {
+ /* The number of bytes of header */
+ unsigned int header_length;
+
+ /* The number of bytes to put in each outgoing segment. */
+ int full_packet_size;
+
+ /* Current IPv4 ID, host endian. */
+ unsigned ipv4_id;
+ } p;
+};
+
+
+/*
+ * Verify that our various assumptions about sk_buffs and the conditions
+ * under which TSO will be attempted hold true.
+ */
+static inline void efx_tso_check_safe(const struct sk_buff *skb)
+{
+ EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP));
+ EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
+ skb->protocol);
+ EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
+ EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
+ + (tcp_hdr(skb)->doff << 2u)) >
+ skb_headlen(skb));
+}
+
+
+/*
+ * Allocate a page worth of efx_tso_header structures, and string them
+ * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
+ */
+static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
+{
+
+ struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+ struct efx_tso_header *tsoh;
+ dma_addr_t dma_addr;
+ u8 *base_kva, *kva;
+
+ base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
+ if (base_kva == NULL) {
+ EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
+ " headers\n");
+ return -ENOMEM;
+ }
+
+ /* pci_alloc_consistent() allocates pages. */
+ EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
+
+ for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
+ tsoh = (struct efx_tso_header *)kva;
+ tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
+ tsoh->next = tx_queue->tso_headers_free;
+ tx_queue->tso_headers_free = tsoh;
+ }
+
+ return 0;
+}
+
+
+/* Free up a TSO header, and all others in the same page. */
+static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
+ struct efx_tso_header *tsoh,
+ struct pci_dev *pci_dev)
+{
+ struct efx_tso_header **p;
+ unsigned long base_kva;
+ dma_addr_t base_dma;
+
+ base_kva = (unsigned long)tsoh & PAGE_MASK;
+ base_dma = tsoh->dma_addr & PAGE_MASK;
+
+ p = &tx_queue->tso_headers_free;
+ while (*p != NULL) {
+ if (((unsigned long)*p & PAGE_MASK) == base_kva)
+ *p = (*p)->next;
+ else
+ p = &(*p)->next;
+ }
+
+ pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
+}
+
+static struct efx_tso_header *
+efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
+{
+ struct efx_tso_header *tsoh;
+
+ tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
+ if (unlikely(!tsoh))
+ return NULL;
+
+ tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
+ TSOH_BUFFER(tsoh), header_len,
+ PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) {
+ kfree(tsoh);
+ return NULL;
+ }
+
+ tsoh->unmap_len = header_len;
+ return tsoh;
+}
+
+static void
+efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
+{
+ pci_unmap_single(tx_queue->efx->pci_dev,
+ tsoh->dma_addr, tsoh->unmap_len,
+ PCI_DMA_TODEVICE);
+ kfree(tsoh);
+}
+
+/**
+ * efx_tx_queue_insert - push descriptors onto the TX queue
+ * @tx_queue: Efx TX queue
+ * @dma_addr: DMA address of fragment
+ * @len: Length of fragment
+ * @skb: Only non-null for end of last segment
+ * @end_of_packet: True if last fragment in a packet
+ * @unmap_addr: DMA address of fragment for unmapping
+ * @unmap_len: Only set this in last segment of a fragment
+ *
+ * Push descriptors onto the TX queue. Return 0 on success or 1 if
+ * @tx_queue is full.
+ */
+static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, unsigned len,
+ const struct sk_buff *skb, int end_of_packet,
+ dma_addr_t unmap_addr, unsigned unmap_len)
+{
+ struct efx_tx_buffer *buffer;
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned dma_len, fill_level, insert_ptr, misalign;
+ int q_space;
+
+ EFX_BUG_ON_PARANOID(len <= 0);
+
+ fill_level = tx_queue->insert_count - tx_queue->old_read_count;
+ /* -1 as there is no way to represent all descriptors used */
+ q_space = efx->type->txd_ring_mask - 1 - fill_level;
+
+ while (1) {
+ if (unlikely(q_space-- <= 0)) {
+ /* It might be that completions have happened
+ * since the xmit path last checked. Update
+ * the xmit path's copy of read_count.
+ */
+ ++tx_queue->stopped;
+ /* This memory barrier protects the change of
+ * stopped from the access of read_count. */
+ smp_mb();
+ tx_queue->old_read_count =
+ *(volatile unsigned *)&tx_queue->read_count;
+ fill_level = (tx_queue->insert_count
+ - tx_queue->old_read_count);
+ q_space = efx->type->txd_ring_mask - 1 - fill_level;
+ if (unlikely(q_space-- <= 0))
+ return 1;
+ smp_mb();
+ --tx_queue->stopped;
+ }
+
+ insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
+ buffer = &tx_queue->buffer[insert_ptr];
+ ++tx_queue->insert_count;
+
+ EFX_BUG_ON_PARANOID(tx_queue->insert_count -
+ tx_queue->read_count >
+ efx->type->txd_ring_mask);
+
+ efx_tsoh_free(tx_queue, buffer);
+ EFX_BUG_ON_PARANOID(buffer->len);
+ EFX_BUG_ON_PARANOID(buffer->unmap_len);
+ EFX_BUG_ON_PARANOID(buffer->skb);
+ EFX_BUG_ON_PARANOID(buffer->continuation != 1);
+ EFX_BUG_ON_PARANOID(buffer->tsoh);
+
+ buffer->dma_addr = dma_addr;
+
+ /* Ensure we do not cross a boundary unsupported by H/W */
+ dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1;
+
+ misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
+ if (misalign && dma_len + misalign > 512)
+ dma_len = 512 - misalign;
+
+ /* If there is enough space to send then do so */
+ if (dma_len >= len)
+ break;
+
+ buffer->len = dma_len; /* Don't set the other members */
+ dma_addr += dma_len;
+ len -= dma_len;
+ }
+
+ EFX_BUG_ON_PARANOID(!len);
+ buffer->len = len;
+ buffer->skb = skb;
+ buffer->continuation = !end_of_packet;
+ buffer->unmap_addr = unmap_addr;
+ buffer->unmap_len = unmap_len;
+ return 0;
+}
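The descriptor-length clamp in the loop above is the subtle part: (~dma_addr & tx_dma_mask) + 1 is simply the number of bytes left before the next hardware DMA boundary, so a fragment that straddles a boundary is split into multiple descriptors. A standalone sketch with a hypothetical 4KB boundary mask (the real mask comes from efx->type->tx_dma_mask):

/* Sketch of the "bytes to the next DMA boundary" split; the 4KB mask
 * and addresses are hypothetical. */
#include <stdio.h>

int main(void)
{
	unsigned long long tx_dma_mask = 0xfff;	/* example: 4KB boundary */
	unsigned long long dma_addr = 0x12345e00;
	unsigned int len = 2048;

	while (len) {
		/* Bytes remaining before the next (mask + 1)-aligned address */
		unsigned int dma_len = (~dma_addr & tx_dma_mask) + 1;

		if (dma_len > len)
			dma_len = len;
		printf("descriptor: addr 0x%llx len %u\n", dma_addr, dma_len);
		dma_addr += dma_len;
		len -= dma_len;
	}
	return 0;
}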
+
+
+/*
+ * Put a TSO header into the TX queue.
+ *
+ * This is special-cased because we know that it is small enough to fit in
+ * a single fragment, and we know it doesn't cross a page boundary. It
+ * also allows us to not worry about end-of-packet etc.
+ */
+static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue,
+ struct efx_tso_header *tsoh, unsigned len)
+{
+ struct efx_tx_buffer *buffer;
+
+ buffer = &tx_queue->buffer[tx_queue->insert_count &
+ tx_queue->efx->type->txd_ring_mask];
+ efx_tsoh_free(tx_queue, buffer);
+ EFX_BUG_ON_PARANOID(buffer->len);
+ EFX_BUG_ON_PARANOID(buffer->unmap_len);
+ EFX_BUG_ON_PARANOID(buffer->skb);
+ EFX_BUG_ON_PARANOID(buffer->continuation != 1);
+ EFX_BUG_ON_PARANOID(buffer->tsoh);
+ buffer->len = len;
+ buffer->dma_addr = tsoh->dma_addr;
+ buffer->tsoh = tsoh;
+
+ ++tx_queue->insert_count;
+}
+
+
+/* Remove descriptors put into a tx_queue. */
+static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+
+ /* Work backwards until we hit the original insert pointer value */
+ while (tx_queue->insert_count != tx_queue->write_count) {
+ --tx_queue->insert_count;
+ buffer = &tx_queue->buffer[tx_queue->insert_count &
+ tx_queue->efx->type->txd_ring_mask];
+ efx_tsoh_free(tx_queue, buffer);
+ EFX_BUG_ON_PARANOID(buffer->skb);
+ buffer->len = 0;
+ buffer->continuation = 1;
+ if (buffer->unmap_len) {
+ pci_unmap_page(tx_queue->efx->pci_dev,
+ buffer->unmap_addr,
+ buffer->unmap_len, PCI_DMA_TODEVICE);
+ buffer->unmap_len = 0;
+ }
+ }
+}
+
+
+/* Parse the SKB header and initialise state. */
+static inline void tso_start(struct tso_state *st, const struct sk_buff *skb)
+{
+ /* The combined Ethernet/IP/TCP header size is the TCP header size
+ * plus the offset of the TCP header from the start of the packet.
+ */
+ st->p.header_length = ((tcp_hdr(skb)->doff << 2u)
+ + PTR_DIFF(tcp_hdr(skb), skb->data));
+ st->p.full_packet_size = (st->p.header_length
+ + skb_shinfo(skb)->gso_size);
+
+ st->p.ipv4_id = ntohs(ip_hdr(skb)->id);
+ st->seqnum = ntohl(tcp_hdr(skb)->seq);
+
+ EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
+ EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
+ EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
+
+ st->packet_space = st->p.full_packet_size;
+ st->remaining_len = skb->len - st->p.header_length;
+}
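As a worked example of this arithmetic (the header sizes and gso_size below are illustrative): for a plain Ethernet + IPv4 + TCP frame the header length is the TCP data offset plus the TCP header's offset from the start of the packet, each full segment then carries header_length + gso_size bytes, and the per-segment IP total length excludes the Ethernet header, as used later in tso_start_new_packet():

/* Worked example of the TSO header/segment sizing; all sizes are
 * illustrative. */
#include <stdio.h>

int main(void)
{
	unsigned int eth_hdr_len = 14;	/* Ethernet header */
	unsigned int ip_hdr_len = 20;	/* IPv4, no options */
	unsigned int tcp_doff = 5;	/* TCP data offset in 32-bit words */
	unsigned int gso_size = 1448;	/* payload bytes per segment */

	/* header_length = TCP header size + offset of the TCP header */
	unsigned int header_length =
		(tcp_doff << 2) + eth_hdr_len + ip_hdr_len;
	unsigned int full_packet_size = header_length + gso_size;

	/* IP total length for a non-final segment excludes the Ethernet
	 * header */
	unsigned int ip_length = full_packet_size - eth_hdr_len;

	printf("header_length    = %u\n", header_length);	/* 54 */
	printf("full_packet_size = %u\n", full_packet_size);	/* 1502 */
	printf("ip tot_len       = %u\n", ip_length);		/* 1488 */
	return 0;
}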
+
+
+/**
+ * tso_get_fragment - record fragment details and map for DMA
+ * @st: TSO state
+ * @efx: Efx NIC
+ * @data: Pointer to fragment data
+ * @len: Length of fragment
+ *
+ * Record fragment details and map for DMA. Return 0 on success, or
+ * -%ENOMEM if DMA mapping fails.
+ */
+static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
+ int len, struct page *page, int page_off)
+{
+
+ st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
+ len, PCI_DMA_TODEVICE);
+ if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) {
+ st->ifc.unmap_len = len;
+ st->ifc.len = len;
+ st->ifc.dma_addr = st->ifc.unmap_addr;
+ st->ifc.page = page;
+ st->ifc.page_off = page_off;
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+
+/**
+ * tso_fill_packet_with_fragment - form descriptors for the current fragment
+ * @tx_queue: Efx TX queue
+ * @skb: Socket buffer
+ * @st: TSO state
+ *
+ * Form descriptors for the current fragment, until we reach the end
+ * of fragment or end-of-packet. Return 0 on success, 1 if not enough
+ * space in @tx_queue.
+ */
+static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb,
+ struct tso_state *st)
+{
+
+ int n, end_of_packet, rc;
+
+ if (st->ifc.len == 0)
+ return 0;
+ if (st->packet_space == 0)
+ return 0;
+
+ EFX_BUG_ON_PARANOID(st->ifc.len <= 0);
+ EFX_BUG_ON_PARANOID(st->packet_space <= 0);
+
+ n = min(st->ifc.len, st->packet_space);
+
+ st->packet_space -= n;
+ st->remaining_len -= n;
+ st->ifc.len -= n;
+ st->ifc.page_off += n;
+ end_of_packet = st->remaining_len == 0 || st->packet_space == 0;
+
+ rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n,
+ st->remaining_len ? NULL : skb,
+ end_of_packet, st->ifc.unmap_addr,
+ st->ifc.len ? 0 : st->ifc.unmap_len);
+
+ st->ifc.dma_addr += n;
+
+ return rc;
+}
+
+
+/**
+ * tso_start_new_packet - generate a new header and prepare for the new packet
+ * @tx_queue: Efx TX queue
+ * @skb: Socket buffer
+ * @st: TSO state
+ *
+ * Generate a new header and prepare for the new packet. Return 0 on
+ * success, or -1 if failed to alloc header.
+ */
+static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb,
+ struct tso_state *st)
+{
+ struct efx_tso_header *tsoh;
+ struct iphdr *tsoh_iph;
+ struct tcphdr *tsoh_th;
+ unsigned ip_length;
+ u8 *header;
+
+ /* Allocate a DMA-mapped header buffer. */
+ if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
+ if (tx_queue->tso_headers_free == NULL) {
+ if (efx_tsoh_block_alloc(tx_queue))
+ return -1;
+ }
+ EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
+ tsoh = tx_queue->tso_headers_free;
+ tx_queue->tso_headers_free = tsoh->next;
+ tsoh->unmap_len = 0;
+ } else {
+ tx_queue->tso_long_headers++;
+ tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length);
+ if (unlikely(!tsoh))
+ return -1;
+ }
+
+ header = TSOH_BUFFER(tsoh);
+ tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
+ tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));
+
+ /* Copy and update the headers. */
+ memcpy(header, skb->data, st->p.header_length);
+
+ tsoh_th->seq = htonl(st->seqnum);
+ st->seqnum += skb_shinfo(skb)->gso_size;
+ if (st->remaining_len > skb_shinfo(skb)->gso_size) {
+ /* This packet will not finish the TSO burst. */
+ ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb);
+ tsoh_th->fin = 0;
+ tsoh_th->psh = 0;
+ } else {
+ /* This packet will be the last in the TSO burst. */
+ ip_length = (st->p.header_length - ETH_HDR_LEN(skb)
+ + st->remaining_len);
+ tsoh_th->fin = tcp_hdr(skb)->fin;
+ tsoh_th->psh = tcp_hdr(skb)->psh;
+ }
+ tsoh_iph->tot_len = htons(ip_length);
+
+ /* Linux leaves suitable gaps in the IP ID space for us to fill. */
+ tsoh_iph->id = htons(st->p.ipv4_id);
+ st->p.ipv4_id++;
+
+ st->packet_space = skb_shinfo(skb)->gso_size;
+ ++tx_queue->tso_packets;
+
+ /* Form a descriptor for this header. */
+ efx_tso_put_header(tx_queue, tsoh, st->p.header_length);
+
+ return 0;
+}
+
+
+/**
+ * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
+ * @tx_queue: Efx TX queue
+ * @skb: Socket buffer
+ *
+ * Context: You must hold netif_tx_lock() to call this function.
+ *
+ * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
+ * @skb was not enqueued. In all cases @skb is consumed. Return
+ * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
+ */
+static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
+ const struct sk_buff *skb)
+{
+ int frag_i, rc, rc2 = NETDEV_TX_OK;
+ struct tso_state state;
+ skb_frag_t *f;
+
+ /* Verify TSO is safe - these checks should never fail. */
+ efx_tso_check_safe(skb);
+
+ EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
+
+ tso_start(&state, skb);
+
+ /* Assume that skb header area contains exactly the headers, and
+ * all payload is in the frag list.
+ */
+ if (skb_headlen(skb) == state.p.header_length) {
+ /* Grab the first payload fragment. */
+ EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
+ frag_i = 0;
+ f = &skb_shinfo(skb)->frags[frag_i];
+ rc = tso_get_fragment(&state, tx_queue->efx,
+ f->size, f->page, f->page_offset);
+ if (rc)
+ goto mem_err;
+ } else {
+ /* It may look like this code fragment assumes that the
+ * skb->data portion does not cross a page boundary, but
+ * that is not the case. It is guaranteed to be direct
+ * mapped memory, and therefore is physically contiguous,
+ * and so DMA will work fine. kmap_atomic() on this region
+ * will just return the direct mapping, so that will work
+ * too.
+ */
+ int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1);
+ int hl = state.p.header_length;
+ rc = tso_get_fragment(&state, tx_queue->efx,
+ skb_headlen(skb) - hl,
+ virt_to_page(skb->data), page_off + hl);
+ if (rc)
+ goto mem_err;
+ frag_i = -1;
+ }
+
+ if (tso_start_new_packet(tx_queue, skb, &state) < 0)
+ goto mem_err;
+
+ while (1) {
+ rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
+ if (unlikely(rc))
+ goto stop;
+
+ /* Move onto the next fragment? */
+ if (state.ifc.len == 0) {
+ if (++frag_i >= skb_shinfo(skb)->nr_frags)
+ /* End of payload reached. */
+ break;
+ f = &skb_shinfo(skb)->frags[frag_i];
+ rc = tso_get_fragment(&state, tx_queue->efx,
+ f->size, f->page, f->page_offset);
+ if (rc)
+ goto mem_err;
+ }
+
+ /* Start at new packet? */
+ if (state.packet_space == 0 &&
+ tso_start_new_packet(tx_queue, skb, &state) < 0)
+ goto mem_err;
+ }
+
+ /* Pass off to hardware */
+ falcon_push_buffers(tx_queue);
+
+ tx_queue->tso_bursts++;
+ return NETDEV_TX_OK;
+
+ mem_err:
+ EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping"
+ " error\n");
+ dev_kfree_skb_any((struct sk_buff *)skb);
+ goto unwind;
+
+ stop:
+ rc2 = NETDEV_TX_BUSY;
+
+ /* Stop the queue if it wasn't stopped before. */
+ if (tx_queue->stopped == 1)
+ efx_stop_queue(tx_queue->efx);
+
+ unwind:
+ efx_enqueue_unwind(tx_queue);
+ return rc2;
+}
+
+
+/*
+ * Free up all TSO data structures associated with tx_queue. This
+ * routine should be called only once the tx_queue is both empty and
+ * will no longer be used.
+ */
+static void efx_fini_tso(struct efx_tx_queue *tx_queue)
+{
+ unsigned i;
+
+ if (tx_queue->buffer) {
+ for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
+ efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
+ }
+
+ while (tx_queue->tso_headers_free != NULL)
+ efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
+ tx_queue->efx->pci_dev);
+}
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index dca62f190198..35ab19c27f8d 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -16,7 +16,7 @@
*/
#define EFX_WORKAROUND_ALWAYS(efx) 1
-#define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1)
+#define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1)
/* XAUI resets if link not detected */
#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
index 66dd5bf1eaa9..f3684ad28887 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/xfp_phy.c
@@ -24,6 +24,10 @@
MDIO_MMDREG_DEVS0_PMAPMD | \
MDIO_MMDREG_DEVS0_PHYXS)
+#define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \
+ (1 << LOOPBACK_PMAPMD) | \
+ (1 << LOOPBACK_NETWORK))
+
/****************************************************************************/
/* Quake-specific MDIO registers */
#define MDIO_QUAKE_LED0_REG (0xD006)
@@ -35,6 +39,10 @@ void xfp_set_led(struct efx_nic *p, int led, int mode)
mode);
}
+struct xfp_phy_data {
+ int tx_disabled;
+};
+
#define XFP_MAX_RESET_TIME 500
#define XFP_RESET_WAIT 10
@@ -72,18 +80,33 @@ static int xfp_reset_phy(struct efx_nic *efx)
static int xfp_phy_init(struct efx_nic *efx)
{
+ struct xfp_phy_data *phy_data;
u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS);
int rc;
+ phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL);
+ if (!phy_data)
+ return -ENOMEM;
+ efx->phy_data = phy_data;
+
EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision"
" %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
MDIO_ID_REV(devid));
+ phy_data->tx_disabled = efx->tx_disabled;
+
rc = xfp_reset_phy(efx);
EFX_INFO(efx, "XFP: PHY init %s.\n",
rc ? "failed" : "successful");
+ if (rc < 0)
+ goto fail;
+ return 0;
+
+ fail:
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
return rc;
}
@@ -110,6 +133,16 @@ static int xfp_phy_check_hw(struct efx_nic *efx)
static void xfp_phy_reconfigure(struct efx_nic *efx)
{
+ struct xfp_phy_data *phy_data = efx->phy_data;
+
+ /* Reset the PHY when moving from tx off to tx on */
+ if (phy_data->tx_disabled && !efx->tx_disabled)
+ xfp_reset_phy(efx);
+
+ mdio_clause45_transmit_disable(efx);
+ mdio_clause45_phy_reconfigure(efx);
+
+ phy_data->tx_disabled = efx->tx_disabled;
efx->link_up = xfp_link_ok(efx);
efx->link_options = GM_LPA_10000FULL;
}
@@ -119,6 +152,10 @@ static void xfp_phy_fini(struct efx_nic *efx)
{
/* Clobber the LED if it was blinking */
efx->board_info.blink(efx, 0);
+
+ /* Free the context block */
+ kfree(efx->phy_data);
+ efx->phy_data = NULL;
}
struct efx_phy_operations falcon_xfp_phy_ops = {
@@ -129,4 +166,5 @@ struct efx_phy_operations falcon_xfp_phy_ops = {
.clear_interrupt = xfp_phy_clear_interrupt,
.reset_xaui = efx_port_dummy_op_void,
.mmds = XFP_REQUIRED_DEVS,
+ .loopbacks = XFP_LOOPBACKS,
};