Diffstat (limited to 'drivers/net/ethernet/sfc/efx.c')
-rw-r--r--	drivers/net/ethernet/sfc/efx.c	201
1 file changed, 170 insertions(+), 31 deletions(-)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index fd844b53e385..83d464347021 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -83,6 +83,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
+ [RESET_TYPE_MC_BIST] = "MC_BIST",
};
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
@@ -91,6 +92,12 @@ const char *const efx_reset_type_names[] = {
*/
static struct workqueue_struct *reset_workqueue;
+/* How often and how many times to poll for a reset while waiting for a
+ * BIST that another function started to complete.
+ */
+#define BIST_WAIT_DELAY_MS 100
+#define BIST_WAIT_DELAY_COUNT 100
+
/**************************************************************************
*
* Configurable values
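With the values above, the worst-case wait for a BIST started by another function is roughly 100 polls of 100 ms each, about 10 seconds. A minimal user-space sketch of that bounded-poll pattern, not driver code; fake_mcdi_poll_reboot() is an invented stand-in for efx_mcdi_poll_reboot():

#include <stdbool.h>
#include <unistd.h>

#define BIST_WAIT_DELAY_MS	100
#define BIST_WAIT_DELAY_COUNT	100

/* Poll every BIST_WAIT_DELAY_MS for at most BIST_WAIT_DELAY_COUNT
 * iterations (~10 s total), then give up.
 */
static bool wait_for_reboot(bool (*poll_reboot)(void))
{
	int i;

	for (i = 0; i < BIST_WAIT_DELAY_COUNT; i++) {
		if (poll_reboot())
			return true;		/* reboot seen; BIST finished */
		usleep(BIST_WAIT_DELAY_MS * 1000);
	}
	return false;				/* timed out; caller must recover */
}

static bool fake_mcdi_poll_reboot(void)
{
	return true;	/* pretend the MC has already rebooted */
}

int main(void)
{
	return wait_for_reboot(fake_mcdi_poll_reboot) ? 0 : 1;
}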
@@ -246,7 +253,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_channel_get_rx_queue(channel);
efx_rx_flush_packet(channel);
- efx_fast_push_rx_descriptors(rx_queue);
+ efx_fast_push_rx_descriptors(rx_queue, true);
}
return spent;
@@ -639,7 +646,9 @@ static void efx_start_datapath(struct efx_nic *efx)
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
atomic_inc(&efx->active_queues);
- efx_nic_generate_fill_event(rx_queue);
+ efx_stop_eventq(channel);
+ efx_fast_push_rx_descriptors(rx_queue, false);
+ efx_start_eventq(channel);
}
WARN_ON(channel->rx_pkt_n_frags);
@@ -1051,18 +1060,23 @@ static void efx_start_port(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
efx->port_enabled = true;
- /* efx_mac_work() might have been scheduled after efx_stop_port(),
- * and then cancelled by efx_flush_all() */
+ /* Ensure MAC ingress/egress is enabled */
efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock);
}
-/* Prevent efx_mac_work() and efx_monitor() from working */
+/* Cancel work for MAC reconfiguration, periodic hardware monitoring
+ * and the async self-test, wait for them to finish and prevent them
+ * being scheduled again. This doesn't cover online resets, which
+ * should only be cancelled when removing the device.
+ */
static void efx_stop_port(struct efx_nic *efx)
{
netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
+ EFX_ASSERT_RESET_SERIALISED(efx);
+
mutex_lock(&efx->mac_lock);
efx->port_enabled = false;
mutex_unlock(&efx->mac_lock);
@@ -1070,6 +1084,10 @@ static void efx_stop_port(struct efx_nic *efx)
/* Serialise against efx_set_multicast_list() */
netif_addr_lock_bh(efx->net_dev);
netif_addr_unlock_bh(efx->net_dev);
+
+ cancel_delayed_work_sync(&efx->monitor_work);
+ efx_selftest_async_cancel(efx);
+ cancel_work_sync(&efx->mac_work);
}
static void efx_fini_port(struct efx_nic *efx)
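The reordering above follows a common shutdown pattern: clear the enabled flag under the same lock the deferred work takes, then cancel that work, so a late-running worker either sees the flag and does nothing or completes before the cancel returns. A rough user-space analogue, with invented names and pthread_join() standing in for cancel_work_sync():

#include <pthread.h>
#include <stdbool.h>

struct port {
	pthread_mutex_t lock;
	bool enabled;
	pthread_t worker;
};

/* Deferred work: only touches the hardware if the port is still enabled. */
static void *mac_work(void *arg)
{
	struct port *p = arg;

	pthread_mutex_lock(&p->lock);
	if (p->enabled) {
		/* reconfigure the MAC here */
	}
	pthread_mutex_unlock(&p->lock);
	return NULL;
}

static void stop_port(struct port *p)
{
	/* 1. Stop any further work from doing anything. */
	pthread_mutex_lock(&p->lock);
	p->enabled = false;
	pthread_mutex_unlock(&p->lock);

	/* 2. Wait for work that was already running. */
	pthread_join(p->worker, NULL);
}

int main(void)
{
	struct port p = { PTHREAD_MUTEX_INITIALIZER, true };

	pthread_create(&p.worker, NULL, mac_work, &p);
	stop_port(&p);
	return 0;
}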
@@ -1099,6 +1117,77 @@ static void efx_remove_port(struct efx_nic *efx)
*
**************************************************************************/
+static LIST_HEAD(efx_primary_list);
+static LIST_HEAD(efx_unassociated_list);
+
+static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
+{
+ return left->type == right->type &&
+ left->vpd_sn && right->vpd_sn &&
+ !strcmp(left->vpd_sn, right->vpd_sn);
+}
+
+static void efx_associate(struct efx_nic *efx)
+{
+ struct efx_nic *other, *next;
+
+ if (efx->primary == efx) {
+ /* Adding primary function; look for secondaries */
+
+ netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
+ list_add_tail(&efx->node, &efx_primary_list);
+
+ list_for_each_entry_safe(other, next, &efx_unassociated_list,
+ node) {
+ if (efx_same_controller(efx, other)) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to secondary list of %s %s\n",
+ pci_name(efx->pci_dev),
+ efx->net_dev->name);
+ list_add_tail(&other->node,
+ &efx->secondary_list);
+ other->primary = efx;
+ }
+ }
+ } else {
+ /* Adding secondary function; look for primary */
+
+ list_for_each_entry(other, &efx_primary_list, node) {
+ if (efx_same_controller(efx, other)) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to secondary list of %s %s\n",
+ pci_name(other->pci_dev),
+ other->net_dev->name);
+ list_add_tail(&efx->node,
+ &other->secondary_list);
+ efx->primary = other;
+ return;
+ }
+ }
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "adding to unassociated list\n");
+ list_add_tail(&efx->node, &efx_unassociated_list);
+ }
+}
+
+static void efx_dissociate(struct efx_nic *efx)
+{
+ struct efx_nic *other, *next;
+
+ list_del(&efx->node);
+ efx->primary = NULL;
+
+ list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
+ list_del(&other->node);
+ netif_dbg(other, probe, other->net_dev,
+ "moving to unassociated list\n");
+ list_add_tail(&other->node, &efx_unassociated_list);
+ other->primary = NULL;
+ }
+}
+
/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
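efx_associate()/efx_dissociate() group PCI functions that share a VPD serial number under one primary, and probe order is not fixed: a secondary that probes first parks on the unassociated list and is adopted when its primary appears. A toy user-space model of that grouping rule, using arrays instead of list_heads; every name below is invented for illustration:

#include <stdio.h>
#include <string.h>

struct fn {
	const char *name;
	const char *serial;	/* stands in for vpd_sn */
	int is_primary;
	struct fn *primary;	/* NULL until associated */
};

static struct fn *primaries[8], *unassoc[8];
static int n_prim, n_unassoc;

static void associate(struct fn *f)
{
	int i;

	if (f->is_primary) {
		f->primary = f;
		primaries[n_prim++] = f;
		/* Adopt any secondaries that probed before us. */
		for (i = 0; i < n_unassoc; i++) {
			if (unassoc[i] &&
			    !strcmp(unassoc[i]->serial, f->serial)) {
				unassoc[i]->primary = f;
				unassoc[i] = NULL;
			}
		}
	} else {
		/* Look for a primary with the same serial number. */
		for (i = 0; i < n_prim; i++) {
			if (!strcmp(primaries[i]->serial, f->serial)) {
				f->primary = primaries[i];
				return;
			}
		}
		unassoc[n_unassoc++] = f;	/* primary not probed yet */
	}
}

int main(void)
{
	struct fn sec  = { "fn1", "SN0123", 0, NULL };
	struct fn prim = { "fn0", "SN0123", 1, NULL };

	associate(&sec);	/* parks on the unassociated list */
	associate(&prim);	/* adopts fn1 */
	printf("%s -> primary %s\n", sec.name,
	       sec.primary ? sec.primary->name : "(none)");
	return 0;
}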
@@ -1675,18 +1764,10 @@ static void efx_start_all(struct efx_nic *efx)
}
efx->type->start_stats(efx);
-}
-
-/* Flush all delayed work. Should only be called when no more delayed work
- * will be scheduled. This doesn't flush pending online resets (efx_reset),
- * since we're holding the rtnl_lock at this point. */
-static void efx_flush_all(struct efx_nic *efx)
-{
- /* Make sure the hardware monitor and event self-test are stopped */
- cancel_delayed_work_sync(&efx->monitor_work);
- efx_selftest_async_cancel(efx);
- /* Stop scheduled port reconfigurations */
- cancel_work_sync(&efx->mac_work);
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
}
/* Quiesce the hardware and software data path, and regular activity
@@ -1702,12 +1783,16 @@ static void efx_stop_all(struct efx_nic *efx)
if (!efx->port_enabled)
return;
+ /* update stats before we go down so we can accurately count
+ * rx_nodesc_drops
+ */
+ efx->type->pull_stats(efx);
+ spin_lock_bh(&efx->stats_lock);
+ efx->type->update_stats(efx, NULL, NULL);
+ spin_unlock_bh(&efx->stats_lock);
efx->type->stop_stats(efx);
efx_stop_port(efx);
- /* Flush efx_mac_work(), refill_workqueue, monitor_work */
- efx_flush_all(efx);
-
/* Stop the kernel transmit interface. This is only valid if
* the device is stopped or detached; otherwise the watchdog
* may fire immediately.
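Pulling and updating the stats under stats_lock before stopping the datapath latches counters such as rx_nodesc_drops that would otherwise be lost once the hardware stops reporting them. A small user-space sketch of that snapshot-before-teardown ordering, with invented names:

#include <pthread.h>
#include <stdio.h>

struct dev {
	pthread_mutex_t stats_lock;
	unsigned long hw_rx_drops;	/* readable only while running */
	unsigned long rx_drops;		/* last snapshot shown to users */
	int running;
};

static void update_stats(struct dev *d)
{
	pthread_mutex_lock(&d->stats_lock);
	if (d->running)
		d->rx_drops = d->hw_rx_drops;
	pthread_mutex_unlock(&d->stats_lock);
}

static void stop_datapath(struct dev *d)
{
	update_stats(d);	/* latch the final counts first */
	d->running = 0;
	d->hw_rx_drops = 0;	/* counter goes away with the datapath */
}

int main(void)
{
	struct dev d = { PTHREAD_MUTEX_INITIALIZER, 42, 0, 1 };

	stop_datapath(&d);
	printf("rx_drops = %lu\n", d.rx_drops);	/* prints 42, not 0 */
	return 0;
}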
@@ -1851,7 +1936,9 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
struct mii_ioctl_data *data = if_mii(ifr);
if (cmd == SIOCSHWTSTAMP)
- return efx_ptp_ioctl(efx, ifr, cmd);
+ return efx_ptp_set_ts_config(efx, ifr);
+ if (cmd == SIOCGHWTSTAMP)
+ return efx_ptp_get_ts_config(efx, ifr);
/* Convert phy_id from older PRTAD/DEVAD format */
if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
@@ -2064,7 +2151,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
/* If disabling RX n-tuple filtering, clear existing filters */
if (net_dev->features & ~data & NETIF_F_NTUPLE)
- efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+ return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
return 0;
}
@@ -2198,6 +2285,8 @@ static int efx_register_netdev(struct efx_nic *efx)
efx_init_tx_queue_core_txq(tx_queue);
}
+ efx_associate(efx);
+
rtnl_unlock();
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2211,6 +2300,7 @@ static int efx_register_netdev(struct efx_nic *efx)
fail_registered:
rtnl_lock();
+ efx_dissociate(efx);
unregister_netdevice(net_dev);
fail_locked:
efx->state = STATE_UNINIT;
@@ -2387,6 +2477,24 @@ int efx_try_recovery(struct efx_nic *efx)
return 0;
}
+static void efx_wait_for_bist_end(struct efx_nic *efx)
+{
+ int i;
+
+ for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
+ if (efx_mcdi_poll_reboot(efx))
+ goto out;
+ msleep(BIST_WAIT_DELAY_MS);
+ }
+
+ netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
+out:
+ /* Either way unset the BIST flag. If we found no reboot we probably
+ * won't recover, but we should try.
+ */
+ efx->mc_bist_for_other_fn = false;
+}
+
/* The worker thread exists so that code that cannot sleep can
* schedule a reset for later.
*/
@@ -2399,6 +2507,9 @@ static void efx_reset_work(struct work_struct *data)
pending = ACCESS_ONCE(efx->reset_pending);
method = fls(pending) - 1;
+ if (method == RESET_TYPE_MC_BIST)
+ efx_wait_for_bist_end(efx);
+
if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
method == RESET_TYPE_RECOVER_OR_ALL) &&
efx_try_recovery(efx))
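reset_pending is a bitmask of queued reset types and fls(pending) - 1 picks the highest-numbered one (the driver numbers reset methods in order of increasing scope), so with RESET_TYPE_MC_BIST now schedulable the worker intercepts it here and waits for the other function's BIST to finish before doing anything else. A tiny user-space illustration of that selection, with a portable fallback for the kernel's fls():

#include <stdio.h>

/* 1-based index of the most significant set bit, 0 if none. */
static int fls32(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int pending = 0;

	pending |= 1u << 3;	/* two reset types queued at once */
	pending |= 1u << 7;
	printf("handle reset type %d first\n", fls32(pending) - 1);	/* 7 */
	return 0;
}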
@@ -2437,6 +2548,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE:
case RESET_TYPE_RECOVER_OR_DISABLE:
+ case RESET_TYPE_MC_BIST:
method = type;
netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
RESET_TYPE(method));
@@ -2530,6 +2642,8 @@ static int efx_init_struct(struct efx_nic *efx,
int i;
/* Initialise common structures */
+ INIT_LIST_HEAD(&efx->node);
+ INIT_LIST_HEAD(&efx->secondary_list);
spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
INIT_LIST_HEAD(&efx->mtd_list);
@@ -2548,6 +2662,8 @@ static int efx_init_struct(struct efx_nic *efx,
NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
efx->rx_packet_hash_offset =
efx->type->rx_hash_offset - efx->type->rx_prefix_size;
+ efx->rx_packet_ts_offset =
+ efx->type->rx_ts_offset - efx->type->rx_prefix_size;
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->phy_op = &efx_dummy_phy_operations;
@@ -2588,6 +2704,8 @@ static void efx_fini_struct(struct efx_nic *efx)
for (i = 0; i < EFX_MAX_CHANNELS; i++)
kfree(efx->channel[i]);
+ kfree(efx->vpd_sn);
+
if (efx->workqueue) {
destroy_workqueue(efx->workqueue);
efx->workqueue = NULL;
@@ -2632,6 +2750,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* Mark the NIC as fini, then stop the interface */
rtnl_lock();
+ efx_dissociate(efx);
dev_close(efx->net_dev);
efx_disable_interrupts(efx);
rtnl_unlock();
@@ -2647,7 +2766,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
efx_fini_struct(efx);
- pci_set_drvdata(pci_dev, NULL);
free_netdev(efx->net_dev);
pci_disable_pcie_error_reporting(pci_dev);
@@ -2659,12 +2777,12 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
* always appear within the first 512 bytes.
*/
#define SFC_VPD_LEN 512
-static void efx_print_product_vpd(struct efx_nic *efx)
+static void efx_probe_vpd_strings(struct efx_nic *efx)
{
struct pci_dev *dev = efx->pci_dev;
char vpd_data[SFC_VPD_LEN];
ssize_t vpd_size;
- int i, j;
+ int ro_start, ro_size, i, j;
/* Get the vpd data from the device */
vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
@@ -2674,14 +2792,15 @@ static void efx_print_product_vpd(struct efx_nic *efx)
}
/* Get the Read only section */
- i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
+ ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
+ if (ro_start < 0) {
netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
return;
}
- j = pci_vpd_lrdt_size(&vpd_data[i]);
- i += PCI_VPD_LRDT_TAG_SIZE;
+ ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
+ j = ro_size;
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
if (i + j > vpd_size)
j = vpd_size - i;
@@ -2701,6 +2820,27 @@ static void efx_print_product_vpd(struct efx_nic *efx)
netif_info(efx, drv, efx->net_dev,
"Part Number : %.*s\n", j, &vpd_data[i]);
+
+ i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
+ j = ro_size;
+ i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
+ if (i < 0) {
+ netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
+ return;
+ }
+
+ j = pci_vpd_info_field_size(&vpd_data[i]);
+ i += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (i + j > vpd_size) {
+ netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
+ return;
+ }
+
+ efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
+ if (!efx->vpd_sn)
+ return;
+
+ snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
}
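The value of the "SN" keyword is a fixed-length, non-NUL-terminated byte run, hence the kmalloc(j + 1) and bounded copy above. A user-space sketch of the same copy-out, using memcpy rather than snprintf purely for clarity; names are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy a fixed-length, unterminated field into a freshly allocated
 * C string: len bytes plus one for the terminating NUL.
 */
static char *copy_vpd_field(const char *field, size_t len)
{
	char *s = malloc(len + 1);

	if (!s)
		return NULL;
	memcpy(s, field, len);
	s[len] = '\0';
	return s;
}

int main(void)
{
	const char raw[] = { 'S', 'F', '1', '2', '3', '4', '5', '6' };	/* no NUL */
	char *sn = copy_vpd_field(raw, sizeof(raw));

	printf("vpd_sn = %s\n", sn ? sn : "(alloc failed)");
	free(sn);
	return 0;
}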
@@ -2797,7 +2937,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_info(efx, probe, efx->net_dev,
"Solarflare NIC detected\n");
- efx_print_product_vpd(efx);
+ efx_probe_vpd_strings(efx);
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
@@ -2841,7 +2981,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
fail2:
efx_fini_struct(efx);
fail1:
- pci_set_drvdata(pci_dev, NULL);
WARN_ON(rc > 0);
netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
free_netdev(net_dev);