Diffstat (limited to 'drivers/net/ethernet/intel/i40e/i40e_main.c'):
 drivers/net/ethernet/intel/i40e/i40e_main.c | 1757 +++++++++++++++++++------
 1 file changed, 1357 insertions(+), 400 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index be15938ba213..b901371ca361 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
@@ -27,6 +26,9 @@
/* Local includes */
#include "i40e.h"
+#ifdef CONFIG_I40E_VXLAN
+#include <net/vxlan.h>
+#endif
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
@@ -36,22 +38,24 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 11
+#define DRV_VERSION_BUILD 30
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
-static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";
+static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
-static int i40e_setup_pf_switch(struct i40e_pf *pf);
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_fdir_sb_setup(struct i40e_pf *pf);
+static int i40e_veb_get_bw_info(struct i40e_veb *veb);
/* i40e_pci_tbl - PCI Device ID Table
*
@@ -61,16 +65,16 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
* Class, Class Mask, private data (not used) }
*/
static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
- {PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
- {PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
/* required last entry */
{0, }
};
@@ -354,6 +358,12 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
int i;
+ if (test_bit(__I40E_DOWN, &vsi->state))
+ return stats;
+
+ if (!vsi->tx_rings)
+ return stats;
+
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
struct i40e_ring *tx_ring, *rx_ring;
@@ -413,7 +423,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
- if (vsi->rx_rings)
+ if (vsi->rx_rings && vsi->rx_rings[0]) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
memset(&vsi->rx_rings[i]->stats, 0 ,
sizeof(vsi->rx_rings[i]->stats));
@@ -424,6 +434,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->tx_rings[i]->tx_stats, 0,
sizeof(vsi->tx_rings[i]->tx_stats));
}
+ }
vsi->stat_offsets_loaded = false;
}
@@ -458,7 +469,7 @@ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
{
u64 new_data;
- if (hw->device_id == I40E_QEMU_DEVICE_ID) {
+ if (hw->device_id == I40E_DEV_ID_QEMU) {
new_data = rd32(hw, loreg);
new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
} else {
@@ -574,10 +585,11 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
veb->stat_offsets_loaded,
&oes->tx_discards, &es->tx_discards);
- i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
- veb->stat_offsets_loaded,
- &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
-
+ if (hw->revision_id > 0)
+ i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_unknown_protocol,
+ &es->rx_unknown_protocol);
i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
veb->stat_offsets_loaded,
&oes->rx_bytes, &es->rx_bytes);
@@ -775,8 +787,8 @@ void i40e_update_stats(struct i40e_vsi *vsi)
} while (u64_stats_fetch_retry_bh(&p->syncp, start));
rx_b += bytes;
rx_p += packets;
- rx_buf += p->rx_stats.alloc_rx_buff_failed;
- rx_page += p->rx_stats.alloc_rx_page_failed;
+ rx_buf += p->rx_stats.alloc_buff_failed;
+ rx_page += p->rx_stats.alloc_page_failed;
}
rcu_read_unlock();
vsi->tx_restart = tx_restart;
@@ -1062,7 +1074,7 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
if (!i40e_find_filter(vsi, macaddr, f->vlan,
is_vf, is_netdev)) {
if (!i40e_add_filter(vsi, macaddr, f->vlan,
- is_vf, is_netdev))
+ is_vf, is_netdev))
return NULL;
}
}
@@ -1204,6 +1216,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
return 0;
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return -EADDRNOTAVAIL;
+
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
ret = i40e_aq_mac_address_write(&vsi->back->hw,
@@ -1257,6 +1273,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
u8 offset;
u16 qmap;
int i;
+ u16 num_tc_qps = 0;
sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
offset = 0;
@@ -1278,6 +1295,9 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
+ /* Number of queues per enabled TC */
+ num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc);
+ num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
/* Setup queue offset/count for all TCs for given VSI */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -1285,30 +1305,25 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
int pow, num_qps;
- vsi->tc_config.tc_info[i].qoffset = offset;
switch (vsi->type) {
case I40E_VSI_MAIN:
- if (i == 0)
- qcount = pf->rss_size;
- else
- qcount = pf->num_tc_qps;
- vsi->tc_config.tc_info[i].qcount = qcount;
+ qcount = min_t(int, pf->rss_size, num_tc_qps);
break;
case I40E_VSI_FDIR:
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
default:
- qcount = vsi->alloc_queue_pairs;
- vsi->tc_config.tc_info[i].qcount = qcount;
+ qcount = num_tc_qps;
WARN_ON(i != 0);
break;
}
+ vsi->tc_config.tc_info[i].qoffset = offset;
+ vsi->tc_config.tc_info[i].qcount = qcount;
/* find the power-of-2 of the number of queue pairs */
- num_qps = vsi->tc_config.tc_info[i].qcount;
+ num_qps = qcount;
pow = 0;
- while (num_qps &&
- ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
+ while (num_qps && ((1 << pow) < qcount)) {
pow++;
num_qps >>= 1;
}
@@ -1318,7 +1333,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
(offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
- offset += vsi->tc_config.tc_info[i].qcount;
+ offset += qcount;
} else {
/* TC is not enabled so set the offset to
* default queue and allocate one queue
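
The rewritten loop above computes the exponent of the smallest power of two
that covers qcount; the qmap word then carries that exponent in the
I40E_AQ_VSI_TC_QUE_NUMBER field. A minimal standalone sketch of the same
computation (illustrative helper only, not driver code; in-kernel,
order_base_2() from <linux/log2.h> gives the same result for qcount > 0):

	/* Illustrative: smallest pow such that (1 << pow) >= qcount */
	static int qcount_order(int qcount)
	{
		int pow = 0;

		while ((1 << pow) < qcount)
			pow++;
		return pow;
	}
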
@@ -1494,11 +1509,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cpu_to_le16((u16)(f->vlan ==
I40E_VLAN_ANY ? 0 : f->vlan));
- /* vlan0 as wild card to allow packets from all vlans */
- if (f->vlan == I40E_VLAN_ANY ||
- (vsi->netdev && !(vsi->netdev->features &
- NETIF_F_HW_VLAN_CTAG_FILTER)))
- cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
del_list[num_del].flags = cmd_flags;
num_del++;
@@ -1564,12 +1574,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
add_list[num_add].queue_number = 0;
cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
-
- /* vlan0 as wild card to allow packets from all vlans */
- if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
- !(vsi->netdev->features &
- NETIF_F_HW_VLAN_CTAG_FILTER)))
- cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
add_list[num_add].flags = cpu_to_le16(cmd_flags);
num_add++;
@@ -1635,6 +1639,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev,
"set uni promisc failed, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
+ aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (aq_ret)
+ dev_info(&pf->pdev->dev,
+ "set brdcast promisc failed, err %d, aq_err %d\n",
+ aq_ret, pf->hw.aq.asq_last_status);
}
clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1687,6 +1698,27 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * i40e_ioctl - Access the hwtstamp interface
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ * @cmd: ioctl command
+ **/
+int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return i40e_ptp_get_ts_config(pf, ifr);
+ case SIOCSHWTSTAMP:
+ return i40e_ptp_set_ts_config(pf, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
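
The two commands handled here are the standard kernel hardware-timestamping
ioctls. As a rough illustration of how a userspace caller reaches this
handler (a hedged sketch, not part of the patch; the tx_type and rx_filter
values are arbitrary examples):

	/* Userspace sketch: request HW timestamps via SIOCSHWTSTAMP */
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int enable_hwtstamp(int sock, const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		/* the kernel dispatches this to the driver, i.e. i40e_ioctl() */
		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
	}
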
+/**
* i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
* @vsi: the vsi being adjusted
**/
@@ -1768,7 +1800,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
struct i40e_mac_filter *f, *add_f;
bool is_netdev, is_vf;
- int ret;
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(vsi->netdev);
@@ -1794,13 +1825,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
- ret = i40e_sync_vsi_filters(vsi);
- if (ret) {
- dev_info(&vsi->back->pdev->dev,
- "Could not sync filters for vid %d\n", vid);
- return ret;
- }
-
/* Now if we add a vlan tag, make sure to check if it is the first
* tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
* with 0, so we now accept untagged and specified tagged traffic
@@ -1821,7 +1845,10 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
return -ENOMEM;
}
}
+ }
+ /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
+ if (vid > 0 && !vsi->info.pvid) {
list_for_each_entry(f, &vsi->mac_filter_list, list) {
if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
is_vf, is_netdev)) {
@@ -1837,10 +1864,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
}
- ret = i40e_sync_vsi_filters(vsi);
}
- return ret;
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return 0;
+
+ return i40e_sync_vsi_filters(vsi);
}
/**
@@ -1856,7 +1886,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
struct i40e_mac_filter *f, *add_f;
bool is_vf, is_netdev;
int filter_count = 0;
- int ret;
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(netdev);
@@ -1867,12 +1896,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
list_for_each_entry(f, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
- ret = i40e_sync_vsi_filters(vsi);
- if (ret) {
- dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
- return ret;
- }
-
/* go through all the filters for this VSI and if there is only
* vid == 0 it means there are no other filters, so vid 0 must
* be replaced with -1. This signifies that we should from now
@@ -1915,6 +1938,10 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
+ if (test_bit(__I40E_DOWN, &vsi->back->state) ||
+ test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+ return 0;
+
return i40e_sync_vsi_filters(vsi);
}
@@ -2005,8 +2032,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi->info.pvid = cpu_to_le16(vid);
- vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
- vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
+ I40E_AQ_VSI_PVLAN_INSERT_PVID |
+ I40E_AQ_VSI_PVLAN_EMOD_STR;
ctxt.seid = vsi->seid;
memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
@@ -2029,8 +2057,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
**/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
+ i40e_vlan_stripping_disable(vsi);
+
vsi->info.pvid = 0;
- i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
}
/**
@@ -2063,8 +2092,11 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
int i;
+ if (!vsi->tx_rings)
+ return;
+
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->tx_rings[i]->desc)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
i40e_free_tx_resources(vsi->tx_rings[i]);
}
@@ -2097,8 +2129,11 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
int i;
+ if (!vsi->rx_rings)
+ return;
+
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->rx_rings[i]->desc)
+ if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
i40e_free_rx_resources(vsi->rx_rings[i]);
}
@@ -2118,7 +2153,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
u32 qtx_ctl = 0;
/* some ATR related tx ring init */
- if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) {
+ if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
ring->atr_sample_rate = vsi->back->atr_sample_rate;
ring->atr_count = 0;
} else {
@@ -2127,6 +2162,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
/* initialize XPS */
if (ring->q_vector && ring->netdev &&
+ vsi->tc_config.numtc <= 1 &&
!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
netif_set_xps_queue(ring->netdev,
&ring->q_vector->affinity_mask,
@@ -2138,8 +2174,9 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
tx_ctx.new_context = 1;
tx_ctx.base = (ring->dma / 128);
tx_ctx.qlen = ring->count;
- tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED));
+ tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED));
+ tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
/* As part of VSI creation/update, FW allocates certain
* Tx arbitration queue sets for each TC enabled for
@@ -2173,7 +2210,10 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
}
/* Now associate this queue with this PCI function */
- qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+ if (vsi->type == I40E_VSI_VMDQ2)
+ qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
+ else
+ qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
I40E_QTX_CTL_PF_INDX_MASK);
wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
@@ -2240,7 +2280,10 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.tphwdesc_ena = 1;
rx_ctx.tphdata_ena = 1;
rx_ctx.tphhead_ena = 1;
- rx_ctx.lrxqthresh = 2;
+ if (hw->revision_id == 0)
+ rx_ctx.lrxqthresh = 0;
+ else
+ rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
rx_ctx.showiv = 1;
@@ -2474,6 +2517,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
I40E_PFINT_ICR0_ENA_GRST_MASK |
I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
I40E_PFINT_ICR0_ENA_GPIO_MASK |
+ I40E_PFINT_ICR0_ENA_TIMESYNC_MASK |
I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
I40E_PFINT_ICR0_ENA_VFLR_MASK |
@@ -2482,8 +2526,8 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
wr32(hw, I40E_PFINT_ICR0_ENA, val);
/* SW_ITR_IDX = 0, but don't change INTENA */
- wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
- I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
+ wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
/* OTHER_ITR_IDX = 0 */
wr32(hw, I40E_PFINT_STAT_CTL0, 0);
@@ -2529,6 +2573,19 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
}
/**
+ * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
+ * @pf: board private structure
+ **/
+void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ i40e_flush(hw);
+}
+
+/**
* i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
* @pf: board private structure
**/
@@ -2581,23 +2638,6 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
}
/**
- * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings
- * @irq: interrupt number
- * @data: pointer to a q_vector
- **/
-static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
-{
- struct i40e_q_vector *q_vector = data;
-
- if (!q_vector->tx.ring && !q_vector->rx.ring)
- return IRQ_HANDLED;
-
- pr_info("fdir ring cleaning needed\n");
-
- return IRQ_HANDLED;
-}
-
-/**
* i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
* @vsi: the VSI being configured
* @basename: name for the vector
@@ -2737,20 +2777,21 @@ static irqreturn_t i40e_intr(int irq, void *data)
{
struct i40e_pf *pf = (struct i40e_pf *)data;
struct i40e_hw *hw = &pf->hw;
+ irqreturn_t ret = IRQ_NONE;
u32 icr0, icr0_remaining;
u32 val, ena_mask;
icr0 = rd32(hw, I40E_PFINT_ICR0);
-
- val = rd32(hw, I40E_PFINT_DYN_CTL0);
- val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
- wr32(hw, I40E_PFINT_DYN_CTL0, val);
+ ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
/* if sharing a legacy IRQ, we might get called w/o an intr pending */
if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
- return IRQ_NONE;
+ goto enable_intr;
- ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+ /* if interrupt but no bits showing, must be SWINT */
+ if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
+ (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
+ pf->sw_int_count++;
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
@@ -2790,14 +2831,31 @@ static irqreturn_t i40e_intr(int irq, void *data)
val = rd32(hw, I40E_GLGEN_RSTAT);
val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
>> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
- if (val & I40E_RESET_CORER)
+ if (val == I40E_RESET_CORER)
pf->corer_count++;
- else if (val & I40E_RESET_GLOBR)
+ else if (val == I40E_RESET_GLOBR)
pf->globr_count++;
- else if (val & I40E_RESET_EMPR)
+ else if (val == I40E_RESET_EMPR)
pf->empr_count++;
}
+ if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
+ icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
+ dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
+ u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
+
+ if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ i40e_ptp_tx_hwtstamp(pf);
+ prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK;
+ }
+
+ wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat);
+ }
+
/* If a critical error is pending we have no choice but to reset the
* device.
* Report and mask out any remaining unexpected interrupts.
@@ -2806,22 +2864,19 @@ static irqreturn_t i40e_intr(int irq, void *data)
if (icr0_remaining) {
dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
icr0_remaining);
- if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) ||
- (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
+ if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
(icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
- if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
- dev_info(&pf->pdev->dev, "HMC error interrupt\n");
- } else {
- dev_info(&pf->pdev->dev, "device will be reset\n");
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
- i40e_service_event_schedule(pf);
- }
+ dev_info(&pf->pdev->dev, "device will be reset\n");
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ i40e_service_event_schedule(pf);
}
ena_mask &= ~icr0_remaining;
}
+ ret = IRQ_HANDLED;
+enable_intr:
/* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
if (!test_bit(__I40E_DOWN, &pf->state)) {
@@ -2829,6 +2884,94 @@ static irqreturn_t i40e_intr(int irq, void *data)
i40e_irq_dynamic_enable_icr0(pf);
}
+ return ret;
+}
+
+/**
+ * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring: tx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+ struct i40e_vsi *vsi = tx_ring->vsi;
+ u16 i = tx_ring->next_to_clean;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_desc;
+
+ tx_buf = &tx_ring->tx_bi[i];
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+
+ dma_unmap_len_set(tx_buf, len, 0);
+
+
+ /* move to the next desc and buffer to clean */
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ i40e_irq_dynamic_enable(vsi,
+ tx_ring->q_vector->v_idx + vsi->base_vector);
+ }
+ return budget > 0;
+}
+
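
The index arithmetic above (subtracting tx_ring->count up front and testing
!i after each increment) is the driver's usual trick of biasing the ring
index negative so the wrap check is a single zero test. Sketched in
isolation (illustrative helper, not driver code):

	/* i is kept at (real_index - count); after i++, 0 means wrap */
	static unsigned int biased_ring_advance(unsigned int real_index,
						unsigned int count)
	{
		int i = (int)real_index - (int)count;	/* always negative */

		i++;
		if (!i)			/* ran past the last descriptor */
			i -= count;
		return i + count;	/* recover the real index */
	}
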
+/**
+ * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+ struct i40e_vsi *vsi;
+
+ if (!q_vector->tx.ring)
+ return IRQ_HANDLED;
+
+ vsi = q_vector->tx.ring->vsi;
+ i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
+
return IRQ_HANDLED;
}
@@ -2971,28 +3114,20 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
} while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
- if (enable) {
- /* is STAT set ? */
- if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
- dev_info(&pf->pdev->dev,
- "Tx %d already enabled\n", i);
- continue;
- }
- } else {
- /* is !STAT set ? */
- if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
- dev_info(&pf->pdev->dev,
- "Tx %d already disabled\n", i);
- continue;
- }
- }
+ /* Skip if the queue is already in the requested state */
+ if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ continue;
+ if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ continue;
/* turn on/off the queue */
- if (enable)
+ if (enable) {
+ wr32(hw, I40E_QTX_HEAD(pf_q), 0);
tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
I40E_QTX_ENA_QENA_STAT_MASK;
- else
+ } else {
tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ }
wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
@@ -3016,6 +3151,9 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
}
}
+ if (hw->revision_id == 0)
+ mdelay(50);
+
return 0;
}
@@ -3088,9 +3226,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
* @vsi: the VSI being configured
* @enable: start or stop the rings
**/
-static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
+int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
{
- int ret;
+ int ret = 0;
/* do rx first for enable and last for disable */
if (request) {
@@ -3099,10 +3237,9 @@ static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
return ret;
ret = i40e_vsi_control_tx(vsi, request);
} else {
- ret = i40e_vsi_control_tx(vsi, request);
- if (ret)
- return ret;
- ret = i40e_vsi_control_rx(vsi, request);
+ /* Ignore return value, we need to shut down whatever we can */
+ i40e_vsi_control_tx(vsi, request);
+ i40e_vsi_control_rx(vsi, request);
}
return ret;
@@ -3128,7 +3265,8 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
u16 vector = i + base;
/* free only the irqs that were actually requested */
- if (vsi->q_vectors[i]->num_ringpairs == 0)
+ if (!vsi->q_vectors[i] ||
+ !vsi->q_vectors[i]->num_ringpairs)
continue;
/* clear the affinity_mask in the IRQ descriptor */
@@ -3540,7 +3678,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
/* Get the VSI level BW configuration per TC */
aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
- NULL);
+ NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
"couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
@@ -3751,6 +3889,149 @@ out:
}
/**
+ * i40e_veb_config_tc - Configure TCs for given VEB
+ * @veb: given VEB
+ * @enabled_tc: TC bitmap
+ *
+ * Configures given TC bitmap for VEB (switching) element
+ **/
+int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
+{
+ struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
+ struct i40e_pf *pf = veb->pf;
+ int ret = 0;
+ int i;
+
+ /* No TCs or already enabled TCs just return */
+ if (!enabled_tc || veb->enabled_tc == enabled_tc)
+ return ret;
+
+ bw_data.tc_valid_bits = enabled_tc;
+ /* bw_data.absolute_credits is not set (relative) */
+
+ /* Enable ETS TCs with equal BW Share for now */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i))
+ bw_data.tc_bw_share_credits[i] = 1;
+ }
+
+ ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
+ &bw_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "veb bw config failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ goto out;
+ }
+
+ /* Update the BW information */
+ ret = i40e_veb_get_bw_info(veb);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed getting veb bw config, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ }
+
+out:
+ return ret;
+}
+
+#ifdef CONFIG_I40E_DCB
+/**
+ * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
+ * @pf: PF struct
+ *
+ * Reconfigure VEB/VSIs on a given PF; it is assumed that
+ * the caller has quiesced all the VSIs before calling
+ * this function
+ **/
+static void i40e_dcb_reconfigure(struct i40e_pf *pf)
+{
+ u8 tc_map = 0;
+ int ret;
+ u8 v;
+
+ /* Enable the TCs available on PF to all VEBs */
+ tc_map = i40e_pf_get_tc_map(pf);
+ for (v = 0; v < I40E_MAX_VEB; v++) {
+ if (!pf->veb[v])
+ continue;
+ ret = i40e_veb_config_tc(pf->veb[v], tc_map);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed configuring TC for VEB seid=%d\n",
+ pf->veb[v]->seid);
+ /* Try to configure as many components as possible */
+ }
+ }
+
+ /* Update each VSI */
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (!pf->vsi[v])
+ continue;
+
+ /* - Enable all TCs for the LAN VSI
+ * - For all others keep them at TC0 for now
+ */
+ if (v == pf->lan_vsi)
+ tc_map = i40e_pf_get_tc_map(pf);
+ else
+ tc_map = i40e_pf_get_default_tc(pf);
+
+ ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed configuring TC for VSI seid=%d\n",
+ pf->vsi[v]->seid);
+ /* Try to configure as many components as possible */
+ } else {
+ if (pf->vsi[v]->netdev)
+ i40e_dcbnl_set_all(pf->vsi[v]);
+ }
+ }
+}
+
+/**
+ * i40e_init_pf_dcb - Initialize DCB configuration
+ * @pf: PF being configured
+ *
+ * Query the current DCB configuration and cache it
+ * in the hardware structure
+ **/
+static int i40e_init_pf_dcb(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int err = 0;
+
+ if (pf->hw.func_caps.npar_enable)
+ goto out;
+
+ /* Get the initial DCB configuration */
+ err = i40e_init_dcb(hw);
+ if (!err) {
+ /* Device/Function is not DCBX capable */
+ if ((!hw->func_caps.dcb) ||
+ (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
+ dev_info(&pf->pdev->dev,
+ "DCBX offload is not supported or is disabled for this PF.\n");
+
+ if (pf->flags & I40E_FLAG_MFP_ENABLED)
+ goto out;
+
+ } else {
+ /* When status is not DISABLED then DCBX is in FW */
+ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
+ DCB_CAP_DCBX_VER_IEEE;
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
+ }
+ }
+
+out:
+ return err;
+}
+#endif /* CONFIG_I40E_DCB */
+
+/**
* i40e_up_complete - Finish the last steps of bringing up a connection
* @vsi: the VSI being configured
**/
@@ -3954,22 +4235,28 @@ static int i40e_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev, vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(netdev, vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
err = i40e_up_complete(vsi);
if (err)
goto err_up_complete;
- if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
- err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
- if (err)
- netdev_info(netdev,
- "couldn't set broadcast err %d aq_err %d\n",
- err, pf->hw.aq.asq_last_status);
- }
+#ifdef CONFIG_I40E_VXLAN
+ vxlan_get_rx_port(netdev);
+#endif
return 0;
err_up_complete:
i40e_down(vsi);
+err_set_queues:
i40e_vsi_free_irq(vsi);
err_setup_rx:
i40e_vsi_free_rx_resources(vsi);
@@ -4051,6 +4338,24 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
i40e_flush(&pf->hw);
+ } else if (reset_flags & (1 << __I40E_EMP_RESET_REQUESTED)) {
+
+ /* Request a Firmware Reset
+ *
+ * Same as Global reset, plus restarting the
+ * embedded firmware engine.
+ */
+ /* enable EMP Reset */
+ val = rd32(&pf->hw, I40E_GLGEN_RSTENA_EMP);
+ val |= I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RSTENA_EMP, val);
+
+ /* force the reset */
+ val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ val |= I40E_GLGEN_RTRIG_EMPFWR_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+ i40e_flush(&pf->hw);
+
} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
/* Request a PF Reset
@@ -4088,6 +4393,144 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
}
}
+#ifdef CONFIG_I40E_DCB
+/**
+ * i40e_dcb_need_reconfig - Check if DCB needs reconfig
+ * @pf: board private structure
+ * @old_cfg: current DCB config
+ * @new_cfg: new DCB config
+ **/
+bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
+ struct i40e_dcbx_config *old_cfg,
+ struct i40e_dcbx_config *new_cfg)
+{
+ bool need_reconfig = false;
+
+ /* Check if ETS configuration has changed */
+ if (memcmp(&new_cfg->etscfg,
+ &old_cfg->etscfg,
+ sizeof(new_cfg->etscfg))) {
+ /* If Priority Table has changed reconfig is needed */
+ if (memcmp(&new_cfg->etscfg.prioritytable,
+ &old_cfg->etscfg.prioritytable,
+ sizeof(new_cfg->etscfg.prioritytable))) {
+ need_reconfig = true;
+ dev_info(&pf->pdev->dev, "ETS UP2TC changed.\n");
+ }
+
+ if (memcmp(&new_cfg->etscfg.tcbwtable,
+ &old_cfg->etscfg.tcbwtable,
+ sizeof(new_cfg->etscfg.tcbwtable)))
+ dev_info(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+
+ if (memcmp(&new_cfg->etscfg.tsatable,
+ &old_cfg->etscfg.tsatable,
+ sizeof(new_cfg->etscfg.tsatable)))
+ dev_info(&pf->pdev->dev, "ETS TSA Table changed.\n");
+ }
+
+ /* Check if PFC configuration has changed */
+ if (memcmp(&new_cfg->pfc,
+ &old_cfg->pfc,
+ sizeof(new_cfg->pfc))) {
+ need_reconfig = true;
+ dev_info(&pf->pdev->dev, "PFC config change detected.\n");
+ }
+
+ /* Check if APP Table has changed */
+ if (memcmp(&new_cfg->app,
+ &old_cfg->app,
+ sizeof(new_cfg->app))) {
+ need_reconfig = true;
+ dev_info(&pf->pdev->dev, "APP Table change detected.\n");
+ }
+
+ return need_reconfig;
+}
+
+/**
+ * i40e_handle_lldp_event - Handle LLDP Change MIB event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ **/
+static int i40e_handle_lldp_event(struct i40e_pf *pf,
+ struct i40e_arq_event_info *e)
+{
+ struct i40e_aqc_lldp_get_mib *mib =
+ (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
+ struct i40e_dcbx_config tmp_dcbx_cfg;
+ bool need_reconfig = false;
+ int ret = 0;
+ u8 type;
+
+ /* Ignore if event is not for Nearest Bridge */
+ type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
+ & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
+ return ret;
+
+ /* Check MIB Type and return if event for Remote MIB update */
+ type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+ if (type == I40E_AQ_LLDP_MIB_REMOTE) {
+ /* Update the remote cached instance and return */
+ ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ &hw->remote_dcbx_config);
+ goto exit;
+ }
+
+ /* Convert/store the DCBX data from LLDPDU temporarily */
+ memset(&tmp_dcbx_cfg, 0, sizeof(tmp_dcbx_cfg));
+ ret = i40e_lldp_to_dcb_config(e->msg_buf, &tmp_dcbx_cfg);
+ if (ret) {
+ /* Error parsing the LLDPDU, bail out */
+ dev_info(&pf->pdev->dev, "Failed parsing LLDPDU from event buffer\n");
+ goto exit;
+ }
+
+ /* No change detected in DCBX configs */
+ if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
+ dev_info(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
+ goto exit;
+ }
+
+ need_reconfig = i40e_dcb_need_reconfig(pf, dcbx_cfg, &tmp_dcbx_cfg);
+
+ i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg);
+
+ /* Overwrite the new configuration */
+ *dcbx_cfg = tmp_dcbx_cfg;
+
+ if (!need_reconfig)
+ goto exit;
+
+ /* Reconfiguration needed; quiesce all VSIs */
+ i40e_pf_quiesce_all_vsi(pf);
+
+ /* Configuration changed; update the VEBs/VSIs */
+ i40e_dcb_reconfigure(pf);
+
+ i40e_pf_unquiesce_all_vsi(pf);
+exit:
+ return ret;
+}
+#endif /* CONFIG_I40E_DCB */
+
+/**
+ * i40e_do_reset_safe - Protected reset path for userland calls.
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ *
+ **/
+void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
+{
+ rtnl_lock();
+ i40e_do_reset(pf, reset_flags);
+ rtnl_unlock();
+}
+
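
A hypothetical call site, for illustration only (userland-triggered paths
such as ethtool or debugfs would request a PF reset this way):

	i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
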
/**
* i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
* @pf: board private structure
@@ -4242,6 +4685,9 @@ static void i40e_link_event(struct i40e_pf *pf)
if (pf->vf)
i40e_vc_notify_link_state(pf);
+
+ if (pf->flags & I40E_FLAG_PTP)
+ i40e_ptp_set_increment(pf);
}
/**
@@ -4323,6 +4769,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
for (i = 0; i < I40E_MAX_VEB; i++)
if (pf->veb[i])
i40e_update_veb_stats(pf->veb[i]);
+
+ i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
}
/**
@@ -4333,6 +4781,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
{
u32 reset_flags = 0;
+ rtnl_lock();
if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
reset_flags |= (1 << __I40E_REINIT_REQUESTED);
clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
@@ -4355,7 +4804,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
*/
if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
i40e_handle_reset_warning(pf);
- return;
+ goto unlock;
}
/* If we're already down or resetting, just bail */
@@ -4363,6 +4812,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
!test_bit(__I40E_DOWN, &pf->state) &&
!test_bit(__I40E_CONFIG_BUSY, &pf->state))
i40e_do_reset(pf, reset_flags);
+
+unlock:
+ rtnl_unlock();
}
/**
@@ -4426,6 +4878,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
return;
do {
+ event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
ret = i40e_clean_arq_element(hw, &event, &pending);
if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
dev_info(&pf->pdev->dev, "No ARQ event found\n");
@@ -4451,15 +4904,23 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
break;
case i40e_aqc_opc_lldp_update_mib:
dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
+#ifdef CONFIG_I40E_DCB
+ rtnl_lock();
+ ret = i40e_handle_lldp_event(pf, &event);
+ rtnl_unlock();
+#endif /* CONFIG_I40E_DCB */
break;
case i40e_aqc_opc_event_lan_overflow:
dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
i40e_handle_lan_overflow_event(pf, &event);
break;
+ case i40e_aqc_opc_send_msg_to_peer:
+ dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
+ break;
default:
dev_info(&pf->pdev->dev,
- "ARQ Error: Unknown event %d received\n",
- event.desc.opcode);
+ "ARQ Error: Unknown event 0x%04x received\n",
+ opcode);
break;
}
} while (pending && (i++ < pf->adminq_work_limit));
@@ -4589,6 +5050,9 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
}
} while (err);
+ /* increment MSI-X count because current FW skips one */
+ pf->hw.func_caps.num_msix_vectors++;
+
if (pf->hw.debug_mask & I40E_DEBUG_USER)
dev_info(&pf->pdev->dev,
"pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@ -4600,57 +5064,89 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
pf->hw.func_caps.num_tx_qp,
pf->hw.func_caps.num_vsis);
+#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
+ + pf->hw.func_caps.num_vfs)
+ if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
+ dev_info(&pf->pdev->dev,
+ "got num_vsis %d, setting num_vsis to %d\n",
+ pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
+ pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
+ }
+
return 0;
}
+static int i40e_vsi_clear(struct i40e_vsi *vsi);
+
/**
- * i40e_fdir_setup - initialize the Flow Director resources
+ * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
* @pf: board private structure
**/
-static void i40e_fdir_setup(struct i40e_pf *pf)
+static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
struct i40e_vsi *vsi;
bool new_vsi = false;
int err, i;
- if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED)))
+ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
- pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-
- /* find existing or make new FDIR VSI */
+ /* find existing VSI and see if it needs configuring */
vsi = NULL;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
vsi = pf->vsi[i];
+ break;
+ }
+ }
+
+ /* create a new VSI if none exists */
if (!vsi) {
- vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0);
+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
+ pf->vsi[pf->lan_vsi]->seid, 0);
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
- pf->flags &= ~I40E_FLAG_FDIR_ENABLED;
- return;
+ goto err_vsi;
}
new_vsi = true;
}
- WARN_ON(vsi->base_queue != I40E_FDIR_RING);
- i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings);
+ i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
err = i40e_vsi_setup_tx_resources(vsi);
- if (!err)
- err = i40e_vsi_setup_rx_resources(vsi);
- if (!err)
- err = i40e_vsi_configure(vsi);
- if (!err && new_vsi) {
+ if (err)
+ goto err_setup_tx;
+ err = i40e_vsi_setup_rx_resources(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ if (new_vsi) {
char int_name[IFNAMSIZ + 9];
+ err = i40e_vsi_configure(vsi);
+ if (err)
+ goto err_setup_rx;
snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
dev_driver_string(&pf->pdev->dev));
err = i40e_vsi_request_irq(vsi, int_name);
- }
- if (!err)
+ if (err)
+ goto err_setup_rx;
err = i40e_up_complete(vsi);
+ if (err)
+ goto err_up_complete;
+ }
clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
+ return;
+
+err_up_complete:
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+err_setup_rx:
+ i40e_vsi_free_rx_resources(vsi);
+err_setup_tx:
+ i40e_vsi_free_tx_resources(vsi);
+err_vsi:
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ i40e_vsi_clear(vsi);
}
/**
@@ -4670,26 +5166,25 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
}
/**
- * i40e_handle_reset_warning - prep for the core to reset
+ * i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
*
- * Close up the VFs and other things in prep for a Core Reset,
- * then get ready to rebuild the world.
- **/
-static void i40e_handle_reset_warning(struct i40e_pf *pf)
+ * Close up the VFs and other things in prep for pf Reset.
+ **/
+static int i40e_prep_for_reset(struct i40e_pf *pf)
{
- struct i40e_driver_version dv;
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
u32 v;
clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
- return;
+ return 0;
dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
- i40e_vc_notify_reset(pf);
+ if (i40e_check_asq_alive(hw))
+ i40e_vc_notify_reset(pf);
/* quiesce the VSIs and their queues that are not already DOWN */
i40e_pf_quiesce_all_vsi(pf);
@@ -4701,6 +5196,27 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
i40e_shutdown_adminq(&pf->hw);
+ /* call shutdown HMC */
+ ret = i40e_shutdown_lan_hmc(hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+ }
+ return ret;
+}
+
+/**
+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to be re-initialized.
+ **/
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
+{
+ struct i40e_driver_version dv;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u32 v;
+
/* Now we wait for GRST to settle out.
* We don't have to delete the VEBs or VSIs from the hw switch
* because the reset will make them disappear.
@@ -4728,13 +5244,6 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
goto end_core_reset;
}
- /* call shutdown HMC */
- ret = i40e_shutdown_lan_hmc(hw);
- if (ret) {
- dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
- goto end_core_reset;
- }
-
ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp,
pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
@@ -4748,8 +5257,16 @@ static void i40e_handle_reset_warning(struct i40e_pf *pf)
goto end_core_reset;
}
+#ifdef CONFIG_I40E_DCB
+ ret = i40e_init_pf_dcb(pf);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "init_pf_dcb failed: %d\n", ret);
+ goto end_core_reset;
+ }
+#endif /* CONFIG_I40E_DCB */
+
/* do basic switch setup */
- ret = i40e_setup_pf_switch(pf);
+ ret = i40e_setup_pf_switch(pf, reinit);
if (ret)
goto end_core_reset;
@@ -4828,6 +5345,22 @@ end_core_reset:
}
/**
+ * i40e_handle_reset_warning - prep for the pf to reset, reset and rebuild
+ * @pf: board private structure
+ *
+ * Close up the VFs and other things in prep for a Core Reset,
+ * then get ready to rebuild the world.
+ **/
+static void i40e_handle_reset_warning(struct i40e_pf *pf)
+{
+ i40e_status ret;
+
+ ret = i40e_prep_for_reset(pf);
+ if (!ret)
+ i40e_reset_and_rebuild(pf, false);
+}
+
+/**
* i40e_handle_mdd_event
* @pf: pointer to the pf structure
*
@@ -4908,6 +5441,52 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
i40e_flush(hw);
}
+#ifdef CONFIG_I40E_VXLAN
+/**
+ * i40e_sync_vxlan_filters_subtask - Sync VXLAN UDP port filters with HW
+ * @pf: board private structure
+ **/
+static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
+{
+ const int vxlan_hdr_qwords = 4;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u8 filter_index;
+ __be16 port;
+ int i;
+
+ if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
+ return;
+
+ pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
+
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->pending_vxlan_bitmap & (1 << i)) {
+ pf->pending_vxlan_bitmap &= ~(1 << i);
+ port = pf->vxlan_ports[i];
+ ret = port ?
+ i40e_aq_add_udp_tunnel(hw, ntohs(port),
+ vxlan_hdr_qwords,
+ I40E_AQC_TUNNEL_TYPE_VXLAN,
+ &filter_index, NULL)
+ : i40e_aq_del_udp_tunnel(hw, i, NULL);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev, "Failed to execute AQ command for %s port %d with index %d\n",
+ port ? "adding" : "deleting",
+ ntohs(port), i);
+
+ pf->vxlan_ports[i] = 0;
+ } else {
+ dev_info(&pf->pdev->dev, "%s port %d with AQ command with index %d\n",
+ port ? "Added" : "Deleted",
+ ntohs(port), port ? i : filter_index);
+ }
+ }
+ }
+}
+
+#endif
/**
* i40e_service_task - Run the driver's async subtasks
* @work: pointer to work_struct containing our data
@@ -4926,6 +5505,9 @@ static void i40e_service_task(struct work_struct *work)
i40e_fdir_reinit_subtask(pf);
i40e_check_hang_subtask(pf);
i40e_sync_filters_subtask(pf);
+#ifdef CONFIG_I40E_VXLAN
+ i40e_sync_vxlan_filters_subtask(pf);
+#endif
i40e_clean_adminq_subtask(pf);
i40e_service_event_complete(pf);
@@ -5003,6 +5585,42 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
}
/**
+ * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
+ * @vsi: pointer to the VSI
+ * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
+ *
+ * On error: returns error code (negative)
+ * On success: returns 0
+ **/
+static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
+{
+ int size;
+ int ret = 0;
+
+ /* allocate memory for both Tx and Rx ring pointers */
+ size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+ vsi->tx_rings = kzalloc(size, GFP_KERNEL);
+ if (!vsi->tx_rings)
+ return -ENOMEM;
+ vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+
+ if (alloc_qvectors) {
+ /* allocate memory for q_vector pointers */
+ size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
+ vsi->q_vectors = kzalloc(size, GFP_KERNEL);
+ if (!vsi->q_vectors) {
+ ret = -ENOMEM;
+ goto err_vectors;
+ }
+ }
+ return ret;
+
+err_vectors:
+ kfree(vsi->tx_rings);
+ return ret;
+}
+
+/**
* i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
* @pf: board private structure
* @type: type of VSI
@@ -5014,8 +5632,6 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
int ret = -ENODEV;
struct i40e_vsi *vsi;
- int sz_vectors;
- int sz_rings;
int vsi_idx;
int i;
@@ -5065,22 +5681,9 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
if (ret)
goto err_rings;
- /* allocate memory for ring pointers */
- sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
- vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
- if (!vsi->tx_rings) {
- ret = -ENOMEM;
+ ret = i40e_vsi_alloc_arrays(vsi, true);
+ if (ret)
goto err_rings;
- }
- vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
-
- /* allocate memory for q_vector pointers */
- sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
- vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
- if (!vsi->q_vectors) {
- ret = -ENOMEM;
- goto err_vectors;
- }
/* Setup default MSIX irq handler for VSI */
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
@@ -5089,8 +5692,6 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
ret = vsi_idx;
goto unlock_pf;
-err_vectors:
- kfree(vsi->tx_rings);
err_rings:
pf->next_vsi = i - 1;
kfree(vsi);
@@ -5100,6 +5701,26 @@ unlock_pf:
}
/**
+ * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
+ * @vsi: pointer to the VSI
+ * @free_qvectors: a bool to specify if q_vectors need to be freed.
+ **/
+static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
+{
+ /* free the ring and vector containers */
+ if (free_qvectors) {
+ kfree(vsi->q_vectors);
+ vsi->q_vectors = NULL;
+ }
+ kfree(vsi->tx_rings);
+ vsi->tx_rings = NULL;
+ vsi->rx_rings = NULL;
+}
+
+/**
* i40e_vsi_clear - Deallocate the VSI provided
* @vsi: the VSI being un-configured
**/
@@ -5135,9 +5756,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
- /* free the ring and vector containers */
- kfree(vsi->q_vectors);
- kfree(vsi->tx_rings);
+ i40e_vsi_free_arrays(vsi, true);
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi)
@@ -5155,18 +5774,17 @@ free_vsi:
* i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
* @vsi: the VSI being cleaned
**/
-static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
int i;
- if (vsi->tx_rings[0])
+ if (vsi->tx_rings && vsi->tx_rings[0]) {
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
kfree_rcu(vsi->tx_rings[i], rcu);
vsi->tx_rings[i] = NULL;
vsi->rx_rings[i] = NULL;
}
-
- return 0;
+ }
}
/**
@@ -5183,6 +5801,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
struct i40e_ring *tx_ring;
struct i40e_ring *rx_ring;
+ /* allocate space for both Tx and Rx in one shot */
tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
if (!tx_ring)
goto err_out;
@@ -5286,19 +5905,22 @@ static int i40e_init_msix(struct i40e_pf *pf)
/* The number of vectors we'll request will be comprised of:
* - Add 1 for "other" cause for Admin Queue events, etc.
* - The number of LAN queue pairs
- * already adjusted for the NUMA node
- * assumes symmetric Tx/Rx pairing
+ * - Queues being used for RSS.
+ * We don't need as many as max_rss_size vectors;
+ * use rss_size instead in the calculation, since that
+ * is governed by the number of CPUs in the system.
+ * - assumes symmetric Tx/Rx pairing
* - The number of VMDq pairs
* Once we count this up, try the request.
*
* If we can't get what we want, we'll simplify to nearly nothing
* and try again. If that still fails, we punt.
*/
- pf->num_lan_msix = pf->num_lan_qps;
+ pf->num_lan_msix = pf->num_lan_qps - (pf->rss_size_max - pf->rss_size);
pf->num_vmdq_msix = pf->num_vmdq_qps;
v_budget = 1 + pf->num_lan_msix;
v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
- if (pf->flags & I40E_FLAG_FDIR_ENABLED)
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
v_budget++;
/* Scale down if necessary, and the rings will share vectors */
@@ -5434,14 +6056,13 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
err = i40e_init_msix(pf);
if (err) {
- pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
- I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_SRIOV_ENABLED |
- I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
+ pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
+ I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
/* rework the queue expectations without MSIX */
i40e_determine_queue_usage(pf);
@@ -5510,15 +6131,15 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
**/
static int i40e_config_rss(struct i40e_pf *pf)
{
- struct i40e_hw *hw = &pf->hw;
- u32 lut = 0;
- int i, j;
- u64 hena;
/* Set of random keys generated using kernel random number generator */
static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
+ struct i40e_hw *hw = &pf->hw;
+ u32 lut = 0;
+ int i, j;
+ u64 hena;
/* Fill out hash function seed */
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
@@ -5527,16 +6148,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
- hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)|
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ hena |= I40E_DEFAULT_RSS_HENA;
wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
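
The ten per-PCTYPE bits deleted above are evidently collected into the new
I40E_DEFAULT_RSS_HENA macro, whose definition lives in i40e.h and is not
shown in this diff; its presumed shape, reconstructed from the removed lines:

	#define I40E_DEFAULT_RSS_HENA ( \
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
		((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
		((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
		((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
		((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6))
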
@@ -5565,6 +6177,34 @@ static int i40e_config_rss(struct i40e_pf *pf)
}
/**
+ * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
+ * @pf: board private structure
+ * @queue_count: the requested queue count for rss.
+ *
+ * Returns 0 if RSS is not enabled; if enabled, returns the final RSS queue
+ * count, which may differ from the requested queue count.
+ **/
+int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
+{
+ if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
+ return 0;
+
+ queue_count = min_t(int, queue_count, pf->rss_size_max);
+ queue_count = rounddown_pow_of_two(queue_count);
+
+ if (queue_count != pf->rss_size) {
+ i40e_prep_for_reset(pf);
+
+ pf->rss_size = queue_count;
+
+ i40e_reset_and_rebuild(pf, true);
+ i40e_config_rss(pf);
+ }
+ dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
+ return pf->rss_size;
+}
+
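
Because the request is clamped to rss_size_max and then rounded down to a
power of two, the count handed back can be smaller than what was asked for.
A sketch of the effective mapping (illustrative helper only):

	#include <linux/kernel.h>	/* min_t() */
	#include <linux/log2.h>		/* rounddown_pow_of_two() */

	static int rss_queues_applied(int requested, int rss_size_max)
	{
		int n = min_t(int, requested, rss_size_max);

		return rounddown_pow_of_two(n);	/* e.g. 10 -> 8, 16 -> 16 */
	}
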
+/**
* i40e_sw_init - Initialize general software structures (struct i40e_pf)
* @pf: board private structure to initialize
*
@@ -5579,6 +6219,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
+ pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
if (I40E_DEBUG_USER & debug)
pf->hw.debug_mask = debug;
@@ -5590,39 +6231,47 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
I40E_FLAG_MSIX_ENABLED |
- I40E_FLAG_RX_PS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
I40E_FLAG_RX_1BUF_ENABLED;
+ /* Depending on PF configurations, it is possible that the RSS
+ * maximum might end up larger than the available queues
+ */
pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+ pf->rss_size_max = min_t(int, pf->rss_size_max,
+ pf->hw.func_caps.num_tx_qp);
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
- pf->rss_size = min_t(int, pf->rss_size_max,
- nr_cpus_node(numa_node_id()));
+ pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
+ pf->rss_size = rounddown_pow_of_two(pf->rss_size);
} else {
pf->rss_size = 1;
}
- if (pf->hw.func_caps.dcb)
- pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
- else
- pf->num_tc_qps = 0;
+ /* MFP mode enabled */
+ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+ pf->flags |= I40E_FLAG_MFP_ENABLED;
+ dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+ }
- if (pf->hw.func_caps.fd) {
- /* FW/NVM is not yet fixed in this regard */
- if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
- (pf->hw.func_caps.fd_filters_best_effort > 0)) {
- pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED;
- dev_info(&pf->pdev->dev,
- "Flow Director ATR mode Enabled\n");
- pf->flags |= I40E_FLAG_FDIR_ENABLED;
+ /* FW/NVM is not yet fixed in this regard */
+ if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
+ (pf->hw.func_caps.fd_filters_best_effort > 0)) {
+ pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+ dev_info(&pf->pdev->dev,
+ "Flow Director ATR mode Enabled\n");
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
dev_info(&pf->pdev->dev,
"Flow Director Side Band mode Enabled\n");
- pf->fdir_pf_filter_count =
- pf->hw.func_caps.fd_filters_guaranteed;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Flow Director Side Band mode Disabled in MFP mode\n");
}
- } else {
- pf->fdir_pf_filter_count = 0;
+ pf->fdir_pf_filter_count =
+ pf->hw.func_caps.fd_filters_guaranteed;
+ pf->hw.fdir_shared_filter_count =
+ pf->hw.func_caps.fd_filters_best_effort;
}
if (pf->hw.func_caps.vmdq) {
@@ -5631,12 +6280,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
}
- /* MFP mode enabled */
- if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
- pf->flags |= I40E_FLAG_MFP_ENABLED;
- dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
- }
-
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs) {
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
@@ -5644,6 +6287,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->num_req_vfs = min_t(int,
pf->hw.func_caps.num_vfs,
I40E_MAX_VF_COUNT);
+ dev_info(&pf->pdev->dev,
+ "Number of VFs being requested for PF[%d] = %d\n",
+ pf->hw.pf_id, pf->num_req_vfs);
}
#endif /* CONFIG_PCI_IOV */
pf->eeprom_version = 0xDEAD;
@@ -5698,6 +6344,104 @@ static int i40e_set_features(struct net_device *netdev,
return 0;
}
+#ifdef CONFIG_I40E_VXLAN
+/**
+ * i40e_get_vxlan_port_idx - Look up a possibly offloaded Rx UDP port
+ * @pf: board private structure
+ * @port: The UDP port to look up
+ *
+ * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
+ **/
+static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
+{
+ u8 i;
+
+ for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+ if (pf->vxlan_ports[i] == port)
+ return i;
+ }
+
+ return i;
+}
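Since the scan returns the matching slot index, or the sentinel I40E_MAX_PF_UDP_OFFLOAD_PORTS when nothing matches, the helper does double duty below: called with a real port it answers "already offloaded?", and called with 0 it finds the first free slot, because free slots hold port 0. A runnable sketch of the pattern (table size assumed):

#include <stdint.h>
#include <stdio.h>

#define MAX_PORTS 16	/* stands in for I40E_MAX_PF_UDP_OFFLOAD_PORTS */

static uint16_t ports[MAX_PORTS];	/* 0 marks a free slot */

static uint8_t port_idx(uint16_t port)
{
	uint8_t i;

	for (i = 0; i < MAX_PORTS; i++)
		if (ports[i] == port)
			return i;
	return i;	/* == MAX_PORTS: not found */
}

int main(void)
{
	ports[3] = 4789;	/* pretend one port is already offloaded */

	printf("lookup 4789 -> %u\n", port_idx(4789));	/* 3 */
	printf("free slot   -> %u\n", port_idx(0));	/* 0: first free */
	printf("lookup 6081 -> %u\n", port_idx(6081));	/* 16: not found */
	return 0;
}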
+
+/**
+ * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: New UDP port number that VXLAN started listening to
+ **/
+static void i40e_add_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 next_idx;
+ u8 idx;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ netdev_info(netdev, "Port %d already offloaded\n", ntohs(port));
+ return;
+ }
+
+ /* Now check if there is space to add the new port */
+ next_idx = i40e_get_vxlan_port_idx(pf, 0);
+
+ if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ netdev_info(netdev, "Maximum number of UDP ports reached, not adding port %d\n",
+ ntohs(port));
+ return;
+ }
+
+ /* New port: add it and mark its index in the bitmap */
+ pf->vxlan_ports[next_idx] = port;
+ pf->pending_vxlan_bitmap |= (1 << next_idx);
+
+ pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+}
+
+/**
+ * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
+ * @netdev: This physical port's netdev
+ * @sa_family: Socket Family that VXLAN is notifying us about
+ * @port: UDP port number that VXLAN stopped listening to
+ **/
+static void i40e_del_vxlan_port(struct net_device *netdev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 idx;
+
+ if (sa_family == AF_INET6)
+ return;
+
+ idx = i40e_get_vxlan_port_idx(pf, port);
+
+ /* Check if port already exists */
+ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
+ /* if port exists, set it to 0 (mark for deletion)
+ * and make it pending
+ */
+ pf->vxlan_ports[idx] = 0;
+
+ pf->pending_vxlan_bitmap |= (1 << idx);
+
+ pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
+ } else {
+ netdev_warn(netdev, "Port %d was not found, not deleting\n",
+ ntohs(port));
+ }
+}
+
+#endif
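Neither handler talks to the hardware directly; both just record the change in vxlan_ports[], mark the slot in pending_vxlan_bitmap, and raise I40E_FLAG_VXLAN_FILTER_SYNC for the service task to consume later. A hedged, standalone sketch of what such a consumer pass could look like; this illustrates the producer/consumer pattern only, not the driver's actual sync routine:

#include <stdint.h>
#include <stdio.h>

#define MAX_PORTS 16	/* stands in for I40E_MAX_PF_UDP_OFFLOAD_PORTS */

static uint16_t ports[MAX_PORTS];	/* producer stores the port (0 = delete) */
static uint16_t pending;		/* producer marks the touched slot */
static int sync_needed;			/* I40E_FLAG_VXLAN_FILTER_SYNC analogue */

/* consumer: one pass that drains every pending slot */
static void sync_ports(void)
{
	int i;

	if (!sync_needed)
		return;
	sync_needed = 0;

	for (i = 0; i < MAX_PORTS; i++) {
		if (!(pending & (1u << i)))
			continue;
		pending &= ~(1u << i);
		if (ports[i])
			printf("slot %d: program port %u\n", i, ports[i]);
		else
			printf("slot %d: remove offload\n", i);
	}
}

int main(void)
{
	ports[0] = 4789;		/* add */
	pending |= 1u << 0;
	ports[2] = 0;			/* delete: port zeroed, slot marked */
	pending |= 1u << 2;
	sync_needed = 1;
	sync_ports();
	return 0;
}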
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -5707,6 +6451,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = i40e_set_mac,
.ndo_change_mtu = i40e_change_mtu,
+ .ndo_do_ioctl = i40e_ioctl,
.ndo_tx_timeout = i40e_tx_timeout,
.ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
@@ -5719,6 +6464,10 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
.ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
+#ifdef CONFIG_I40E_VXLAN
+ .ndo_add_vxlan_port = i40e_add_vxlan_port,
+ .ndo_del_vxlan_port = i40e_del_vxlan_port,
+#endif
};
/**
@@ -5729,6 +6478,7 @@ static const struct net_device_ops i40e_netdev_ops = {
**/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
+ u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_netdev_priv *np;
@@ -5778,6 +6528,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
random_ether_addr(mac_addr);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
}
+ i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
@@ -5811,10 +6562,6 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
return;
- /* there is no HW VSI for FDIR */
- if (vsi->type == I40E_VSI_FDIR)
- return;
-
i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
return;
}
@@ -5898,12 +6645,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
break;
case I40E_VSI_FDIR:
- /* no queue mapping or actual HW VSI needed */
- vsi->info.valid_sections = 0;
- vsi->seid = 0;
- vsi->id = 0;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
- return 0;
break;
case I40E_VSI_VMDQ2:
@@ -6130,6 +6877,69 @@ vector_setup_out:
}
/**
+ * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
+ * @vsi: pointer to the vsi.
+ *
+ * This re-allocates a vsi's queue resources.
+ *
+ * Returns a pointer to the rebuilt VSI sw struct on success,
+ * or NULL on failure.
+ **/
+static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 enabled_tc;
+ int ret;
+
+ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
+ i40e_vsi_clear_rings(vsi);
+
+ i40e_vsi_free_arrays(vsi, false);
+ i40e_set_num_rings_in_vsi(vsi);
+ ret = i40e_vsi_alloc_arrays(vsi, false);
+ if (ret)
+ goto err_vsi;
+
+ ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ if (ret < 0) {
+ dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
+ vsi->seid, ret);
+ goto err_vsi;
+ }
+ vsi->base_queue = ret;
+
+ /* Update the FW view of the VSI. Force a reset of TC and queue
+ * layout configurations.
+ */
+ enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+ pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
+ pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
+ i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+
+ /* assign it some queues */
+ ret = i40e_alloc_rings(vsi);
+ if (ret)
+ goto err_rings;
+
+ /* map all of the rings to the q_vectors */
+ i40e_vsi_map_rings_to_vectors(vsi);
+ return vsi;
+
+err_rings:
+ i40e_vsi_free_q_vectors(vsi);
+ if (vsi->netdev_registered) {
+ vsi->netdev_registered = false;
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+ i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
+err_vsi:
+ i40e_vsi_clear(vsi);
+ return NULL;
+}
+
+/**
* i40e_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @type: VSI type
@@ -6209,6 +7019,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
if (v_idx < 0)
goto err_alloc;
vsi = pf->vsi[v_idx];
+ if (!vsi)
+ goto err_alloc;
vsi->type = type;
vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
@@ -6217,7 +7029,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
else if (type == I40E_VSI_SRIOV)
vsi->vf_id = param1;
/* assign it some queues */
- ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
+ vsi->idx);
if (ret < 0) {
dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
vsi->seid, ret);
@@ -6243,6 +7056,10 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
goto err_netdev;
vsi->netdev_registered = true;
netif_carrier_off(vsi->netdev);
+#ifdef CONFIG_I40E_DCB
+ /* Setup DCB netlink interface */
+ i40e_dcbnl_setup(vsi);
+#endif /* CONFIG_I40E_DCB */
/* fall through */
case I40E_VSI_FDIR:
@@ -6500,12 +7317,14 @@ void i40e_veb_release(struct i40e_veb *veb)
**/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
- bool is_default = (vsi->idx == vsi->back->lan_vsi);
+ bool is_default = false;
+ bool is_cloud = false;
int ret;
/* get a VEB from the hardware */
ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
- veb->enabled_tc, is_default, &veb->seid, NULL);
+ veb->enabled_tc, is_default,
+ is_cloud, &veb->seid, NULL);
if (ret) {
dev_info(&veb->pf->pdev->dev,
"couldn't add VEB, err %d, aq_err %d\n",
@@ -6770,11 +7589,13 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
/**
* i40e_setup_pf_switch - Setup the HW switch on startup or after reset
* @pf: board private structure
+ * @reinit: true if the Main VSI needs to be re-initialized.
*
* Returns 0 on success, negative value on failure
**/
-static int i40e_setup_pf_switch(struct i40e_pf *pf)
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
+ u32 rxfc = 0, txfc = 0, rxfc_reg;
int ret;
/* find out what's out there already */
@@ -6787,14 +7608,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
}
i40e_pf_reset_stats(pf);
- /* fdir VSI must happen first to be sure it gets queue 0, but only
- * if there is enough room for the fdir VSI
- */
- if (pf->num_lan_qps > 1)
- i40e_fdir_setup(pf);
-
/* first time setup */
- if (pf->lan_vsi == I40E_NO_VSI) {
+ if (pf->lan_vsi == I40E_NO_VSI || reinit) {
struct i40e_vsi *vsi = NULL;
u16 uplink_seid;
@@ -6805,19 +7620,15 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
uplink_seid = pf->veb[pf->lan_veb]->seid;
else
uplink_seid = pf->mac_seid;
-
- vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ if (pf->lan_vsi == I40E_NO_VSI)
+ vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ else if (reinit)
+ vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
if (!vsi) {
dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
i40e_fdir_teardown(pf);
return -EAGAIN;
}
- /* accommodate kcompat by copying the main VSI queue count
- * into the pf, since this newer code pushes the pf queue
- * info down a level into a VSI
- */
- pf->num_rx_queues = vsi->alloc_queue_pairs;
- pf->num_tx_queues = vsi->alloc_queue_pairs;
} else {
/* force a reset of TC and queue layout configurations */
u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
@@ -6827,6 +7638,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
}
i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
+ i40e_fdir_sb_setup(pf);
+
/* Setup static PF queue filter control settings */
ret = i40e_setup_pf_filter_control(pf);
if (ret) {
@@ -6845,37 +7658,68 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf)
i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
i40e_link_event(pf);
- /* Initialize user-specifics link properties */
+ /* Initialize user-specific link properties */
pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
I40E_AQ_AN_COMPLETED) ? true : false);
- pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
- if (pf->hw.phy.link_info.an_info &
- (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
+ /* requested_mode is set in probe or by ethtool */
+ if (!pf->fc_autoneg_status)
+ goto no_autoneg;
+
+ if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) &&
+ (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))
pf->hw.fc.current_mode = I40E_FC_FULL;
else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
else
- pf->hw.fc.current_mode = I40E_FC_DEFAULT;
+ pf->hw.fc.current_mode = I40E_FC_NONE;
- return ret;
-}
+ /* sync the flow control settings with the auto-neg values */
+ switch (pf->hw.fc.current_mode) {
+ case I40E_FC_FULL:
+ txfc = 1;
+ rxfc = 1;
+ break;
+ case I40E_FC_TX_PAUSE:
+ txfc = 1;
+ rxfc = 0;
+ break;
+ case I40E_FC_RX_PAUSE:
+ txfc = 0;
+ rxfc = 1;
+ break;
+ case I40E_FC_NONE:
+ case I40E_FC_DEFAULT:
+ txfc = 0;
+ rxfc = 0;
+ break;
+ case I40E_FC_PFC:
+ /* TBD */
+ break;
+ /* no default case, we have to handle all possibilities here */
+ }
-/**
- * i40e_set_rss_size - helper to set rss_size
- * @pf: board private structure
- * @queues_left: how many queues
- */
-static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
-{
- int num_tc0;
+ wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
+
+ rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
+ ~I40E_PRTDCB_MFLCN_RFCE_MASK;
+ rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT);
+
+ wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg);
- num_tc0 = min_t(int, queues_left, pf->rss_size_max);
- num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
- num_tc0 = rounddown_pow_of_two(num_tc0);
+ goto fc_complete;
- return num_tc0;
+no_autoneg:
+ /* disable L2 flow control, user can turn it on if they wish */
+ wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0);
+ wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
+ ~I40E_PRTDCB_MFLCN_RFCE_MASK);
+
+fc_complete:
+ i40e_ptp_init(pf);
+
+ return ret;
}
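The switch above reduces to "txfc = mode pauses Tx, rxfc = mode pauses Rx", and the Rx side is then applied with a read-modify-write so only the RFCE field of I40E_PRTDCB_MFLCN changes. A standalone sketch of that masked field update (the mask and shift values here are placeholders, not the hardware layout):

#include <stdint.h>
#include <stdio.h>

/* placeholder field layout, not the real I40E_PRTDCB_MFLCN bits */
#define RFCE_SHIFT 4
#define RFCE_MASK  (0x1u << RFCE_SHIFT)

/* read-modify-write: change one field, preserve every other bit */
static uint32_t set_rfce(uint32_t reg, uint32_t rxfc)
{
	reg &= ~RFCE_MASK;
	reg |= (rxfc << RFCE_SHIFT) & RFCE_MASK;
	return reg;
}

int main(void)
{
	uint32_t reg = 0x0000abcd;	/* pretend current register value */

	printf("enable:  0x%08x\n", set_rfce(reg, 1));	/* 0x0000abdd */
	printf("disable: 0x%08x\n", set_rfce(reg, 0));	/* 0x0000abcd */
	return 0;
}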
/**
@@ -6884,12 +7728,9 @@ static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
**/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
- int accum_tc_size;
int queues_left;
pf->num_lan_qps = 0;
- pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
- accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;
/* Find the max queues to be put into basic use. We'll always be
* using TC0, whether or not DCB is running, and TC0 will get the
@@ -6897,99 +7738,45 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
*/
queues_left = pf->hw.func_caps.num_tx_qp;
- if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
- (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
- !(pf->flags & (I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
- (queues_left == 1)) {
-
+ if ((queues_left == 1) ||
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED) ||
+ !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_DCB_ENABLED))) {
/* one qp for PF, no queues for anything else */
queues_left = 0;
pf->rss_size = pf->num_lan_qps = 1;
/* make sure all the fancies are disabled */
- pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_MQ_ENABLED |
- I40E_FLAG_FDIR_ENABLED |
- I40E_FLAG_FDIR_ATR_ENABLED |
- I40E_FLAG_DCB_ENABLED |
- I40E_FLAG_SRIOV_ENABLED |
- I40E_FLAG_VMDQ_ENABLED);
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
-
- queues_left -= pf->rss_size;
- pf->num_lan_qps = pf->rss_size;
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- (pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- /* save num_tc_qps queues for TCs 1 thru 7 and the rest
- * are set up for RSS in TC0
- */
- queues_left -= accum_tc_size;
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
-
- queues_left -= pf->rss_size;
- if (queues_left < 0) {
- dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
- return;
- }
-
- pf->num_lan_qps = pf->rss_size + accum_tc_size;
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- queues_left -= 1; /* save 1 queue for FD */
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
-
- queues_left -= pf->rss_size;
- if (queues_left < 0) {
- dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
- return;
+ pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
+ } else {
+ /* Not enough queues for all TCs */
+ if ((pf->flags & I40E_FLAG_DCB_ENABLED) &&
+ (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
+ pf->num_lan_qps = pf->rss_size_max;
+ queues_left -= pf->num_lan_qps;
+ }
- pf->num_lan_qps = pf->rss_size;
-
- } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
- (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
- (pf->flags & I40E_FLAG_DCB_ENABLED)) {
-
- /* save 1 queue for TCs 1 thru 7,
- * 1 queue for flow director,
- * and the rest are set up for RSS in TC0
- */
- queues_left -= 1;
- queues_left -= accum_tc_size;
-
- pf->rss_size = i40e_set_rss_size(pf, queues_left);
- queues_left -= pf->rss_size;
- if (queues_left < 0) {
- dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
- return;
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ if (queues_left > 1) {
+ queues_left -= 1; /* save 1 queue for FD */
+ } else {
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
}
-
- pf->num_lan_qps = pf->rss_size + accum_tc_size;
-
- } else {
- dev_info(&pf->pdev->dev,
- "Invalid configuration, flags=0x%08llx\n", pf->flags);
- return;
}
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
pf->num_vf_qps && pf->num_req_vfs && queues_left) {
- pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
- pf->num_vf_qps));
+ pf->num_req_vfs = min_t(int, pf->num_req_vfs,
+ (queues_left / pf->num_vf_qps));
queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
}
@@ -7000,6 +7787,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
}
+ pf->queues_left = queues_left;
return;
}
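Under the reworked scheme the queue budget is spent in a fixed order: LAN (capped at rss_size_max), then one queue for Flow Director side-band, then VFs, then VMDq, with whatever remains recorded in pf->queues_left. A worked example with assumed capability numbers:

#include <stdio.h>

int main(void)
{
	/* assumed capability numbers, for illustration only */
	int queues_left = 64;			/* func_caps.num_tx_qp */
	int rss_size_max = 16;
	int num_req_vfs = 8, num_vf_qps = 4;
	int num_vmdq_vsis = 2, num_vmdq_qps = 2;
	int num_lan_qps;

	num_lan_qps = rss_size_max;		/* LAN takes the RSS max */
	queues_left -= num_lan_qps;		/* 64 - 16 = 48 */

	if (queues_left > 1)
		queues_left -= 1;		/* FD side-band queue: 47 */

	queues_left -= num_req_vfs * num_vf_qps;	/* 47 - 32 = 15 */
	queues_left -= num_vmdq_vsis * num_vmdq_qps;	/* 15 - 4 = 11 */

	printf("lan=%d spare=%d\n", num_lan_qps, queues_left);
	return 0;
}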
@@ -7021,7 +7809,7 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
/* Flow Director is enabled */
- if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
+ if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
settings->enable_fdir = true;
/* Ethtype and MACVLAN filters enabled for PF */
@@ -7050,6 +7838,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct i40e_driver_version dv;
struct i40e_pf *pf;
struct i40e_hw *hw;
+ static u16 pfs_found;
+ u16 link_status;
int err = 0;
u32 len;
@@ -7115,6 +7905,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->subsystem_device_id = pdev->subsystem_device;
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
+ pf->instance = pfs_found;
+
+ /* do a special CORER for clearing PXE mode once at init */
+ if (hw->revision_id == 0 &&
+ (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
+ wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
+ i40e_flush(hw);
+ msleep(200);
+ pf->corer_count++;
+
+ i40e_clear_pxe_mode(hw);
+ }
/* Reset here to make sure all is clean and to define PF 'n' */
err = i40e_pf_reset(hw);
@@ -7139,8 +7941,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
+ /* set up a default setting for link flow control */
+ pf->hw.fc.requested_mode = I40E_FC_NONE;
+
err = i40e_init_adminq(hw);
dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
+ if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
+ >> I40E_NVM_VERSION_HI_SHIFT) != I40E_CURRENT_NVM_VERSION_HI) {
+ dev_info(&pdev->dev,
+ "warning: NVM version not supported, supported version: %02x.%02x\n",
+ I40E_CURRENT_NVM_VERSION_HI,
+ I40E_CURRENT_NVM_VERSION_LO);
+ }
if (err) {
dev_info(&pdev->dev,
"init_adminq failed: %d expecting API %02x.%02x\n",
@@ -7149,6 +7961,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
+ i40e_clear_pxe_mode(hw);
err = i40e_get_capabilities(pf);
if (err)
goto err_adminq_setup;
@@ -7175,7 +7988,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
i40e_get_mac_addr(hw, hw->mac.addr);
- if (i40e_validate_mac_addr(hw->mac.addr)) {
+ if (!is_valid_ether_addr(hw->mac.addr)) {
dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
err = -EIO;
goto err_mac_addr;
@@ -7185,6 +7998,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
+#ifdef CONFIG_I40E_DCB
+ err = i40e_init_pf_dcb(pf);
+ if (err) {
+ dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ goto err_init_dcb;
+ }
+#endif /* CONFIG_I40E_DCB */
/* set up periodic task facility */
setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
@@ -7195,6 +8016,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
pf->link_check_timeout = jiffies;
+ /* WoL defaults to disabled */
+ pf->wol_en = false;
+ device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
+
/* set up the main switch operations */
i40e_determine_queue_usage(pf);
i40e_init_interrupt_scheme(pf);
@@ -7209,7 +8034,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_switch_setup;
}
- err = i40e_setup_pf_switch(pf);
+ err = i40e_setup_pf_switch(pf, false);
if (err) {
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
@@ -7247,6 +8072,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_flush(hw);
}
+ pfs_found++;
+
i40e_dbg_pf_init(pf);
/* tell the firmware that we're starting */
@@ -7260,15 +8087,41 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mod_timer(&pf->service_timer,
round_jiffies(jiffies + pf->service_timer_period));
+ /* Get the negotiated link width and speed from PCI config space */
+ pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
+
+ i40e_set_pci_config_data(hw, link_status);
+
+ dev_info(&pdev->dev, "PCI Express: %s %s\n",
+ (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
+ hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
+ hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
+ "Unknown"),
+ (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
+ hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
+ hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
+ hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
+ "Unknown"));
+
+ if (hw->bus.width < i40e_bus_width_pcie_x8 ||
+ hw->bus.speed < i40e_bus_speed_8000) {
+ dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
+ dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+ }
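The warning keys off the negotiated, not maximum, link parameters, taken from the Link Status register read just above. A standalone sketch of decoding PCI_EXP_LNKSTA's speed and width fields (field layout per the PCIe spec; the sample value is synthetic):

#include <stdint.h>
#include <stdio.h>

/* PCIe Link Status register fields, per the PCIe spec */
#define LNKSTA_CLS_MASK  0x000f		/* current link speed, bits 3:0 */
#define LNKSTA_NLW_MASK  0x03f0		/* negotiated link width, bits 9:4 */
#define LNKSTA_NLW_SHIFT 4

int main(void)
{
	uint16_t lnksta = 0x0083;	/* synthetic sample: speed 3, width x8 */
	unsigned int speed = lnksta & LNKSTA_CLS_MASK;
	unsigned int width = (lnksta & LNKSTA_NLW_MASK) >> LNKSTA_NLW_SHIFT;

	/* speed codes: 1 = 2.5GT/s, 2 = 5.0GT/s, 3 = 8.0GT/s */
	printf("speed code %u, width x%u\n", speed, width);
	return 0;
}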
+
return 0;
/* Unwind what we've done if something failed in the setup */
err_vsis:
set_bit(__I40E_DOWN, &pf->state);
-err_switch_setup:
i40e_clear_interrupt_scheme(pf);
kfree(pf->vsi);
+err_switch_setup:
+ i40e_reset_interrupt_capability(pf);
del_timer_sync(&pf->service_timer);
+#ifdef CONFIG_I40E_DCB
+err_init_dcb:
+#endif /* CONFIG_I40E_DCB */
err_mac_addr:
err_configure_lan_hmc:
(void)i40e_shutdown_lan_hmc(hw);
@@ -7310,6 +8163,8 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_dbg_pf_exit(pf);
+ i40e_ptp_stop(pf);
+
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
@@ -7353,7 +8208,6 @@ static void i40e_remove(struct pci_dev *pdev)
"Failed to destroy the HMC resources: %d\n", ret_code);
/* shutdown the adminq */
- i40e_aq_queue_shutdown(&pf->hw, true);
ret_code = i40e_shutdown_adminq(&pf->hw);
if (ret_code)
dev_warn(&pdev->dev,
@@ -7410,7 +8264,11 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
/* shutdown all operations */
- i40e_pf_quiesce_all_vsi(pf);
+ if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+ }
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
@@ -7473,9 +8331,103 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
struct i40e_pf *pf = pci_get_drvdata(pdev);
dev_info(&pdev->dev, "%s\n", __func__);
+ if (test_bit(__I40E_SUSPENDED, &pf->state))
+ return;
+
+ rtnl_lock();
i40e_handle_reset_warning(pf);
+ rtnl_unlock();
}
+/**
+ * i40e_shutdown - PCI callback for shutting down
+ * @pdev: PCI device information struct
+ **/
+static void i40e_shutdown(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_SUSPENDED, &pf->state);
+ set_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ pci_wake_from_d3(pdev, pf->wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+}
+
+#ifdef CONFIG_PM
+/**
+ * i40e_suspend - PCI callback for moving to D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_SUSPENDED, &pf->state);
+ set_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_prep_for_reset(pf);
+ rtnl_unlock();
+
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ pci_wake_from_d3(pdev, pf->wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+/**
+ * i40e_resume - PCI callback for waking up from D3
+ * @pdev: PCI device information struct
+ **/
+static int i40e_resume(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ u32 err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ /* pci_restore_state() clears dev->state_saved, so call
+ * pci_save_state() again to re-mark the state as saved.
+ */
+ pci_save_state(pdev);
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "%s: Cannot enable PCI device from suspend\n",
+ __func__);
+ return err;
+ }
+ pci_set_master(pdev);
+
+ /* no wakeup events while running */
+ pci_wake_from_d3(pdev, false);
+
+ /* handling the reset will rebuild the device state */
+ if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
+ clear_bit(__I40E_DOWN, &pf->state);
+ rtnl_lock();
+ i40e_reset_and_rebuild(pf, false);
+ rtnl_unlock();
+ }
+
+ return 0;
+}
+
+#endif
static const struct pci_error_handlers i40e_err_handler = {
.error_detected = i40e_pci_error_detected,
.slot_reset = i40e_pci_error_slot_reset,
@@ -7487,6 +8439,11 @@ static struct pci_driver i40e_driver = {
.id_table = i40e_pci_tbl,
.probe = i40e_probe,
.remove = i40e_remove,
+#ifdef CONFIG_PM
+ .suspend = i40e_suspend,
+ .resume = i40e_resume,
+#endif
+ .shutdown = i40e_shutdown,
.err_handler = &i40e_err_handler,
.sriov_configure = i40e_pci_sriov_configure,
};