Diffstat (limited to 'drivers/net/ethernet/ibm/ibmvnic.c')
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c  136
1 file changed, 87 insertions(+), 49 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index a07b8d79174c..9198e6bd5160 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -988,7 +988,7 @@ restart_poll:
if (frames_processed < budget) {
enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
- napi_complete(napi);
+ napi_complete_done(napi, frames_processed);
if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
napi_reschedule(napi)) {
disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
@@ -1026,21 +1026,26 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
/* ethtool functions */
-static int ibmvnic_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+static int ibmvnic_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
- cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ u32 supported, advertising;
+
+ supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
SUPPORTED_FIBRE);
- cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
+ advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
ADVERTISED_FIBRE);
- ethtool_cmd_speed_set(cmd, SPEED_1000);
- cmd->duplex = DUPLEX_FULL;
- cmd->port = PORT_FIBRE;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_ENABLE;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 1;
+ cmd->base.speed = SPEED_1000;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_FIBRE;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = AUTONEG_ENABLE;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
@@ -1133,7 +1138,6 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops ibmvnic_ethtool_ops = {
- .get_settings = ibmvnic_get_settings,
.get_drvinfo = ibmvnic_get_drvinfo,
.get_msglevel = ibmvnic_get_msglevel,
.set_msglevel = ibmvnic_set_msglevel,
@@ -1142,6 +1146,7 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_strings = ibmvnic_get_strings,
.get_sset_count = ibmvnic_get_sset_count,
.get_ethtool_stats = ibmvnic_get_ethtool_stats,
+ .get_link_ksettings = ibmvnic_get_link_ksettings,
};
/* Routines for managing CRQs/sCRQs */
@@ -1255,8 +1260,6 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
}
adapter->rx_scrq = NULL;
}
-
- adapter->requested_caps = 0;
}
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
@@ -1278,8 +1281,6 @@ static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
adapter->rx_scrq[i]);
adapter->rx_scrq = NULL;
}
-
- adapter->requested_caps = 0;
}
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
@@ -1567,30 +1568,36 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_tx_entries_per_subcrq);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_MTU);
crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
if (adapter->netdev->flags & IFF_PROMISC) {
@@ -1598,12 +1605,14 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(1);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
}
} else {
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(0);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
}
@@ -1955,112 +1964,112 @@ static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
union ibmvnic_crq crq;
- atomic_set(&adapter->running_cap_queries, 0);
+ atomic_set(&adapter->running_cap_crqs, 0);
memset(&crq, 0, sizeof(crq));
crq.query_capability.first = IBMVNIC_CRQ_CMD;
crq.query_capability.cmd = QUERY_CAPABILITY;
crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability =
cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability =
cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability =
cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability =
cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MIN_MTU);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MAX_MTU);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability =
cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability =
cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability =
cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
- atomic_inc(&adapter->running_cap_queries);
+ atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
}
@@ -2348,6 +2357,7 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
u64 *req_value;
char *name;
+ atomic_dec(&adapter->running_cap_crqs);
switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
case REQ_TX_QUEUES:
req_value = &adapter->req_tx_queues;
@@ -2402,12 +2412,13 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
}
/* Done receiving requested capabilities, query IP offload support */
- if (++adapter->requested_caps == 7) {
+ if (atomic_read(&adapter->running_cap_crqs) == 0) {
union ibmvnic_crq newcrq;
int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
&adapter->ip_offload_buf;
+ adapter->wait_capability = false;
adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
buf_sz,
DMA_FROM_DEVICE);
@@ -2543,9 +2554,9 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
struct device *dev = &adapter->vdev->dev;
long rc;
- atomic_dec(&adapter->running_cap_queries);
+ atomic_dec(&adapter->running_cap_crqs);
netdev_dbg(netdev, "Outstanding queries: %d\n",
- atomic_read(&adapter->running_cap_queries));
+ atomic_read(&adapter->running_cap_crqs));
rc = crq->query_capability.rc.code;
if (rc) {
dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
@@ -2703,9 +2714,11 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
}
out:
- if (atomic_read(&adapter->running_cap_queries) == 0)
+ if (atomic_read(&adapter->running_cap_crqs) == 0) {
+ adapter->wait_capability = false;
init_sub_crqs(adapter, 0);
/* We're done querying the capabilities, initialize sub-crqs */
+ }
}
static void handle_control_ras_rsp(union ibmvnic_crq *crq,
@@ -3415,6 +3428,18 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
struct ibmvnic_adapter *adapter = instance;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->crq.lock, flags);
+ vio_disable_interrupts(adapter->vdev);
+ tasklet_schedule(&adapter->tasklet);
+ spin_unlock_irqrestore(&adapter->crq.lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void ibmvnic_tasklet(void *data)
+{
+ struct ibmvnic_adapter *adapter = data;
struct ibmvnic_crq_queue *queue = &adapter->crq;
struct vio_dev *vdev = adapter->vdev;
union ibmvnic_crq *crq;
@@ -3436,11 +3461,19 @@ static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
ibmvnic_handle_crq(crq, adapter);
crq->generic.first = 0;
} else {
- done = true;
+ /* remain in tasklet until all
+ * capabilities responses are received
+ */
+ if (!adapter->wait_capability)
+ done = true;
}
}
+ /* if capabilities CRQ's were sent in this tasklet, the following
+ * tasklet must wait until all responses are received
+ */
+ if (atomic_read(&adapter->running_cap_crqs) != 0)
+ adapter->wait_capability = true;
spin_unlock_irqrestore(&queue->lock, flags);
- return IRQ_HANDLED;
}
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
@@ -3495,6 +3528,7 @@ static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
netdev_dbg(adapter->netdev, "Releasing CRQ\n");
free_irq(vdev->irq, adapter);
+ tasklet_kill(&adapter->tasklet);
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
@@ -3540,6 +3574,9 @@ static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
retrc = 0;
+ tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
+ (unsigned long)adapter);
+
netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
adapter);
@@ -3561,6 +3598,7 @@ static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
return retrc;
req_irq_failed:
+ tasklet_kill(&adapter->tasklet);
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));