path: root/drivers/net
author     Linus Torvalds <torvalds@linux-foundation.org>   2009-04-29 07:55:45 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-04-29 07:55:45 -0700
commit     3dacbdad2401c06b97d8d754974233a70c165536 (patch)
tree       dc33467f26190572eabf49f7dcac3cd8aa50a101 /drivers/net
parent     56a50adda49b2020156616c4eb15353e0f9ad7de (diff)
parent     ac7c992cac0c8f276aa8e4a8273204a6db707bb3 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (24 commits)
  e100: do not go D3 in shutdown unless system is powering off
  netfilter: revised locking for x_tables
  Bluetooth: Fix connection establishment with low security requirement
  Bluetooth: Add different pairing timeout for Legacy Pairing
  Bluetooth: Ensure that HCI sysfs add/del is preempt safe
  net: Avoid extra wakeups of threads blocked in wait_for_packet()
  net: Fix typo in net_device_ops description.
  ipv4: Limit size of route cache hash table
  Add reference to CAPI 2.0 standard
  Documentation/isdn/INTERFACE.CAPI
  update Documentation/isdn/00-INDEX
  ixgbe: Fix WoL functionality for 82599 KX4 devices
  veth: prevent oops caused by netdev destructor
  xfrm: wrong hash value for temporary SA
  forcedeth: tx timeout fix
  net: Fix LL_MAX_HEADER for CONFIG_TR_MODULE
  mlx4_en: Handle page allocation failure during receive
  mlx4_en: Fix cleanup flow on cq activation
  vlan: update vlan carrier state for admin up/down
  netfilter: xt_recent: fix stack overread in compat code
  ...
Diffstat (limited to 'drivers/net')
-rw-r--r--   drivers/net/e100.c                 30
-rw-r--r--   drivers/net/forcedeth.c            31
-rw-r--r--   drivers/net/ixgbe/ixgbe_common.c   51
-rw-r--r--   drivers/net/ixgbe/ixgbe_main.c     10
-rw-r--r--   drivers/net/mlx4/en_netdev.c        2
-rw-r--r--   drivers/net/mlx4/en_rx.c            4
-rw-r--r--   drivers/net/veth.c                 41
7 files changed, 71 insertions(+), 98 deletions(-)
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 5c0b457c7868..0f9ee1348552 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2728,7 +2728,7 @@ static void __devexit e100_remove(struct pci_dev *pdev)
#define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */
-static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
+static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct nic *nic = netdev_priv(netdev);
@@ -2749,19 +2749,32 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
E100_82552_SMARTSPEED, smartspeed |
E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
}
- if (pci_enable_wake(pdev, PCI_D3cold, true))
- pci_enable_wake(pdev, PCI_D3hot, true);
+ *enable_wake = true;
} else {
- pci_enable_wake(pdev, PCI_D3hot, false);
+ *enable_wake = false;
}
pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
+}
- return 0;
+static int __e100_power_off(struct pci_dev *pdev, bool wake)
+{
+ if (wake) {
+ return pci_prepare_to_sleep(pdev);
+ } else {
+ pci_wake_from_d3(pdev, false);
+ return pci_set_power_state(pdev, PCI_D3hot);
+ }
}
#ifdef CONFIG_PM
+static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ bool wake;
+ __e100_shutdown(pdev, &wake);
+ return __e100_power_off(pdev, wake);
+}
+
static int e100_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -2792,7 +2805,10 @@ static int e100_resume(struct pci_dev *pdev)
static void e100_shutdown(struct pci_dev *pdev)
{
- e100_suspend(pdev, PMSG_SUSPEND);
+ bool wake;
+ __e100_shutdown(pdev, &wake);
+ if (system_state == SYSTEM_POWER_OFF)
+ __e100_power_off(pdev, wake);
}
/* ------------------ PCI Error Recovery infrastructure -------------- */
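
The e100 change above follows a common PCI driver pattern: a shared quiesce helper reports whether wake-on-LAN should be armed, suspend always powers the device down, and the shutdown hook only drops to D3 when the machine is really powering off, so reboot and kexec find the NIC still in D0. Below is a minimal sketch of that pattern, not code from the commit; my_quiesce(), my_power_off(), my_suspend() and my_shutdown() are invented names, while the PCI power-management helpers are the same ones the patch calls.

#include <linux/pci.h>
#include <linux/kernel.h>	/* system_state, SYSTEM_POWER_OFF */

/* Hypothetical stand-in for a driver helper such as __e100_shutdown():
 * stop the hardware, disable the PCI device, report whether WoL is armed. */
static void my_quiesce(struct pci_dev *pdev, bool *enable_wake)
{
	/* Driver-specific work would go here (stop DMA, program WoL filters). */
	*enable_wake = false;
	pci_disable_device(pdev);
}

static int my_power_off(struct pci_dev *pdev, bool wake)
{
	if (wake)
		return pci_prepare_to_sleep(pdev);	/* arm wake, enter a wake-capable D-state */

	pci_wake_from_d3(pdev, false);			/* make sure no wake events fire */
	return pci_set_power_state(pdev, PCI_D3hot);
}

static int my_suspend(struct pci_dev *pdev, pm_message_t state)
{
	bool wake;

	my_quiesce(pdev, &wake);
	return my_power_off(pdev, wake);		/* suspend always powers down */
}

static void my_shutdown(struct pci_dev *pdev)
{
	bool wake;

	my_quiesce(pdev, &wake);
	if (system_state == SYSTEM_POWER_OFF)		/* keep D0 across reboot/kexec */
		my_power_off(pdev, wake);
}

pci_prepare_to_sleep() chooses a wake-capable low-power state and arms wake events, which is why the wake and no-wake branches diverge in my_power_off().
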
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 11d5db16ed9c..f9a846b1b92f 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1880,6 +1880,7 @@ static void nv_init_tx(struct net_device *dev)
np->tx_pkts_in_progress = 0;
np->tx_change_owner = NULL;
np->tx_end_flip = NULL;
+ np->tx_stop = 0;
for (i = 0; i < np->tx_ring_size; i++) {
if (!nv_optimized(np)) {
@@ -2530,6 +2531,8 @@ static void nv_tx_timeout(struct net_device *dev)
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
u32 status;
+ union ring_type put_tx;
+ int saved_tx_limit;
if (np->msi_flags & NV_MSI_X_ENABLED)
status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
@@ -2589,24 +2592,32 @@ static void nv_tx_timeout(struct net_device *dev)
/* 1) stop tx engine */
nv_stop_tx(dev);
- /* 2) check that the packets were not sent already: */
+ /* 2) complete any outstanding tx and do not give HW any limited tx pkts */
+ saved_tx_limit = np->tx_limit;
+ np->tx_limit = 0; /* prevent giving HW any limited pkts */
+ np->tx_stop = 0; /* prevent waking tx queue */
if (!nv_optimized(np))
nv_tx_done(dev, np->tx_ring_size);
else
nv_tx_done_optimized(dev, np->tx_ring_size);
- /* 3) if there are dead entries: clear everything */
- if (np->get_tx_ctx != np->put_tx_ctx) {
- printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
- nv_drain_tx(dev);
- nv_init_tx(dev);
- setup_hw_rings(dev, NV_SETUP_TX_RING);
- }
+ /* save current HW position */
+ if (np->tx_change_owner)
+ put_tx.ex = np->tx_change_owner->first_tx_desc;
+ else
+ put_tx = np->put_tx;
- netif_wake_queue(dev);
+ /* 3) clear all tx state */
+ nv_drain_tx(dev);
+ nv_init_tx(dev);
+
+ /* 4) restore state to current HW position */
+ np->get_tx = np->put_tx = put_tx;
+ np->tx_limit = saved_tx_limit;
- /* 4) restart tx engine */
+ /* 5) restart tx engine */
nv_start_tx(dev);
+ netif_wake_queue(dev);
spin_unlock_irq(&np->lock);
}
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 5567519676d5..186a65069b33 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -50,7 +50,6 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
-static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
/**
@@ -1377,8 +1376,7 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
* Clear accounting of old secondary address list,
* don't count RAR[0]
*/
- uc_addr_in_use = hw->addr_ctrl.rar_used_count -
- hw->addr_ctrl.mc_addr_in_rar_count - 1;
+ uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
hw->addr_ctrl.overflow_promisc = 0;
@@ -1493,40 +1491,6 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
}
/**
- * ixgbe_add_mc_addr - Adds a multicast address.
- * @hw: pointer to hardware structure
- * @mc_addr: new multicast address
- *
- * Adds it to unused receive address register or to the multicast table.
- **/
-static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
-{
- u32 rar_entries = hw->mac.num_rar_entries;
- u32 rar;
-
- hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
- mc_addr[0], mc_addr[1], mc_addr[2],
- mc_addr[3], mc_addr[4], mc_addr[5]);
-
- /*
- * Place this multicast address in the RAR if there is room,
- * else put it in the MTA
- */
- if (hw->addr_ctrl.rar_used_count < rar_entries) {
- /* use RAR from the end up for multicast */
- rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1;
- hw->mac.ops.set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV);
- hw_dbg(hw, "Added a multicast address to RAR[%d]\n", rar);
- hw->addr_ctrl.rar_used_count++;
- hw->addr_ctrl.mc_addr_in_rar_count++;
- } else {
- ixgbe_set_mta(hw, mc_addr);
- }
-
- hw_dbg(hw, "ixgbe_add_mc_addr Complete\n");
-}
-
-/**
* ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
* @hw: pointer to hardware structure
* @mc_addr_list: the list of new multicast addresses
@@ -1542,7 +1506,6 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count, ixgbe_mc_addr_itr next)
{
u32 i;
- u32 rar_entries = hw->mac.num_rar_entries;
u32 vmdq;
/*
@@ -1550,18 +1513,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
* use.
*/
hw->addr_ctrl.num_mc_addrs = mc_addr_count;
- hw->addr_ctrl.rar_used_count -= hw->addr_ctrl.mc_addr_in_rar_count;
- hw->addr_ctrl.mc_addr_in_rar_count = 0;
hw->addr_ctrl.mta_in_use = 0;
- /* Zero out the other receive addresses. */
- hw_dbg(hw, "Clearing RAR[%d-%d]\n", hw->addr_ctrl.rar_used_count,
- rar_entries - 1);
- for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
- }
-
/* Clear the MTA */
hw_dbg(hw, " Clearing MTA\n");
for (i = 0; i < hw->mac.mcft_size; i++)
@@ -1570,7 +1523,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
/* Add the new addresses */
for (i = 0; i < mc_addr_count; i++) {
hw_dbg(hw, " Adding the multicast addresses:\n");
- ixgbe_add_mc_addr(hw, next(hw, &mc_addr_list, &vmdq));
+ ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
}
/* Enable mta */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 01884256f4c9..07e778d3e5d2 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3646,6 +3646,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
ixgbe_reset(adapter);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+
if (netif_running(netdev)) {
err = ixgbe_open(adapter->netdev);
if (err)
@@ -4575,7 +4577,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
static int cards_found;
int i, err, pci_using_dac;
- u16 pm_value = 0;
u32 part_num, eec;
err = pci_enable_device(pdev);
@@ -4763,11 +4764,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
switch (pdev->device) {
case IXGBE_DEV_ID_82599_KX4:
-#define IXGBE_PCIE_PMCSR 0x44
- adapter->wol = IXGBE_WUFC_MAG;
- pci_read_config_word(pdev, IXGBE_PCIE_PMCSR, &pm_value);
- pci_write_config_word(pdev, IXGBE_PCIE_PMCSR,
- (pm_value | (1 << 8)));
+ adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
+ IXGBE_WUFC_MC | IXGBE_WUFC_BC);
break;
default:
adapter->wol = 0;
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 438678ab2a10..7bcc49de1637 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -583,7 +583,7 @@ int mlx4_en_start_port(struct net_device *dev)
err = mlx4_en_activate_cq(priv, cq);
if (err) {
mlx4_err(mdev, "Failed activating Rx CQ\n");
- goto rx_err;
+ goto cq_err;
}
for (j = 0; j < cq->size; j++)
cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 0cbb78ca7b29..7942c4d3cd88 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -610,6 +610,10 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
skb_shinfo(skb)->frags,
page_alloc, length);
+ if (unlikely(!used_frags)) {
+ kfree_skb(skb);
+ return NULL;
+ }
skb_shinfo(skb)->nr_frags = used_frags;
/* Copy headers into the skb linear buffer */
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 015db1cece72..8e56fcf0a0e3 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -210,14 +210,11 @@ rx_drop:
static struct net_device_stats *veth_get_stats(struct net_device *dev)
{
- struct veth_priv *priv;
- struct net_device_stats *dev_stats;
- int cpu;
+ struct veth_priv *priv = netdev_priv(dev);
+ struct net_device_stats *dev_stats = &dev->stats;
+ unsigned int cpu;
struct veth_net_stats *stats;
- priv = netdev_priv(dev);
- dev_stats = &dev->stats;
-
dev_stats->rx_packets = 0;
dev_stats->tx_packets = 0;
dev_stats->rx_bytes = 0;
@@ -225,16 +222,17 @@ static struct net_device_stats *veth_get_stats(struct net_device *dev)
dev_stats->tx_dropped = 0;
dev_stats->rx_dropped = 0;
- for_each_online_cpu(cpu) {
- stats = per_cpu_ptr(priv->stats, cpu);
+ if (priv->stats)
+ for_each_online_cpu(cpu) {
+ stats = per_cpu_ptr(priv->stats, cpu);
- dev_stats->rx_packets += stats->rx_packets;
- dev_stats->tx_packets += stats->tx_packets;
- dev_stats->rx_bytes += stats->rx_bytes;
- dev_stats->tx_bytes += stats->tx_bytes;
- dev_stats->tx_dropped += stats->tx_dropped;
- dev_stats->rx_dropped += stats->rx_dropped;
- }
+ dev_stats->rx_packets += stats->rx_packets;
+ dev_stats->tx_packets += stats->tx_packets;
+ dev_stats->rx_bytes += stats->rx_bytes;
+ dev_stats->tx_bytes += stats->tx_bytes;
+ dev_stats->tx_dropped += stats->tx_dropped;
+ dev_stats->rx_dropped += stats->rx_dropped;
+ }
return dev_stats;
}
@@ -261,6 +259,8 @@ static int veth_close(struct net_device *dev)
netif_carrier_off(dev);
netif_carrier_off(priv->peer);
+ free_percpu(priv->stats);
+ priv->stats = NULL;
return 0;
}
@@ -291,15 +291,6 @@ static int veth_dev_init(struct net_device *dev)
return 0;
}
-static void veth_dev_free(struct net_device *dev)
-{
- struct veth_priv *priv;
-
- priv = netdev_priv(dev);
- free_percpu(priv->stats);
- free_netdev(dev);
-}
-
static const struct net_device_ops veth_netdev_ops = {
.ndo_init = veth_dev_init,
.ndo_open = veth_open,
@@ -317,7 +308,7 @@ static void veth_setup(struct net_device *dev)
dev->netdev_ops = &veth_netdev_ops;
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
- dev->destructor = veth_dev_free;
+ dev->destructor = free_netdev;
}
/*
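
The veth part of the merge frees the per-CPU statistics area in the close path and lets plain free_netdev() act as the destructor, so a later statistics read must tolerate priv->stats already being NULL. A minimal sketch of that aggregation pattern follows; example_priv and example_cpu_stats are invented names standing in for veth's private structures, while the per-CPU helpers are the standard kernel ones.

#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Hypothetical per-CPU counter block, standing in for struct veth_net_stats. */
struct example_cpu_stats {
	unsigned long rx_packets;
	unsigned long tx_packets;
};

struct example_priv {
	struct example_cpu_stats *stats;	/* alloc_percpu() area, may be NULL */
};

static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	struct net_device_stats *dev_stats = &dev->stats;
	unsigned int cpu;

	dev_stats->rx_packets = 0;
	dev_stats->tx_packets = 0;

	/* The per-CPU area is freed in the close path, so guard against NULL. */
	if (priv->stats) {
		for_each_online_cpu(cpu) {
			struct example_cpu_stats *s = per_cpu_ptr(priv->stats, cpu);

			dev_stats->rx_packets += s->rx_packets;
			dev_stats->tx_packets += s->tx_packets;
		}
	}

	return dev_stats;
}
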