Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/au1000_eth.c                     |  18
-rw-r--r--  drivers/net/dl2k.c                           |  12
-rw-r--r--  drivers/net/forcedeth.c                      | 312
-rw-r--r--  drivers/net/hamradio/dmascc.c                |   1
-rw-r--r--  drivers/net/hamradio/scc.c                   |   1
-rw-r--r--  drivers/net/hamradio/yam.c                   |   1
-rw-r--r--  drivers/net/irda/Makefile                    |   2
-rw-r--r--  drivers/net/irda/irda-usb.c                  |   2
-rw-r--r--  drivers/net/irda/sir-dev.h                   |  13
-rw-r--r--  drivers/net/irda/sir_dev.c                   | 315
-rw-r--r--  drivers/net/irda/sir_kthread.c               | 508
-rw-r--r--  drivers/net/irda/smsc-ircc2.c                |  14
-rw-r--r--  drivers/net/mv643xx_eth.c                    |   2
-rw-r--r--  drivers/net/ne.c                             |  31
-rw-r--r--  drivers/net/phy/mdio_bus.c                   |   4
-rw-r--r--  drivers/net/sis900.c                         |   1
-rw-r--r--  drivers/net/sky2.c                           | 222
-rw-r--r--  drivers/net/sky2.h                           |   3
-rw-r--r--  drivers/net/spider_net.c                     |  12
-rw-r--r--  drivers/net/spider_net.h                     |   2
-rw-r--r--  drivers/net/sungem_phy.c                     |  45
-rw-r--r--  drivers/net/sungem_phy.h                     |   1
-rw-r--r--  drivers/net/tg3.c                            |  85
-rw-r--r--  drivers/net/tg3.h                            |   1
-rw-r--r--  drivers/net/via-rhine.c                      |   6
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c  |  45
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.h  |   6
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_phy.c   |   2
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_wx.c    |   7
29 files changed, 865 insertions, 809 deletions
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 1363083b4d83..14dbad14afb6 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -52,6 +52,7 @@
#include <linux/mii.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
+#include <linux/crc32.h>
#include <asm/mipsregs.h>
#include <asm/irq.h>
#include <asm/io.h>
@@ -2070,23 +2071,6 @@ static void au1000_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-
-static unsigned const ethernet_polynomial = 0x04c11db7U;
-static inline u32 ether_crc(int length, unsigned char *data)
-{
- int crc = -1;
-
- while(--length >= 0) {
- unsigned char current_octet = *data++;
- int bit;
- for (bit = 0; bit < 8; bit++, current_octet >>= 1)
- crc = (crc << 1) ^
- ((crc < 0) ^ (current_octet & 1) ?
- ethernet_polynomial : 0);
- }
- return crc;
-}
-
static void set_rx_mode(struct net_device *dev)
{
struct au1000_private *aup = (struct au1000_private *) dev->priv;
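[Note on the au1000_eth.c hunk: the driver-private CRC helper is dropped and <linux/crc32.h> added, so the shared ether_crc() from lib/crc32.c now supplies the multicast hash value. The standalone sketch below (not kernel code; the sample address and main() are illustrative only) reproduces the bit-serial CRC the removed helper computed, for comparison.]

```c
/*
 * Standalone sketch: the bit-serial CRC-32 the removed helper computed,
 * using the Ethernet polynomial 0x04c11db7.  In-tree the same value now
 * comes from ether_crc() via <linux/crc32.h>; set_rx_mode() derives its
 * multicast hash bit from the top bits of this CRC (not shown here).
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t ether_crc_sketch(int length, const unsigned char *data)
{
	uint32_t crc = 0xffffffffu;		/* preset to all ones */

	while (--length >= 0) {
		unsigned char octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      (((crc >> 31) ^ (octet & 1)) ? 0x04c11db7u : 0);
	}
	return crc;
}

int main(void)
{
	/* hypothetical multicast address, used only to exercise the helper */
	const unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("crc = 0x%08x\n", ether_crc_sketch(6, mc));
	return 0;
}
```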
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 1f3627470c95..1ddefd281213 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -765,7 +765,7 @@ rio_free_tx (struct net_device *dev, int irq)
break;
skb = np->tx_skbuff[entry];
pci_unmap_single (np->pdev,
- np->tx_ring[entry].fraginfo & 0xffffffffffff,
+ np->tx_ring[entry].fraginfo & DMA_48BIT_MASK,
skb->len, PCI_DMA_TODEVICE);
if (irq)
dev_kfree_skb_irq (skb);
@@ -893,7 +893,7 @@ receive_packet (struct net_device *dev)
/* Small skbuffs for short packets */
if (pkt_len > copy_thresh) {
pci_unmap_single (np->pdev,
- desc->fraginfo & 0xffffffffffff,
+ desc->fraginfo & DMA_48BIT_MASK,
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
@@ -901,7 +901,7 @@ receive_packet (struct net_device *dev)
} else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
pci_dma_sync_single_for_cpu(np->pdev,
desc->fraginfo &
- 0xffffffffffff,
+ DMA_48BIT_MASK,
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
skb->dev = dev;
@@ -913,7 +913,7 @@ receive_packet (struct net_device *dev)
skb_put (skb, pkt_len);
pci_dma_sync_single_for_device(np->pdev,
desc->fraginfo &
- 0xffffffffffff,
+ DMA_48BIT_MASK,
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
}
@@ -1800,7 +1800,7 @@ rio_close (struct net_device *dev)
skb = np->rx_skbuff[i];
if (skb) {
pci_unmap_single(np->pdev,
- np->rx_ring[i].fraginfo & 0xffffffffffff,
+ np->rx_ring[i].fraginfo & DMA_48BIT_MASK,
skb->len, PCI_DMA_FROMDEVICE);
dev_kfree_skb (skb);
np->rx_skbuff[i] = NULL;
@@ -1810,7 +1810,7 @@ rio_close (struct net_device *dev)
skb = np->tx_skbuff[i];
if (skb) {
pci_unmap_single(np->pdev,
- np->tx_ring[i].fraginfo & 0xffffffffffff,
+ np->tx_ring[i].fraginfo & DMA_48BIT_MASK,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb (skb);
np->tx_skbuff[i] = NULL;
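[Note on the dl2k.c hunks: the open-coded 48-bit constant is replaced by DMA_48BIT_MASK. A minimal sketch follows, assuming DMA_48BIT_MASK expands to the usual low-48-bits value (0x0000ffffffffffffULL) and that dl2k packs the frame length into the upper 16 bits of fraginfo; it shows why the bus address must be masked out before pci_unmap_single().]

```c
#include <stdio.h>
#include <stdint.h>

#define DMA_48BIT_MASK 0x0000ffffffffffffULL	/* assumed definition */

int main(void)
{
	/* hypothetical descriptor word: length 0x05ea in bits 48..63,
	 * bus address 0x000012345678 in bits 0..47 */
	uint64_t fraginfo = ((uint64_t)0x05ea << 48) | 0x000012345678ULL;

	uint64_t bus_addr = fraginfo & DMA_48BIT_MASK;	/* what gets unmapped */
	unsigned len = (unsigned)(fraginfo >> 48);	/* what the NIC DMA'd */

	printf("addr = %#llx, len = %u\n",
	       (unsigned long long)bus_addr, len);
	return 0;
}
```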
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 9788b1ef2e7d..f7235c9bc421 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -106,6 +106,7 @@
* 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
* 0.52: 20 Jan 2006: Add MSI/MSIX support.
* 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
+ * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
@@ -117,7 +118,7 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
-#define FORCEDETH_VERSION "0.53"
+#define FORCEDETH_VERSION "0.54"
#define DRV_NAME "forcedeth"
#include <linux/module.h>
@@ -710,6 +711,72 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
}
}
+static int using_multi_irqs(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+ ((np->msi_flags & NV_MSI_X_ENABLED) &&
+ ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
+ return 0;
+ else
+ return 1;
+}
+
+static void nv_enable_irq(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ if (!using_multi_irqs(dev)) {
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+ enable_irq(dev->irq);
+ } else {
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+ }
+}
+
+static void nv_disable_irq(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+
+ if (!using_multi_irqs(dev)) {
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+ disable_irq(dev->irq);
+ } else {
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+ }
+}
+
+/* In MSIX mode, a write to irqmask behaves as XOR */
+static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
+{
+ u8 __iomem *base = get_hwbase(dev);
+
+ writel(mask, base + NvRegIrqMask);
+}
+
+static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+
+ if (np->msi_flags & NV_MSI_X_ENABLED) {
+ writel(mask, base + NvRegIrqMask);
+ } else {
+ if (np->msi_flags & NV_MSI_ENABLED)
+ writel(0, base + NvRegMSIIrqMask);
+ writel(0, base + NvRegIrqMask);
+ }
+}
+
#define MII_READ (-1)
/* mii_rw: read/write a register on the PHY.
*
@@ -1019,24 +1086,25 @@ static void nv_do_rx_refill(unsigned long data)
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
-
- if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
- ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
- disable_irq(dev->irq);
+ if (!using_multi_irqs(dev)) {
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+ disable_irq(dev->irq);
} else {
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
}
if (nv_alloc_rx(dev)) {
- spin_lock(&np->lock);
+ spin_lock_irq(&np->lock);
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock(&np->lock);
+ spin_unlock_irq(&np->lock);
}
- if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
- ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
- enable_irq(dev->irq);
+ if (!using_multi_irqs(dev)) {
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+ enable_irq(dev->irq);
} else {
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
}
@@ -1668,15 +1736,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
* guessed, there is probably a simpler approach.
* Changing the MTU is a rare event, it shouldn't matter.
*/
- if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
- ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
- disable_irq(dev->irq);
- } else {
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
- disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
- }
+ nv_disable_irq(dev);
spin_lock_bh(&dev->xmit_lock);
spin_lock(&np->lock);
/* stop engines */
@@ -1709,15 +1769,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
nv_start_tx(dev);
spin_unlock(&np->lock);
spin_unlock_bh(&dev->xmit_lock);
- if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
- ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
- enable_irq(dev->irq);
- } else {
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
- enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
- }
+ nv_enable_irq(dev);
}
return 0;
}
@@ -2108,16 +2160,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
if (!(events & np->irqmask))
break;
- spin_lock(&np->lock);
+ spin_lock_irq(&np->lock);
nv_tx_done(dev);
- spin_unlock(&np->lock);
+ spin_unlock_irq(&np->lock);
if (events & (NVREG_IRQ_TX_ERR)) {
dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
dev->name, events);
}
if (i > max_interrupt_work) {
- spin_lock(&np->lock);
+ spin_lock_irq(&np->lock);
/* disable interrupts on the nic */
writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
pci_push(base);
@@ -2127,7 +2179,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
- spin_unlock(&np->lock);
+ spin_unlock_irq(&np->lock);
break;
}
@@ -2157,14 +2209,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
nv_rx_process(dev);
if (nv_alloc_rx(dev)) {
- spin_lock(&np->lock);
+ spin_lock_irq(&np->lock);
if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
- spin_unlock(&np->lock);
+ spin_unlock_irq(&np->lock);
}
if (i > max_interrupt_work) {
- spin_lock(&np->lock);
+ spin_lock_irq(&np->lock);
/* disable interrupts on the nic */
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
pci_push(base);
@@ -2174,7 +2226,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
- spin_unlock(&np->lock);
+ spin_unlock_irq(&np->lock);
break;
}
@@ -2203,14 +2255,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
break;
if (events & NVREG_IRQ_LINK) {
- spin_lock(&np->lock);
+ spin_lock_irq(&np->lock);
nv_link_irq(dev);
- spin_unlock(&np->lock);
+ spin_unlock_irq(&np->lock);
}
if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
- spin_lock(&np->lock);
+ spin_lock_irq(&np->lock);
nv_linkchange(dev);
- spin_unlock(&np->lock);
+ spin_unlock_irq(&np->lock);
np->link_timeout = jiffies + LINK_TIMEOUT;
}
if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2218,7 +2270,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
dev->name, events);
}
if (i > max_interrupt_work) {
- spin_lock(&np->lock);
+ spin_lock_irq(&np->lock);
/* disable interrupts on the nic */
writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
pci_push(base);
@@ -2228,7 +2280,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
- spin_unlock(&np->lock);
+ spin_unlock_irq(&np->lock);
break;
}
@@ -2251,10 +2303,11 @@ static void nv_do_nic_poll(unsigned long data)
* nv_nic_irq because that may decide to do otherwise
*/
- if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
- ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
- disable_irq(dev->irq);
+ if (!using_multi_irqs(dev)) {
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+ disable_irq(dev->irq);
mask = np->irqmask;
} else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
@@ -2277,11 +2330,12 @@ static void nv_do_nic_poll(unsigned long data)
writel(mask, base + NvRegIrqMask);
pci_push(base);
- if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
- ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+ if (!using_multi_irqs(dev)) {
nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
- enable_irq(dev->irq);
+ if (np->msi_flags & NV_MSI_X_ENABLED)
+ enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+ else
+ enable_irq(dev->irq);
} else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
@@ -2628,6 +2682,113 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
+static int nv_request_irq(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ int ret = 1;
+ int i;
+
+ if (np->msi_flags & NV_MSI_X_CAPABLE) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+ np->msi_x_entry[i].entry = i;
+ }
+ if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
+ np->msi_flags |= NV_MSI_X_ENABLED;
+ if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
+ /* Request irq for rx handling */
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
+ printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_err;
+ }
+ /* Request irq for tx handling */
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
+ printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_free_rx;
+ }
+ /* Request irq for link and timer handling */
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
+ printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_free_tx;
+ }
+ /* map interrupts to their respective vector */
+ writel(0, base + NvRegMSIXMap0);
+ writel(0, base + NvRegMSIXMap1);
+ set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
+ set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
+ set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
+ } else {
+ /* Request irq for all interrupts */
+ if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+ printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ goto out_err;
+ }
+
+ /* map interrupts to vector 0 */
+ writel(0, base + NvRegMSIXMap0);
+ writel(0, base + NvRegMSIXMap1);
+ }
+ }
+ }
+ if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+ if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+ np->msi_flags |= NV_MSI_ENABLED;
+ if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+ printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_ENABLED;
+ goto out_err;
+ }
+
+ /* map interrupts to vector 0 */
+ writel(0, base + NvRegMSIMap0);
+ writel(0, base + NvRegMSIMap1);
+ /* enable msi vector 0 */
+ writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+ }
+ }
+ if (ret != 0) {
+ if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
+ goto out_err;
+ }
+
+ return 0;
+out_free_tx:
+ free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
+out_free_rx:
+ free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
+out_err:
+ return 1;
+}
+
+static void nv_free_irq(struct net_device *dev)
+{
+ struct fe_priv *np = get_nvpriv(dev);
+ int i;
+
+ if (np->msi_flags & NV_MSI_X_ENABLED) {
+ for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+ free_irq(np->msi_x_entry[i].vector, dev);
+ }
+ pci_disable_msix(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_X_ENABLED;
+ } else {
+ free_irq(np->pci_dev->irq, dev);
+ if (np->msi_flags & NV_MSI_ENABLED) {
+ pci_disable_msi(np->pci_dev);
+ np->msi_flags &= ~NV_MSI_ENABLED;
+ }
+ }
+}
+
static int nv_open(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
@@ -2720,12 +2881,16 @@ static int nv_open(struct net_device *dev)
udelay(10);
writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
- writel(0, base + NvRegIrqMask);
+ nv_disable_hw_interrupts(dev, np->irqmask);
pci_push(base);
writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
pci_push(base);
+ if (nv_request_irq(dev)) {
+ goto out_drain;
+ }
+
if (np->msi_flags & NV_MSI_X_CAPABLE) {
for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
np->msi_x_entry[i].entry = i;
@@ -2799,7 +2964,7 @@ static int nv_open(struct net_device *dev)
}
/* ask for interrupts */
- writel(np->irqmask, base + NvRegIrqMask);
+ nv_enable_hw_interrupts(dev, np->irqmask);
spin_lock_irq(&np->lock);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
@@ -2843,7 +3008,6 @@ static int nv_close(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base;
- int i;
spin_lock_irq(&np->lock);
np->in_shutdown = 1;
@@ -2861,31 +3025,13 @@ static int nv_close(struct net_device *dev)
/* disable interrupts on the nic or we will lock up */
base = get_hwbase(dev);
- if (np->msi_flags & NV_MSI_X_ENABLED) {
- writel(np->irqmask, base + NvRegIrqMask);
- } else {
- if (np->msi_flags & NV_MSI_ENABLED)
- writel(0, base + NvRegMSIIrqMask);
- writel(0, base + NvRegIrqMask);
- }
+ nv_disable_hw_interrupts(dev, np->irqmask);
pci_push(base);
dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
spin_unlock_irq(&np->lock);
- if (np->msi_flags & NV_MSI_X_ENABLED) {
- for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
- free_irq(np->msi_x_entry[i].vector, dev);
- }
- pci_disable_msix(np->pci_dev);
- np->msi_flags &= ~NV_MSI_X_ENABLED;
- } else {
- free_irq(np->pci_dev->irq, dev);
- if (np->msi_flags & NV_MSI_ENABLED) {
- pci_disable_msi(np->pci_dev);
- np->msi_flags &= ~NV_MSI_ENABLED;
- }
- }
+ nv_free_irq(dev);
drain_ring(dev);
@@ -2974,20 +3120,18 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
if (id->driver_data & DEV_HAS_HIGH_DMA) {
/* packet format 3: supports 40-bit addressing */
np->desc_ver = DESC_VER_3;
+ np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
pci_name(pci_dev));
} else {
- if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
- printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
- pci_name(pci_dev));
- goto out_relreg;
- } else {
- dev->features |= NETIF_F_HIGHDMA;
- printk(KERN_INFO "forcedeth: using HIGHDMA\n");
- }
+ dev->features |= NETIF_F_HIGHDMA;
+ printk(KERN_INFO "forcedeth: using HIGHDMA\n");
+ }
+ if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
+ printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
+ pci_name(pci_dev));
}
- np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
} else if (id->driver_data & DEV_HAS_LARGEDESC) {
/* packet format 2: supports jumbo frames */
np->desc_ver = DESC_VER_2;
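[Note on the forcedeth.c hunks: the repeated MSI-X test is factored into using_multi_irqs(), and nv_request_irq()/nv_free_irq() centralize the fallback from MSI-X to MSI to a plain shared interrupt. The standalone model below captures the factored-out predicate; the flag values are assumptions, and the condition is written more compactly than the driver's but is logically equivalent.]

```c
#include <stdio.h>

#define NV_MSI_X_ENABLED	0x0080	/* assumed bit layout */
#define NV_MSI_X_VECTORS_MASK	0x000f

static int using_multi_irqs(unsigned msi_flags)
{
	if (!(msi_flags & NV_MSI_X_ENABLED) ||
	    (msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)
		return 0;	/* single vector: legacy, MSI, or 1-vector MSI-X */
	return 1;		/* separate rx/tx/other vectors */
}

int main(void)
{
	printf("%d\n", using_multi_irqs(0));			  /* 0: INTx/MSI */
	printf("%d\n", using_multi_irqs(NV_MSI_X_ENABLED | 0x1)); /* 0 */
	printf("%d\n", using_multi_irqs(NV_MSI_X_ENABLED | 0x3)); /* 1 */
	return 0;
}
```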
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 79a8fbcf5f93..0d5fccc984bb 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -582,7 +582,6 @@ static int __init setup_adapter(int card_base, int type, int n)
INIT_WORK(&priv->rx_work, rx_bh, priv);
dev->priv = priv;
sprintf(dev->name, "dmascc%i", 2 * n + i);
- SET_MODULE_OWNER(dev);
dev->base_addr = card_base;
dev->irq = irq;
dev->open = scc_open;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index 6ace0e914fd1..5927784df3f9 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1550,7 +1550,6 @@ static unsigned char ax25_nocall[AX25_ADDR_LEN] =
static void scc_net_setup(struct net_device *dev)
{
- SET_MODULE_OWNER(dev);
dev->tx_queue_len = 16; /* should be enough... */
dev->open = scc_net_open;
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index fe22479eb202..b49884048caa 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1098,7 +1098,6 @@ static void yam_setup(struct net_device *dev)
dev->base_addr = yp->iobase;
dev->irq = yp->irq;
- SET_MODULE_OWNER(dev);
dev->open = yam_open;
dev->stop = yam_close;
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index 27ab75f20799..c1ce2398efea 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -46,4 +46,4 @@ obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
# The SIR helper module
-sir-dev-objs := sir_dev.o sir_dongle.o sir_kthread.o
+sir-dev-objs := sir_dev.o sir_dongle.o
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 96bdb73c2283..cd87593e4e8a 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1778,7 +1778,7 @@ static int irda_usb_probe(struct usb_interface *intf,
if (self->needspatch) {
ret = usb_control_msg (self->usbdev, usb_sndctrlpipe (self->usbdev, 0),
- 0x02, 0x40, 0, 0, 0, 0, msecs_to_jiffies(500));
+ 0x02, 0x40, 0, 0, NULL, 0, 500);
if (ret < 0) {
IRDA_DEBUG (0, "usb_control_msg failed %d\n", ret);
goto err_out_3;
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index f69fb4cec76f..9fa294a546d6 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -15,23 +15,14 @@
#define IRDA_SIR_H
#include <linux/netdevice.h>
+#include <linux/workqueue.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h> // iobuff_t
-/* FIXME: unify irda_request with sir_fsm! */
-
-struct irda_request {
- struct list_head lh_request;
- unsigned long pending;
- void (*func)(void *);
- void *data;
- struct timer_list timer;
-};
-
struct sir_fsm {
struct semaphore sem;
- struct irda_request rq;
+ struct work_struct work;
unsigned state, substate;
int param;
int result;
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index ea7c9464d46a..3b5854d10c17 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -23,6 +23,298 @@
#include "sir-dev.h"
+
+static struct workqueue_struct *irda_sir_wq;
+
+/* STATE MACHINE */
+
+/* substate handler of the config-fsm to handle the cases where we want
+ * to wait for transmit completion before changing the port configuration
+ */
+
+static int sirdev_tx_complete_fsm(struct sir_dev *dev)
+{
+ struct sir_fsm *fsm = &dev->fsm;
+ unsigned next_state, delay;
+ unsigned bytes_left;
+
+ do {
+ next_state = fsm->substate; /* default: stay in current substate */
+ delay = 0;
+
+ switch(fsm->substate) {
+
+ case SIRDEV_STATE_WAIT_XMIT:
+ if (dev->drv->chars_in_buffer)
+ bytes_left = dev->drv->chars_in_buffer(dev);
+ else
+ bytes_left = 0;
+ if (!bytes_left) {
+ next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
+ break;
+ }
+
+ if (dev->speed > 115200)
+ delay = (bytes_left*8*10000) / (dev->speed/100);
+ else if (dev->speed > 0)
+ delay = (bytes_left*10*10000) / (dev->speed/100);
+ else
+ delay = 0;
+ /* expected delay (usec) until remaining bytes are sent */
+ if (delay < 100) {
+ udelay(delay);
+ delay = 0;
+ break;
+ }
+ /* sleep some longer delay (msec) */
+ delay = (delay+999) / 1000;
+ break;
+
+ case SIRDEV_STATE_WAIT_UNTIL_SENT:
+ /* block until underlaying hardware buffer are empty */
+ if (dev->drv->wait_until_sent)
+ dev->drv->wait_until_sent(dev);
+ next_state = SIRDEV_STATE_TX_DONE;
+ break;
+
+ case SIRDEV_STATE_TX_DONE:
+ return 0;
+
+ default:
+ IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
+ return -EINVAL;
+ }
+ fsm->substate = next_state;
+ } while (delay == 0);
+ return delay;
+}
+
+/*
+ * Function sirdev_config_fsm
+ *
+ * State machine to handle the configuration of the device (and attached dongle, if any).
+ * This handler is scheduled for execution in kIrDAd context, so we can sleep.
+ * however, kIrDAd is shared by all sir_dev devices so we better don't sleep there too
+ * long. Instead, for longer delays we start a timer to reschedule us later.
+ * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
+ * Both must be unlocked/restarted on completion - but only on final exit.
+ */
+
+static void sirdev_config_fsm(void *data)
+{
+ struct sir_dev *dev = data;
+ struct sir_fsm *fsm = &dev->fsm;
+ int next_state;
+ int ret = -1;
+ unsigned delay;
+
+ IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
+
+ do {
+ IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
+ __FUNCTION__, fsm->state, fsm->substate);
+
+ next_state = fsm->state;
+ delay = 0;
+
+ switch(fsm->state) {
+
+ case SIRDEV_STATE_DONGLE_OPEN:
+ if (dev->dongle_drv != NULL) {
+ ret = sirdev_put_dongle(dev);
+ if (ret) {
+ fsm->result = -EINVAL;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ }
+
+ /* Initialize dongle */
+ ret = sirdev_get_dongle(dev, fsm->param);
+ if (ret) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+
+ /* Dongles are powered through the modem control lines which
+ * were just set during open. Before resetting, let's wait for
+ * the power to stabilize. This is what some dongle drivers did
+ * in open before, while others didn't - should be safe anyway.
+ */
+
+ delay = 50;
+ fsm->substate = SIRDEV_STATE_DONGLE_RESET;
+ next_state = SIRDEV_STATE_DONGLE_RESET;
+
+ fsm->param = 9600;
+
+ break;
+
+ case SIRDEV_STATE_DONGLE_CLOSE:
+ /* shouldn't we just treat this as success=? */
+ if (dev->dongle_drv == NULL) {
+ fsm->result = -EINVAL;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+
+ ret = sirdev_put_dongle(dev);
+ if (ret) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ next_state = SIRDEV_STATE_DONE;
+ break;
+
+ case SIRDEV_STATE_SET_DTR_RTS:
+ ret = sirdev_set_dtr_rts(dev,
+ (fsm->param&0x02) ? TRUE : FALSE,
+ (fsm->param&0x01) ? TRUE : FALSE);
+ next_state = SIRDEV_STATE_DONE;
+ break;
+
+ case SIRDEV_STATE_SET_SPEED:
+ fsm->substate = SIRDEV_STATE_WAIT_XMIT;
+ next_state = SIRDEV_STATE_DONGLE_CHECK;
+ break;
+
+ case SIRDEV_STATE_DONGLE_CHECK:
+ ret = sirdev_tx_complete_fsm(dev);
+ if (ret < 0) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ if ((delay=ret) != 0)
+ break;
+
+ if (dev->dongle_drv) {
+ fsm->substate = SIRDEV_STATE_DONGLE_RESET;
+ next_state = SIRDEV_STATE_DONGLE_RESET;
+ }
+ else {
+ dev->speed = fsm->param;
+ next_state = SIRDEV_STATE_PORT_SPEED;
+ }
+ break;
+
+ case SIRDEV_STATE_DONGLE_RESET:
+ if (dev->dongle_drv->reset) {
+ ret = dev->dongle_drv->reset(dev);
+ if (ret < 0) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ }
+ else
+ ret = 0;
+ if ((delay=ret) == 0) {
+ /* set serial port according to dongle default speed */
+ if (dev->drv->set_speed)
+ dev->drv->set_speed(dev, dev->speed);
+ fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
+ next_state = SIRDEV_STATE_DONGLE_SPEED;
+ }
+ break;
+
+ case SIRDEV_STATE_DONGLE_SPEED:
+ if (dev->dongle_drv->reset) {
+ ret = dev->dongle_drv->set_speed(dev, fsm->param);
+ if (ret < 0) {
+ fsm->result = ret;
+ next_state = SIRDEV_STATE_ERROR;
+ break;
+ }
+ }
+ else
+ ret = 0;
+ if ((delay=ret) == 0)
+ next_state = SIRDEV_STATE_PORT_SPEED;
+ break;
+
+ case SIRDEV_STATE_PORT_SPEED:
+ /* Finally we are ready to change the serial port speed */
+ if (dev->drv->set_speed)
+ dev->drv->set_speed(dev, dev->speed);
+ dev->new_speed = 0;
+ next_state = SIRDEV_STATE_DONE;
+ break;
+
+ case SIRDEV_STATE_DONE:
+ /* Signal network layer so it can send more frames */
+ netif_wake_queue(dev->netdev);
+ next_state = SIRDEV_STATE_COMPLETE;
+ break;
+
+ default:
+ IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
+ fsm->result = -EINVAL;
+ /* fall thru */
+
+ case SIRDEV_STATE_ERROR:
+ IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
+
+#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
+ netif_stop_queue(dev->netdev);
+#else
+ netif_wake_queue(dev->netdev);
+#endif
+ /* fall thru */
+
+ case SIRDEV_STATE_COMPLETE:
+ /* config change finished, so we are not busy any longer */
+ sirdev_enable_rx(dev);
+ up(&fsm->sem);
+ return;
+ }
+ fsm->state = next_state;
+ } while(!delay);
+
+ queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
+}
+
+/* schedule some device configuration task for execution by kIrDAd
+ * on behalf of the above state machine.
+ * can be called from process or interrupt/tasklet context.
+ */
+
+int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
+{
+ struct sir_fsm *fsm = &dev->fsm;
+
+ IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
+
+ if (down_trylock(&fsm->sem)) {
+ if (in_interrupt() || in_atomic() || irqs_disabled()) {
+ IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
+ return -EWOULDBLOCK;
+ } else
+ down(&fsm->sem);
+ }
+
+ if (fsm->state == SIRDEV_STATE_DEAD) {
+ /* race with sirdev_close should never happen */
+ IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__);
+ up(&fsm->sem);
+ return -ESTALE; /* or better EPIPE? */
+ }
+
+ netif_stop_queue(dev->netdev);
+ atomic_set(&dev->enable_rx, 0);
+
+ fsm->state = initial_state;
+ fsm->param = param;
+ fsm->result = 0;
+
+ INIT_WORK(&fsm->work, sirdev_config_fsm, dev);
+ queue_work(irda_sir_wq, &fsm->work);
+ return 0;
+}
+
+
/***************************************************************************/
void sirdev_enable_rx(struct sir_dev *dev)
@@ -619,10 +911,6 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
spin_lock_init(&dev->tx_lock);
init_MUTEX(&dev->fsm.sem);
- INIT_LIST_HEAD(&dev->fsm.rq.lh_request);
- dev->fsm.rq.pending = 0;
- init_timer(&dev->fsm.rq.timer);
-
dev->drv = drv;
dev->netdev = ndev;
@@ -682,3 +970,22 @@ int sirdev_put_instance(struct sir_dev *dev)
}
EXPORT_SYMBOL(sirdev_put_instance);
+static int __init sir_wq_init(void)
+{
+ irda_sir_wq = create_singlethread_workqueue("irda_sir_wq");
+ if (!irda_sir_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+static void __exit sir_wq_exit(void)
+{
+ destroy_workqueue(irda_sir_wq);
+}
+
+module_init(sir_wq_init);
+module_exit(sir_wq_exit);
+
+MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
+MODULE_DESCRIPTION("IrDA SIR core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/sir_kthread.c b/drivers/net/irda/sir_kthread.c
deleted file mode 100644
index e3904d6bfecd..000000000000
--- a/drivers/net/irda/sir_kthread.c
+++ /dev/null
@@ -1,508 +0,0 @@
-/*********************************************************************
- *
- * sir_kthread.c: dedicated thread to process scheduled
- * sir device setup requests
- *
- * Copyright (c) 2002 Martin Diehl
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- ********************************************************************/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/version.h>
-#include <linux/init.h>
-#include <linux/smp_lock.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-
-#include <net/irda/irda.h>
-
-#include "sir-dev.h"
-
-/**************************************************************************
- *
- * kIrDAd kernel thread and config state machine
- *
- */
-
-struct irda_request_queue {
- struct list_head request_list;
- spinlock_t lock;
- task_t *thread;
- struct completion exit;
- wait_queue_head_t kick, done;
- atomic_t num_pending;
-};
-
-static struct irda_request_queue irda_rq_queue;
-
-static int irda_queue_request(struct irda_request *rq)
-{
- int ret = 0;
- unsigned long flags;
-
- if (!test_and_set_bit(0, &rq->pending)) {
- spin_lock_irqsave(&irda_rq_queue.lock, flags);
- list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
- wake_up(&irda_rq_queue.kick);
- atomic_inc(&irda_rq_queue.num_pending);
- spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
- ret = 1;
- }
- return ret;
-}
-
-static void irda_request_timer(unsigned long data)
-{
- struct irda_request *rq = (struct irda_request *)data;
- unsigned long flags;
-
- spin_lock_irqsave(&irda_rq_queue.lock, flags);
- list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
- wake_up(&irda_rq_queue.kick);
- spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
-}
-
-static int irda_queue_delayed_request(struct irda_request *rq, unsigned long delay)
-{
- int ret = 0;
- struct timer_list *timer = &rq->timer;
-
- if (!test_and_set_bit(0, &rq->pending)) {
- timer->expires = jiffies + delay;
- timer->function = irda_request_timer;
- timer->data = (unsigned long)rq;
- atomic_inc(&irda_rq_queue.num_pending);
- add_timer(timer);
- ret = 1;
- }
- return ret;
-}
-
-static void run_irda_queue(void)
-{
- unsigned long flags;
- struct list_head *entry, *tmp;
- struct irda_request *rq;
-
- spin_lock_irqsave(&irda_rq_queue.lock, flags);
- list_for_each_safe(entry, tmp, &irda_rq_queue.request_list) {
- rq = list_entry(entry, struct irda_request, lh_request);
- list_del_init(entry);
- spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
-
- clear_bit(0, &rq->pending);
- rq->func(rq->data);
-
- if (atomic_dec_and_test(&irda_rq_queue.num_pending))
- wake_up(&irda_rq_queue.done);
-
- spin_lock_irqsave(&irda_rq_queue.lock, flags);
- }
- spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
-}
-
-static int irda_thread(void *startup)
-{
- DECLARE_WAITQUEUE(wait, current);
-
- daemonize("kIrDAd");
-
- irda_rq_queue.thread = current;
-
- complete((struct completion *)startup);
-
- while (irda_rq_queue.thread != NULL) {
-
- /* We use TASK_INTERRUPTIBLE, rather than
- * TASK_UNINTERRUPTIBLE. Andrew Morton made this
- * change ; he told me that it is safe, because "signal
- * blocking is now handled in daemonize()", he added
- * that the problem is that "uninterruptible sleep
- * contributes to load average", making user worry.
- * Jean II */
- set_task_state(current, TASK_INTERRUPTIBLE);
- add_wait_queue(&irda_rq_queue.kick, &wait);
- if (list_empty(&irda_rq_queue.request_list))
- schedule();
- else
- __set_task_state(current, TASK_RUNNING);
- remove_wait_queue(&irda_rq_queue.kick, &wait);
-
- /* make swsusp happy with our thread */
- try_to_freeze();
-
- run_irda_queue();
- }
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,35)
- reparent_to_init();
-#endif
- complete_and_exit(&irda_rq_queue.exit, 0);
- /* never reached */
- return 0;
-}
-
-
-static void flush_irda_queue(void)
-{
- if (atomic_read(&irda_rq_queue.num_pending)) {
-
- DECLARE_WAITQUEUE(wait, current);
-
- if (!list_empty(&irda_rq_queue.request_list))
- run_irda_queue();
-
- set_task_state(current, TASK_UNINTERRUPTIBLE);
- add_wait_queue(&irda_rq_queue.done, &wait);
- if (atomic_read(&irda_rq_queue.num_pending))
- schedule();
- else
- __set_task_state(current, TASK_RUNNING);
- remove_wait_queue(&irda_rq_queue.done, &wait);
- }
-}
-
-/* substate handler of the config-fsm to handle the cases where we want
- * to wait for transmit completion before changing the port configuration
- */
-
-static int irda_tx_complete_fsm(struct sir_dev *dev)
-{
- struct sir_fsm *fsm = &dev->fsm;
- unsigned next_state, delay;
- unsigned bytes_left;
-
- do {
- next_state = fsm->substate; /* default: stay in current substate */
- delay = 0;
-
- switch(fsm->substate) {
-
- case SIRDEV_STATE_WAIT_XMIT:
- if (dev->drv->chars_in_buffer)
- bytes_left = dev->drv->chars_in_buffer(dev);
- else
- bytes_left = 0;
- if (!bytes_left) {
- next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
- break;
- }
-
- if (dev->speed > 115200)
- delay = (bytes_left*8*10000) / (dev->speed/100);
- else if (dev->speed > 0)
- delay = (bytes_left*10*10000) / (dev->speed/100);
- else
- delay = 0;
- /* expected delay (usec) until remaining bytes are sent */
- if (delay < 100) {
- udelay(delay);
- delay = 0;
- break;
- }
- /* sleep some longer delay (msec) */
- delay = (delay+999) / 1000;
- break;
-
- case SIRDEV_STATE_WAIT_UNTIL_SENT:
- /* block until underlaying hardware buffer are empty */
- if (dev->drv->wait_until_sent)
- dev->drv->wait_until_sent(dev);
- next_state = SIRDEV_STATE_TX_DONE;
- break;
-
- case SIRDEV_STATE_TX_DONE:
- return 0;
-
- default:
- IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
- return -EINVAL;
- }
- fsm->substate = next_state;
- } while (delay == 0);
- return delay;
-}
-
-/*
- * Function irda_config_fsm
- *
- * State machine to handle the configuration of the device (and attached dongle, if any).
- * This handler is scheduled for execution in kIrDAd context, so we can sleep.
- * however, kIrDAd is shared by all sir_dev devices so we better don't sleep there too
- * long. Instead, for longer delays we start a timer to reschedule us later.
- * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
- * Both must be unlocked/restarted on completion - but only on final exit.
- */
-
-static void irda_config_fsm(void *data)
-{
- struct sir_dev *dev = data;
- struct sir_fsm *fsm = &dev->fsm;
- int next_state;
- int ret = -1;
- unsigned delay;
-
- IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
-
- do {
- IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
- __FUNCTION__, fsm->state, fsm->substate);
-
- next_state = fsm->state;
- delay = 0;
-
- switch(fsm->state) {
-
- case SIRDEV_STATE_DONGLE_OPEN:
- if (dev->dongle_drv != NULL) {
- ret = sirdev_put_dongle(dev);
- if (ret) {
- fsm->result = -EINVAL;
- next_state = SIRDEV_STATE_ERROR;
- break;
- }
- }
-
- /* Initialize dongle */
- ret = sirdev_get_dongle(dev, fsm->param);
- if (ret) {
- fsm->result = ret;
- next_state = SIRDEV_STATE_ERROR;
- break;
- }
-
- /* Dongles are powered through the modem control lines which
- * were just set during open. Before resetting, let's wait for
- * the power to stabilize. This is what some dongle drivers did
- * in open before, while others didn't - should be safe anyway.
- */
-
- delay = 50;
- fsm->substate = SIRDEV_STATE_DONGLE_RESET;
- next_state = SIRDEV_STATE_DONGLE_RESET;
-
- fsm->param = 9600;
-
- break;
-
- case SIRDEV_STATE_DONGLE_CLOSE:
- /* shouldn't we just treat this as success=? */
- if (dev->dongle_drv == NULL) {
- fsm->result = -EINVAL;
- next_state = SIRDEV_STATE_ERROR;
- break;
- }
-
- ret = sirdev_put_dongle(dev);
- if (ret) {
- fsm->result = ret;
- next_state = SIRDEV_STATE_ERROR;
- break;
- }
- next_state = SIRDEV_STATE_DONE;
- break;
-
- case SIRDEV_STATE_SET_DTR_RTS:
- ret = sirdev_set_dtr_rts(dev,
- (fsm->param&0x02) ? TRUE : FALSE,
- (fsm->param&0x01) ? TRUE : FALSE);
- next_state = SIRDEV_STATE_DONE;
- break;
-
- case SIRDEV_STATE_SET_SPEED:
- fsm->substate = SIRDEV_STATE_WAIT_XMIT;
- next_state = SIRDEV_STATE_DONGLE_CHECK;
- break;
-
- case SIRDEV_STATE_DONGLE_CHECK:
- ret = irda_tx_complete_fsm(dev);
- if (ret < 0) {
- fsm->result = ret;
- next_state = SIRDEV_STATE_ERROR;
- break;
- }
- if ((delay=ret) != 0)
- break;
-
- if (dev->dongle_drv) {
- fsm->substate = SIRDEV_STATE_DONGLE_RESET;
- next_state = SIRDEV_STATE_DONGLE_RESET;
- }
- else {
- dev->speed = fsm->param;
- next_state = SIRDEV_STATE_PORT_SPEED;
- }
- break;
-
- case SIRDEV_STATE_DONGLE_RESET:
- if (dev->dongle_drv->reset) {
- ret = dev->dongle_drv->reset(dev);
- if (ret < 0) {
- fsm->result = ret;
- next_state = SIRDEV_STATE_ERROR;
- break;
- }
- }
- else
- ret = 0;
- if ((delay=ret) == 0) {
- /* set serial port according to dongle default speed */
- if (dev->drv->set_speed)
- dev->drv->set_speed(dev, dev->speed);
- fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
- next_state = SIRDEV_STATE_DONGLE_SPEED;
- }
- break;
-
- case SIRDEV_STATE_DONGLE_SPEED:
- if (dev->dongle_drv->reset) {
- ret = dev->dongle_drv->set_speed(dev, fsm->param);
- if (ret < 0) {
- fsm->result = ret;
- next_state = SIRDEV_STATE_ERROR;
- break;
- }
- }
- else
- ret = 0;
- if ((delay=ret) == 0)
- next_state = SIRDEV_STATE_PORT_SPEED;
- break;
-
- case SIRDEV_STATE_PORT_SPEED:
- /* Finally we are ready to change the serial port speed */
- if (dev->drv->set_speed)
- dev->drv->set_speed(dev, dev->speed);
- dev->new_speed = 0;
- next_state = SIRDEV_STATE_DONE;
- break;
-
- case SIRDEV_STATE_DONE:
- /* Signal network layer so it can send more frames */
- netif_wake_queue(dev->netdev);
- next_state = SIRDEV_STATE_COMPLETE;
- break;
-
- default:
- IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
- fsm->result = -EINVAL;
- /* fall thru */
-
- case SIRDEV_STATE_ERROR:
- IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
-
-#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
- netif_stop_queue(dev->netdev);
-#else
- netif_wake_queue(dev->netdev);
-#endif
- /* fall thru */
-
- case SIRDEV_STATE_COMPLETE:
- /* config change finished, so we are not busy any longer */
- sirdev_enable_rx(dev);
- up(&fsm->sem);
- return;
- }
- fsm->state = next_state;
- } while(!delay);
-
- irda_queue_delayed_request(&fsm->rq, msecs_to_jiffies(delay));
-}
-
-/* schedule some device configuration task for execution by kIrDAd
- * on behalf of the above state machine.
- * can be called from process or interrupt/tasklet context.
- */
-
-int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
-{
- struct sir_fsm *fsm = &dev->fsm;
- int xmit_was_down;
-
- IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
-
- if (down_trylock(&fsm->sem)) {
- if (in_interrupt() || in_atomic() || irqs_disabled()) {
- IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
- return -EWOULDBLOCK;
- } else
- down(&fsm->sem);
- }
-
- if (fsm->state == SIRDEV_STATE_DEAD) {
- /* race with sirdev_close should never happen */
- IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__);
- up(&fsm->sem);
- return -ESTALE; /* or better EPIPE? */
- }
-
- xmit_was_down = netif_queue_stopped(dev->netdev);
- netif_stop_queue(dev->netdev);
- atomic_set(&dev->enable_rx, 0);
-
- fsm->state = initial_state;
- fsm->param = param;
- fsm->result = 0;
-
- INIT_LIST_HEAD(&fsm->rq.lh_request);
- fsm->rq.pending = 0;
- fsm->rq.func = irda_config_fsm;
- fsm->rq.data = dev;
-
- if (!irda_queue_request(&fsm->rq)) { /* returns 0 on error! */
- atomic_set(&dev->enable_rx, 1);
- if (!xmit_was_down)
- netif_wake_queue(dev->netdev);
- up(&fsm->sem);
- return -EAGAIN;
- }
- return 0;
-}
-
-static int __init irda_thread_create(void)
-{
- struct completion startup;
- int pid;
-
- spin_lock_init(&irda_rq_queue.lock);
- irda_rq_queue.thread = NULL;
- INIT_LIST_HEAD(&irda_rq_queue.request_list);
- init_waitqueue_head(&irda_rq_queue.kick);
- init_waitqueue_head(&irda_rq_queue.done);
- atomic_set(&irda_rq_queue.num_pending, 0);
-
- init_completion(&startup);
- pid = kernel_thread(irda_thread, &startup, CLONE_FS|CLONE_FILES);
- if (pid <= 0)
- return -EAGAIN;
- else
- wait_for_completion(&startup);
-
- return 0;
-}
-
-static void __exit irda_thread_join(void)
-{
- if (irda_rq_queue.thread) {
- flush_irda_queue();
- init_completion(&irda_rq_queue.exit);
- irda_rq_queue.thread = NULL;
- wake_up(&irda_rq_queue.kick);
- wait_for_completion(&irda_rq_queue.exit);
- }
-}
-
-module_init(irda_thread_create);
-module_exit(irda_thread_join);
-
-MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
-MODULE_DESCRIPTION("IrDA SIR core");
-MODULE_LICENSE("GPL");
-
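[Note on the sir_dev.c/sir_kthread.c hunks: the private kIrDAd kernel thread, request list and per-request timer are replaced by one singlethread workqueue; the config state machine becomes an ordinary work function and long waits are handled by requeueing it with queue_delayed_work(). A minimal module-style sketch of that pattern, using the same 2.6.16-era workqueue API as the patch (names and the retry limit are illustrative):]

```c
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;
static int retries;

/* Runs in process context on the workqueue's kernel thread, so it may
 * sleep briefly; instead of arming a private timer it simply requeues
 * itself with a delay, much as sirdev_config_fsm() now does. */
static void example_fn(void *data)
{
	if (retries++ < 3)
		queue_delayed_work(example_wq, &example_work,
				   msecs_to_jiffies(50));
}

static int __init example_init(void)
{
	example_wq = create_singlethread_workqueue("example_wq");
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&example_work, example_fn, NULL);	/* old 3-argument form */
	queue_work(example_wq, &example_work);
	return 0;
}

static void __exit example_exit(void)
{
	cancel_delayed_work(&example_work);	/* stop a pending requeue */
	destroy_workqueue(example_wq);		/* flushes remaining work */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
```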
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 58f76cefbc83..a4674044bd6f 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -54,6 +54,7 @@
#include <linux/rtnetlink.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>
+#include <linux/pnp.h>
#include <linux/platform_device.h>
#include <asm/io.h>
@@ -358,6 +359,16 @@ static inline void register_bank(int iobase, int bank)
iobase + IRCC_MASTER);
}
+#ifdef CONFIG_PNP
+/* PNP hotplug support */
+static const struct pnp_device_id smsc_ircc_pnp_table[] = {
+ { .id = "SMCf010", .driver_data = 0 },
+ /* and presumably others */
+ { }
+};
+MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table);
+#endif
+
/*******************************************************************************
*
@@ -2072,7 +2083,8 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
/* PROBING
*
- *
+ * REVISIT we can be told about the device by PNP, and should use that info
+ * instead of probing hardware and creating a platform_device ...
*/
static int __init smsc_ircc_look_for_chips(void)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index ea62a3e7d586..411f4d809c47 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1419,6 +1419,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mv643xx_eth_update_pscr(dev, &cmd);
mv643xx_set_settings(dev, &cmd);
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
err = register_netdev(dev);
if (err)
goto out;
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index 93c494bcd18d..b32765215f75 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -139,8 +139,9 @@ bad_clone_list[] __initdata = {
#if defined(CONFIG_PLAT_MAPPI)
# define DCR_VAL 0x4b
-#elif defined(CONFIG_PLAT_OAKS32R)
-# define DCR_VAL 0x48
+#elif defined(CONFIG_PLAT_OAKS32R) || \
+ defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
+# define DCR_VAL 0x48 /* 8-bit mode */
#else
# define DCR_VAL 0x49
#endif
@@ -396,10 +397,22 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
/* We must set the 8390 for word mode. */
outb_p(DCR_VAL, ioaddr + EN0_DCFG);
start_page = NESM_START_PG;
- stop_page = NESM_STOP_PG;
+
+ /*
+ * Realtek RTL8019AS datasheet says that the PSTOP register
+ * shouldn't exceed 0x60 in 8-bit mode.
+ * This chip can be identified by reading the signature from
+ * the remote byte count registers (otherwise write-only)...
+ */
+ if ((DCR_VAL & 0x01) == 0 && /* 8-bit mode */
+ inb(ioaddr + EN0_RCNTLO) == 0x50 &&
+ inb(ioaddr + EN0_RCNTHI) == 0x70)
+ stop_page = 0x60;
+ else
+ stop_page = NESM_STOP_PG;
} else {
start_page = NE1SM_START_PG;
- stop_page = NE1SM_STOP_PG;
+ stop_page = NE1SM_STOP_PG;
}
#if defined(CONFIG_PLAT_MAPPI) || defined(CONFIG_PLAT_OAKS32R)
@@ -509,15 +522,9 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
ei_status.name = name;
ei_status.tx_start_page = start_page;
ei_status.stop_page = stop_page;
-#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
- wordlength = 1;
-#endif
-#ifdef CONFIG_PLAT_OAKS32R
- ei_status.word16 = 0;
-#else
- ei_status.word16 = (wordlength == 2);
-#endif
+ /* Use 16-bit mode only if this wasn't overridden by DCR_VAL */
+ ei_status.word16 = (wordlength == 2 && (DCR_VAL & 0x01));
ei_status.rx_start_page = start_page + TX_PAGES;
#ifdef PACKETBUF_MEMSIZE
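[Note on the ne.c hunks: the stop page is clamped to 0x60 when an RTL8019AS is found running in 8-bit mode, identified by the 0x50/0x70 signature the chip exposes through the otherwise write-only remote byte count registers. A standalone model of that decision; register reads are stubbed, and 0x80 for NESM_STOP_PG matches the 8390 drivers of this era.]

```c
#include <stdio.h>

#define NESM_STOP_PG	0x80

/* stand-in for inb(ioaddr + EN0_RCNTLO / EN0_RCNTHI) on a detected RTL8019AS */
static unsigned char stub_inb(int which)
{
	return which == 0 ? 0x50 : 0x70;
}

static int pick_stop_page(unsigned char dcr_val)
{
	if ((dcr_val & 0x01) == 0 &&		/* byte-wide (8-bit) DMA mode */
	    stub_inb(0) == 0x50 && stub_inb(1) == 0x70)
		return 0x60;			/* RTL8019AS limit in 8-bit mode */
	return NESM_STOP_PG;
}

int main(void)
{
	printf("DCR 0x48 -> stop page 0x%02x\n", pick_stop_page(0x48));
	printf("DCR 0x49 -> stop page 0x%02x\n", pick_stop_page(0x49));
	return 0;
}
```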
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 459443b572ce..1b236bdf6b92 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -60,8 +60,10 @@ int mdiobus_register(struct mii_bus *bus)
for (i = 0; i < PHY_MAX_ADDR; i++) {
struct phy_device *phydev;
- if (bus->phy_mask & (1 << i))
+ if (bus->phy_mask & (1 << i)) {
+ bus->phy_map[i] = NULL;
continue;
+ }
phydev = get_phy_device(bus, i);
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index b82191d2bee1..f5a3bf4d959a 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -127,6 +127,7 @@ static const struct mii_chip_info {
} mii_chip_table[] = {
{ "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN },
{ "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN },
+ { "SiS 900 on Foxconn 661 7MI", 0x0143, 0xBC70, LAN },
{ "Altimata AC101LF PHY", 0x0022, 0x5520, LAN },
{ "ADM 7001 LAN PHY", 0x002e, 0xcc60, LAN },
{ "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN },
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 227df9876a2c..ffd267fab21d 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -51,7 +51,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.2"
+#define DRV_VERSION "1.3"
#define PFX DRV_NAME " "
/*
@@ -79,6 +79,8 @@
#define NAPI_WEIGHT 64
#define PHY_RETRIES 1000
+#define RING_NEXT(x,s) (((x)+1) & ((s)-1))
+
static const u32 default_msg =
NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
| NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
@@ -96,6 +98,10 @@ static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+static int idle_timeout = 100;
+module_param(idle_timeout, int, 0);
+MODULE_PARM_DESC(idle_timeout, "Idle timeout workaround for lost interrupts (ms)");
+
static const struct pci_device_id sky2_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
@@ -122,6 +128,7 @@ MODULE_DEVICE_TABLE(pci, sky2_id_table);
/* Avoid conditionals by using array */
static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
+static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
/* This driver supports yukon2 chipset only */
static const char *yukon2_name[] = {
@@ -298,7 +305,8 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
- if (sky2->autoneg == AUTONEG_ENABLE && hw->chip_id != CHIP_ID_YUKON_XL) {
+ if (sky2->autoneg == AUTONEG_ENABLE &&
+ (hw->chip_id != CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
@@ -326,7 +334,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
if (sky2->autoneg == AUTONEG_ENABLE &&
- hw->chip_id == CHIP_ID_YUKON_XL) {
+ (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
ctrl &= ~PHY_M_PC_DSC_MSK;
ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
}
@@ -442,10 +450,11 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
/* set LED Function Control register */
- gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
- PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
- PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
- PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+ (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
+ PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
+ PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
+ PHY_M_LEDC_STA0_CTRL(7))); /* 1000 Mbps */
/* set Polarity Control register */
gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
@@ -459,6 +468,25 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
/* restore page register */
gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
break;
+ case CHIP_ID_YUKON_EC_U:
+ pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+
+ /* select page 3 to access LED control register */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
+
+ /* set LED Function Control register */
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+ (PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
+ PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */
+ PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
+ PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */
+
+ /* set Blink Rate in LED Timer Control Register */
+ gm_phy_write(hw, port, PHY_MARV_INT_MASK,
+ ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
+ /* restore page register */
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
+ break;
default:
/* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
@@ -467,19 +495,21 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
}
- if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
+ if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == CHIP_REV_YU_EC_A1) {
/* apply fixes in PHY AFE */
- gm_phy_write(hw, port, 22, 255);
+ pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);
+
/* increase differential signal amplitude in 10BASE-T */
- gm_phy_write(hw, port, 24, 0xaa99);
- gm_phy_write(hw, port, 23, 0x2011);
+ gm_phy_write(hw, port, 0x18, 0xaa99);
+ gm_phy_write(hw, port, 0x17, 0x2011);
/* fix for IEEE A/B Symmetry failure in 1000BASE-T */
- gm_phy_write(hw, port, 24, 0xa204);
- gm_phy_write(hw, port, 23, 0x2002);
+ gm_phy_write(hw, port, 0x18, 0xa204);
+ gm_phy_write(hw, port, 0x17, 0x2002);
/* set page register to 0 */
- gm_phy_write(hw, port, 22, 0);
+ gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
} else {
gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
@@ -553,6 +583,11 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
if (sky2->duplex == DUPLEX_FULL)
reg |= GM_GPCR_DUP_FULL;
+
+ /* turn off pause in 10/100mbps half duplex */
+ else if (sky2->speed != SPEED_1000 &&
+ hw->chip_id != CHIP_ID_YUKON_EC_U)
+ sky2->tx_pause = sky2->rx_pause = 0;
} else
reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
@@ -719,7 +754,7 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
{
struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
- sky2->tx_prod = (sky2->tx_prod + 1) % TX_RING_SIZE;
+ sky2->tx_prod = RING_NEXT(sky2->tx_prod, TX_RING_SIZE);
return le;
}
@@ -735,7 +770,7 @@ static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
{
struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
- sky2->rx_put = (sky2->rx_put + 1) % RX_LE_SIZE;
+ sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
return le;
}
@@ -1050,7 +1085,7 @@ static int sky2_up(struct net_device *dev)
/* Enable interrupts from phy/mac for port */
imask = sky2_read32(hw, B0_IMSK);
- imask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
+ imask |= portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask);
return 0;
@@ -1078,7 +1113,7 @@ err_out:
/* Modular subtraction in ring */
static inline int tx_dist(unsigned tail, unsigned head)
{
- return (head - tail) % TX_RING_SIZE;
+ return (head - tail) & (TX_RING_SIZE - 1);
}
/* Number of list elements available for next tx */
@@ -1255,7 +1290,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
le->opcode = OP_BUFFER | HW_OWNER;
fre = sky2->tx_ring
- + ((re - sky2->tx_ring) + i + 1) % TX_RING_SIZE;
+ + RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE);
pci_unmap_addr_set(fre, mapaddr, mapping);
}
@@ -1315,7 +1350,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct tx_ring_info *fre;
- fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE;
+ fre = sky2->tx_ring + RING_NEXT(put + i, TX_RING_SIZE);
pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
@@ -1401,7 +1436,7 @@ static int sky2_down(struct net_device *dev)
/* Disable port IRQ */
imask = sky2_read32(hw, B0_IMSK);
- imask &= ~(sky2->port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
+ imask &= ~portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask);
/* turn off LED's */
@@ -1498,17 +1533,26 @@ static void sky2_link_up(struct sky2_port *sky2)
sky2_write8(hw, SK_REG(port, LNK_LED_REG),
LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
- if (hw->chip_id == CHIP_ID_YUKON_XL) {
+ if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U) {
u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+ u16 led = PHY_M_LEDC_LOS_CTRL(1); /* link active */
+
+ switch(sky2->speed) {
+ case SPEED_10:
+ led |= PHY_M_LEDC_INIT_CTRL(7);
+ break;
+
+ case SPEED_100:
+ led |= PHY_M_LEDC_STA1_CTRL(7);
+ break;
+
+ case SPEED_1000:
+ led |= PHY_M_LEDC_STA0_CTRL(7);
+ break;
+ }
gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
- gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, PHY_M_LEDC_LOS_CTRL(1) | /* LINK/ACT */
- PHY_M_LEDC_INIT_CTRL(sky2->speed ==
- SPEED_10 ? 7 : 0) |
- PHY_M_LEDC_STA1_CTRL(sky2->speed ==
- SPEED_100 ? 7 : 0) |
- PHY_M_LEDC_STA0_CTRL(sky2->speed ==
- SPEED_1000 ? 7 : 0));
+ gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, led);
gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
}
@@ -1583,7 +1627,7 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
sky2->speed = sky2_phy_speed(hw, aux);
/* Pause bits are offset (9..8) */
- if (hw->chip_id == CHIP_ID_YUKON_XL)
+ if (hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)
aux >>= 6;
sky2->rx_pause = (aux & PHY_M_PS_RX_P_EN) != 0;
@@ -1859,35 +1903,28 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
static int sky2_status_intr(struct sky2_hw *hw, int to_do)
{
int work_done = 0;
+ u16 hwidx = sky2_read16(hw, STAT_PUT_IDX);
rmb();
- for(;;) {
+ while (hw->st_idx != hwidx) {
struct sky2_status_le *le = hw->st_le + hw->st_idx;
struct net_device *dev;
struct sky2_port *sky2;
struct sk_buff *skb;
u32 status;
u16 length;
- u8 link, opcode;
- opcode = le->opcode;
- if (!opcode)
- break;
- opcode &= ~HW_OWNER;
-
- hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
- le->opcode = 0;
+ hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
- link = le->link;
- BUG_ON(link >= 2);
- dev = hw->dev[link];
+ BUG_ON(le->link >= 2);
+ dev = hw->dev[le->link];
sky2 = netdev_priv(dev);
length = le->length;
status = le->status;
- switch (opcode) {
+ switch (le->opcode & ~HW_OWNER) {
case OP_RXSTAT:
skb = sky2_receive(sky2, length, status);
if (!skb)
@@ -1927,7 +1964,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
case OP_TXINDEXLE:
/* TX index reports status for both ports */
- sky2_tx_done(hw->dev[0], status & 0xffff);
+ BUILD_BUG_ON(TX_RING_SIZE > 0x1000);
+ sky2_tx_done(hw->dev[0], status & 0xfff);
if (hw->dev[1])
sky2_tx_done(hw->dev[1],
((status >> 24) & 0xff)
@@ -1937,8 +1975,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
default:
if (net_ratelimit())
printk(KERN_WARNING PFX
- "unknown status opcode 0x%x\n", opcode);
- break;
+ "unknown status opcode 0x%x\n", le->opcode);
+ goto exit_loop;
}
}
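
The rewritten sky2_status_intr() above no longer spins on each element's opcode and zeroes it after processing; it reads the hardware put (producer) index once and walks its own consumer index up to it with RING_NEXT(). A toy userspace model of that walk, with simplified stand-in types (the real sky2_status_le also carries link, length and status fields):

/* Toy model of the status-ring walk: consume entries from cons up to the
 * producer index reported by the hardware (here just a variable). */
#include <stdio.h>

#define STATUS_RING_SIZE	64			/* power of two */
#define RING_NEXT(x, s)		(((x) + 1) & ((s) - 1))

struct status_le { unsigned opcode; };			/* stand-in type */

static struct status_le ring[STATUS_RING_SIZE];

int main(void)
{
	unsigned cons = 60, prod = 3;			/* producer has wrapped */
	int work = 0;

	while (cons != prod) {
		struct status_le *le = &ring[cons];

		cons = RING_NEXT(cons, STATUS_RING_SIZE);
		/* ... dispatch on le->opcode here ... */
		(void)le;
		work++;
	}
	printf("processed %d entries\n", work);		/* prints 7 */
	return 0;
}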
@@ -2089,12 +2127,13 @@ static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
*/
static void sky2_idle(unsigned long arg)
{
- struct net_device *dev = (struct net_device *) arg;
+ struct sky2_hw *hw = (struct sky2_hw *) arg;
+ struct net_device *dev = hw->dev[0];
- local_irq_disable();
if (__netif_rx_schedule_prep(dev))
__netif_rx_schedule(dev);
- local_irq_enable();
+
+ mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout));
}
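
sky2_idle() now receives the sky2_hw pointer as its timer cookie and re-arms itself with mod_timer() using the idle_timeout value, matching the setup_timer() change in sky2_probe() later in this diff. The sketch below shows the same self-rearming pattern as a minimal module, written against the 2.6-era timer API this driver uses (setup_timer with an unsigned long cookie); it is an illustration, not part of sky2:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list idle_timer;
static int idle_timeout = 100;			/* milliseconds between runs */

static void idle_fn(unsigned long arg)
{
	/* arg carries whatever cookie was passed to setup_timer() */
	/* ... poke the hardware here ... */
	mod_timer(&idle_timer, jiffies + msecs_to_jiffies(idle_timeout));
}

static int __init idle_init(void)
{
	setup_timer(&idle_timer, idle_fn, 0UL);
	mod_timer(&idle_timer, jiffies + msecs_to_jiffies(idle_timeout));
	return 0;
}

static void __exit idle_exit(void)
{
	del_timer_sync(&idle_timer);		/* same teardown as sky2_remove() */
}

module_init(idle_init);
module_exit(idle_exit);
MODULE_LICENSE("GPL");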
@@ -2105,65 +2144,46 @@ static int sky2_poll(struct net_device *dev0, int *budget)
int work_done = 0;
u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
- restart_poll:
- if (unlikely(status & ~Y2_IS_STAT_BMU)) {
- if (status & Y2_IS_HW_ERR)
- sky2_hw_intr(hw);
-
- if (status & Y2_IS_IRQ_PHY1)
- sky2_phy_intr(hw, 0);
-
- if (status & Y2_IS_IRQ_PHY2)
- sky2_phy_intr(hw, 1);
+ if (status & Y2_IS_HW_ERR)
+ sky2_hw_intr(hw);
- if (status & Y2_IS_IRQ_MAC1)
- sky2_mac_intr(hw, 0);
+ if (status & Y2_IS_IRQ_PHY1)
+ sky2_phy_intr(hw, 0);
- if (status & Y2_IS_IRQ_MAC2)
- sky2_mac_intr(hw, 1);
+ if (status & Y2_IS_IRQ_PHY2)
+ sky2_phy_intr(hw, 1);
- if (status & Y2_IS_CHK_RX1)
- sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1);
+ if (status & Y2_IS_IRQ_MAC1)
+ sky2_mac_intr(hw, 0);
- if (status & Y2_IS_CHK_RX2)
- sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2);
+ if (status & Y2_IS_IRQ_MAC2)
+ sky2_mac_intr(hw, 1);
- if (status & Y2_IS_CHK_TXA1)
- sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1);
+ if (status & Y2_IS_CHK_RX1)
+ sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1);
- if (status & Y2_IS_CHK_TXA2)
- sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
- }
+ if (status & Y2_IS_CHK_RX2)
+ sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2);
- if (status & Y2_IS_STAT_BMU) {
- work_done += sky2_status_intr(hw, work_limit - work_done);
- *budget -= work_done;
- dev0->quota -= work_done;
+ if (status & Y2_IS_CHK_TXA1)
+ sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1);
- if (work_done >= work_limit)
- return 1;
+ if (status & Y2_IS_CHK_TXA2)
+ sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
+ if (status & Y2_IS_STAT_BMU)
sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
- }
-
- mod_timer(&hw->idle_timer, jiffies + HZ);
- local_irq_disable();
- __netif_rx_complete(dev0);
+ work_done = sky2_status_intr(hw, work_limit);
+ *budget -= work_done;
+ dev0->quota -= work_done;
- status = sky2_read32(hw, B0_Y2_SP_LISR);
+ if (work_done >= work_limit)
+ return 1;
- if (unlikely(status)) {
- /* More work pending, try and keep going */
- if (__netif_rx_schedule_prep(dev0)) {
- __netif_rx_reschedule(dev0, work_done);
- status = sky2_read32(hw, B0_Y2_SP_EISR);
- local_irq_enable();
- goto restart_poll;
- }
- }
+ netif_rx_complete(dev0);
- local_irq_enable();
+ status = sky2_read32(hw, B0_Y2_SP_LISR);
return 0;
}
@@ -2244,13 +2264,6 @@ static int __devinit sky2_reset(struct sky2_hw *hw)
return -EOPNOTSUPP;
}
- /* This chip is new and not tested yet */
- if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
- pr_info(PFX "%s: is a version of Yukon 2 chipset that has not been tested yet.\n",
- pci_name(hw->pdev));
- pr_info("Please report success/failure to maintainer <shemminger@osdl.org>\n");
- }
-
/* disable ASF */
if (hw->chip_id <= CHIP_ID_YUKON_EC) {
sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
@@ -3302,7 +3315,10 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
- setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) dev);
+ setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
+ if (idle_timeout > 0)
+ mod_timer(&hw->idle_timer,
+ jiffies + msecs_to_jiffies(idle_timeout));
pci_set_drvdata(pdev, hw);
@@ -3342,6 +3358,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
del_timer_sync(&hw->idle_timer);
sky2_write32(hw, B0_IMSK, 0);
+ synchronize_irq(hw->pdev->irq);
+
dev0 = hw->dev[0];
dev1 = hw->dev[1];
if (dev1)
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index b026f5653f04..8012994c9b93 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -378,6 +378,9 @@ enum {
CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
CHIP_REV_YU_EC_A3 = 2, /* Chip Rev. for Yukon-EC A3 */
+
+ CHIP_REV_YU_EC_U_A0 = 0,
+ CHIP_REV_YU_EC_U_A1 = 1,
};
/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 43f5e86fc559..394339d5e87c 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1652,6 +1652,8 @@ spider_net_enable_card(struct spider_net_card *card)
{ SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
{ SPIDER_NET_GMRWOLCTRL, 0 },
+ { SPIDER_NET_GTESTMD, 0x10000000 },
+ { SPIDER_NET_GTTQMSK, 0x00400040 },
{ SPIDER_NET_GTESTMD, 0 },
{ SPIDER_NET_GMACINTEN, 0 },
@@ -1792,15 +1794,7 @@ spider_net_setup_phy(struct spider_net_card *card)
if (phy->def->ops->setup_forced)
phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL);
- /* the following two writes could be moved to sungem_phy.c */
- /* enable fiber mode */
- spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x9020);
- /* LEDs active in both modes, autosense prio = fiber */
- spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x945f);
-
- /* switch off fibre autoneg */
- spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0xfc01);
- spider_net_write_phy(card->netdev, 1, 0x0b, 0x0004);
+ phy->def->ops->enable_fiber(phy);
phy->def->ops->read_link(phy);
pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name,
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 5922b529a048..3b8d951cf73c 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -120,6 +120,8 @@ extern char spider_net_driver_name[];
#define SPIDER_NET_GMRUAFILnR 0x00000500
#define SPIDER_NET_GMRUA0FIL15R 0x00000578
+#define SPIDER_NET_GTTQMSK 0x00000934
+
/* RX DMA controller registers, all 0x00000a.. are for DMA controller A,
* 0x00000b.. for DMA controller B, etc. */
#define SPIDER_NET_GDADCHA 0x00000a00
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 046371ee5bbe..b2ddd5e79303 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -329,6 +329,30 @@ static int bcm5421_init(struct mii_phy* phy)
return 0;
}
+static int bcm5421_enable_fiber(struct mii_phy* phy)
+{
+ /* enable fiber mode */
+ phy_write(phy, MII_NCONFIG, 0x9020);
+ /* LEDs active in both modes, autosense prio = fiber */
+ phy_write(phy, MII_NCONFIG, 0x945f);
+
+ /* switch off fibre autoneg */
+ phy_write(phy, MII_NCONFIG, 0xfc01);
+ phy_write(phy, 0x0b, 0x0004);
+
+ return 0;
+}
+
+static int bcm5461_enable_fiber(struct mii_phy* phy)
+{
+ phy_write(phy, MII_NCONFIG, 0xfc0c);
+ phy_write(phy, MII_BMCR, 0x4140);
+ phy_write(phy, MII_NCONFIG, 0xfc0b);
+ phy_write(phy, MII_BMCR, 0x0140);
+
+ return 0;
+}
+
static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
{
u16 ctl, adv;
@@ -762,6 +786,7 @@ static struct mii_phy_ops bcm5421_phy_ops = {
.setup_forced = bcm54xx_setup_forced,
.poll_link = genmii_poll_link,
.read_link = bcm54xx_read_link,
+ .enable_fiber = bcm5421_enable_fiber,
};
static struct mii_phy_def bcm5421_phy_def = {
@@ -792,6 +817,25 @@ static struct mii_phy_def bcm5421k2_phy_def = {
.ops = &bcm5421k2_phy_ops
};
+static struct mii_phy_ops bcm5461_phy_ops = {
+ .init = bcm5421_init,
+ .suspend = generic_suspend,
+ .setup_aneg = bcm54xx_setup_aneg,
+ .setup_forced = bcm54xx_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = bcm54xx_read_link,
+ .enable_fiber = bcm5461_enable_fiber,
+};
+
+static struct mii_phy_def bcm5461_phy_def = {
+ .phy_id = 0x002060c0,
+ .phy_id_mask = 0xfffffff0,
+ .name = "BCM5461",
+ .features = MII_GBIT_FEATURES,
+ .magic_aneg = 1,
+ .ops = &bcm5461_phy_ops
+};
+
/* Broadcom BCM 5462 built-in Vesta */
static struct mii_phy_ops bcm5462V_phy_ops = {
.init = bcm5421_init,
@@ -857,6 +901,7 @@ static struct mii_phy_def* mii_phy_table[] = {
&bcm5411_phy_def,
&bcm5421_phy_def,
&bcm5421k2_phy_def,
+ &bcm5461_phy_def,
&bcm5462V_phy_def,
&marvell_phy_def,
&genmii_phy_def,
diff --git a/drivers/net/sungem_phy.h b/drivers/net/sungem_phy.h
index 430544496c52..69e125197fcf 100644
--- a/drivers/net/sungem_phy.h
+++ b/drivers/net/sungem_phy.h
@@ -12,6 +12,7 @@ struct mii_phy_ops
int (*setup_forced)(struct mii_phy *phy, int speed, int fd);
int (*poll_link)(struct mii_phy *phy);
int (*read_link)(struct mii_phy *phy);
+ int (*enable_fiber)(struct mii_phy *phy);
};
/* Structure used to statically define an mii/gii based PHY */
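
Taken together, the spider_net, sungem_phy.c and sungem_phy.h hunks move the fiber-enable register sequences out of the MAC driver and behind a new enable_fiber() method in struct mii_phy_ops, so each PHY (BCM5421, BCM5461) supplies its own sequence and spider_net_setup_phy() simply calls the op. A standalone sketch of that ops-table pattern, with simplified stand-in types rather than the real mii_phy structures:

/* Ops-table sketch: the MAC driver calls a method, each PHY supplies its
 * own implementation.  Types and the printed "sequence" are stand-ins. */
#include <stdio.h>

struct mii_phy;

struct mii_phy_ops {
	int (*enable_fiber)(struct mii_phy *phy);
};

struct mii_phy {
	const char *name;
	const struct mii_phy_ops *ops;
};

static int bcm5421_enable_fiber(struct mii_phy *phy)
{
	printf("%s: fiber-mode register sequence\n", phy->name);
	return 0;
}

static const struct mii_phy_ops bcm5421_ops = {
	.enable_fiber	= bcm5421_enable_fiber,
};

int main(void)
{
	struct mii_phy phy = { .name = "BCM5421", .ops = &bcm5421_ops };

	/* what spider_net_setup_phy() now does instead of raw MII writes */
	if (phy.ops->enable_fiber)
		phy.ops->enable_fiber(&phy);
	return 0;
}

Supporting a new PHY then only means filling in another ops table and phy_def entry, as the BCM5461 hunks do.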
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 73e271e59c6a..2bd9592b75cd 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.56"
-#define DRV_MODULE_RELDATE "Apr 1, 2006"
+#define DRV_MODULE_VERSION "3.57"
+#define DRV_MODULE_RELDATE "Apr 28, 2006"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -974,6 +974,8 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
return err;
}
+static void tg3_link_report(struct tg3 *);
+
/* This will reset the tigon3 PHY if there is no valid
* link unless the FORCE argument is non-zero.
*/
@@ -987,6 +989,11 @@ static int tg3_phy_reset(struct tg3 *tp)
if (err != 0)
return -EBUSY;
+ if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
+ netif_carrier_off(tp->dev);
+ tg3_link_report(tp);
+ }
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
@@ -1023,6 +1030,12 @@ out:
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
}
+ else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
+ }
/* Set Extended packet length bit (bit 14) on all chips that */
/* support jumbo frames */
if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
@@ -3531,7 +3544,7 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
return IRQ_RETVAL(0);
}
-static int tg3_init_hw(struct tg3 *);
+static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3567,7 +3580,7 @@ static void tg3_reset_task(void *_data)
tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
- tg3_init_hw(tp);
+ tg3_init_hw(tp, 1);
tg3_netif_start(tp);
@@ -4042,7 +4055,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_set_mtu(dev, tp, new_mtu);
- tg3_init_hw(tp);
+ tg3_init_hw(tp, 0);
tg3_netif_start(tp);
@@ -5719,9 +5732,23 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
if (!netif_running(dev))
return 0;
- spin_lock_bh(&tp->lock);
- __tg3_set_mac_addr(tp);
- spin_unlock_bh(&tp->lock);
+ if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
+ /* Reset chip so that ASF can re-init any MAC addresses it
+ * needs.
+ */
+ tg3_netif_stop(tp);
+ tg3_full_lock(tp, 1);
+
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_init_hw(tp, 0);
+
+ tg3_netif_start(tp);
+ tg3_full_unlock(tp);
+ } else {
+ spin_lock_bh(&tp->lock);
+ __tg3_set_mac_addr(tp);
+ spin_unlock_bh(&tp->lock);
+ }
return 0;
}
@@ -5771,7 +5798,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
}
/* tp->lock is held. */
-static int tg3_reset_hw(struct tg3 *tp)
+static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
u32 val, rdmac_mode;
int i, err, limit;
@@ -5786,7 +5813,7 @@ static int tg3_reset_hw(struct tg3 *tp)
tg3_abort_hw(tp, 1);
}
- if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
+ if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
tg3_phy_reset(tp);
err = tg3_chip_reset(tp);
@@ -6327,7 +6354,7 @@ static int tg3_reset_hw(struct tg3 *tp)
tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
}
- err = tg3_setup_phy(tp, 1);
+ err = tg3_setup_phy(tp, reset_phy);
if (err)
return err;
@@ -6400,7 +6427,7 @@ static int tg3_reset_hw(struct tg3 *tp)
/* Called at device open time to get the chip ready for
* packet processing. Invoked with tp->lock held.
*/
-static int tg3_init_hw(struct tg3 *tp)
+static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
int err;
@@ -6413,7 +6440,7 @@ static int tg3_init_hw(struct tg3 *tp)
tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
- err = tg3_reset_hw(tp);
+ err = tg3_reset_hw(tp, reset_phy);
out:
return err;
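
The recurring tg3 change in this diff threads a reset_phy flag through tg3_init_hw() and tg3_reset_hw(): open, resume and the error/reset paths pass 1, while tg3_change_mtu() and the ASF branch of tg3_set_mac_addr() pass 0 to preserve the current PHY state. A deliberately tiny standalone sketch of the flag-threading pattern, with placeholder bodies:

/* Flag-threading sketch: callers decide whether the init path also
 * resets the PHY.  Function bodies are placeholders. */
#include <stdio.h>

static void phy_reset(void)	{ puts("PHY reset"); }
static void chip_reset(void)	{ puts("chip reset"); }

static int reset_hw(int reset_phy)
{
	if (reset_phy)
		phy_reset();		/* only when the caller asked for it */
	chip_reset();
	return 0;
}

static int init_hw(int reset_phy)
{
	return reset_hw(reset_phy);
}

int main(void)
{
	init_hw(1);	/* open/resume style call */
	init_hw(0);	/* change_mtu style call: keep PHY state */
	return 0;
}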
@@ -6683,7 +6710,7 @@ static int tg3_test_msi(struct tg3 *tp)
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- err = tg3_init_hw(tp);
+ err = tg3_init_hw(tp, 1);
tg3_full_unlock(tp);
@@ -6748,7 +6775,7 @@ static int tg3_open(struct net_device *dev)
tg3_full_lock(tp, 0);
- err = tg3_init_hw(tp);
+ err = tg3_init_hw(tp, 1);
if (err) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_free_rings(tp);
@@ -7839,7 +7866,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_init_hw(tp);
+ tg3_init_hw(tp, 1);
tg3_netif_start(tp);
}
@@ -7884,7 +7911,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_init_hw(tp);
+ tg3_init_hw(tp, 1);
tg3_netif_start(tp);
}
@@ -8427,6 +8454,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
tx_len = 1514;
skb = dev_alloc_skb(tx_len);
+ if (!skb)
+ return -ENOMEM;
+
tx_data = skb_put(skb, tx_len);
memcpy(tx_data, tp->dev->dev_addr, 6);
memset(tx_data + 6, 0x0, 8);
@@ -8522,7 +8552,7 @@ static int tg3_test_loopback(struct tg3 *tp)
if (!netif_running(tp->dev))
return TG3_LOOPBACK_FAILED;
- tg3_reset_hw(tp);
+ tg3_reset_hw(tp, 1);
if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
err |= TG3_MAC_LOOPBACK_FAILED;
@@ -8596,7 +8626,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
if (netif_running(dev)) {
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp);
+ tg3_init_hw(tp, 1);
tg3_netif_start(tp);
}
@@ -9377,7 +9407,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
if ((page_off == 0) || (i == 0))
nvram_cmd |= NVRAM_CMD_FIRST;
- else if (page_off == (tp->nvram_pagesize - 4))
+ if (page_off == (tp->nvram_pagesize - 4))
nvram_cmd |= NVRAM_CMD_LAST;
if (i == (len - 4))
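
The else-if/if change above matters when the first word of a buffered NVRAM write (i == 0) also sits on the last word of a page: that word needs both NVRAM_CMD_FIRST and NVRAM_CMD_LAST, and the old else-if dropped LAST whenever FIRST had already been set. A small standalone illustration, with placeholder flag values:

/* Why FIRST and LAST must be tested independently: the same word can
 * legitimately need both flags.  Flag values are placeholders. */
#include <stdio.h>

#define CMD_FIRST	0x1
#define CMD_LAST	0x2

static unsigned flags_for(unsigned page_off, unsigned i,
			  unsigned pagesize, int old_behaviour)
{
	unsigned cmd = 0;

	if (page_off == 0 || i == 0)
		cmd |= CMD_FIRST;

	if (old_behaviour && (cmd & CMD_FIRST)) {
		/* old code: "else if" never considered LAST here */
	} else if (page_off == pagesize - 4) {
		cmd |= CMD_LAST;
	}
	return cmd;
}

int main(void)
{
	/* transfer starts (i == 0) on the last word of a 256-byte page */
	printf("fixed: 0x%x  old: 0x%x\n",
	       flags_for(252, 0, 256, 0),	/* 0x3: FIRST | LAST */
	       flags_for(252, 0, 256, 1));	/* 0x1: LAST is lost */
	return 0;
}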
@@ -10353,10 +10383,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
- if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787))
- tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
+ if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
+ else
+ tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
+ }
tp->coalesce_mode = 0;
if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
@@ -11569,7 +11602,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
tg3_full_lock(tp, 0);
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp);
+ tg3_init_hw(tp, 1);
tp->timer.expires = jiffies + tp->timer_offset;
add_timer(&tp->timer);
@@ -11603,7 +11636,7 @@ static int tg3_resume(struct pci_dev *pdev)
tg3_full_lock(tp, 0);
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp);
+ tg3_init_hw(tp, 1);
tp->timer.expires = jiffies + tp->timer_offset;
add_timer(&tp->timer);
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 8c8b987d1250..0e29b885d449 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2215,6 +2215,7 @@ struct tg3 {
#define TG3_FLG2_HW_TSO_2 0x08000000
#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
#define TG3_FLG2_1SHOT_MSI 0x10000000
+#define TG3_FLG2_PHY_JITTER_BUG 0x20000000
u32 split_mode_max_reqs;
#define SPLIT_MODE_5704_MAX_REQ 3
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 6a23964c1317..a6dc53b4250d 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -129,6 +129,7 @@
- Massive clean-up
- Rewrite PHY, media handling (remove options, full_duplex, backoff)
- Fix Tx engine race for good
+ - Craig Brind: Zero padded aligned buffers for short packets.
*/
@@ -1326,7 +1327,12 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
rp->stats.tx_dropped++;
return 0;
}
+
+ /* Padding is not copied and so must be redone. */
skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
+ if (skb->len < ETH_ZLEN)
+ memset(rp->tx_buf[entry] + skb->len, 0,
+ ETH_ZLEN - skb->len);
rp->tx_skbuff_dma[entry] = 0;
rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
(rp->tx_buf[entry] -
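
The via-rhine hunk pads short frames: skb_copy_and_csum_dev() copies only skb->len bytes into the driver's aligned bounce buffer, so anything shorter than ETH_ZLEN (60 bytes) must have the rest of the buffer zeroed or stale bytes from an earlier packet would be transmitted as padding. A standalone sketch of the copy-and-pad step, with a toy buffer standing in for the DMA bounce buffer:

/* Copy-and-pad sketch: zero the tail of the bounce buffer so frames
 * shorter than the Ethernet minimum carry zero padding, not stale data. */
#include <stdio.h>
#include <string.h>

#define ETH_ZLEN 60	/* minimum Ethernet frame length, FCS excluded */

int main(void)
{
	unsigned char tx_buf[ETH_ZLEN];
	const unsigned char pkt[] = { 0xde, 0xad, 0xbe, 0xef };
	size_t len = sizeof(pkt);

	memset(tx_buf, 0xa5, sizeof(tx_buf));	/* pretend: leftover data */
	memcpy(tx_buf, pkt, len);		/* the skb_copy_and_csum_dev() step */
	if (len < ETH_ZLEN)
		memset(tx_buf + len, 0, ETH_ZLEN - len);

	printf("byte 59 = 0x%02x\n", tx_buf[ETH_ZLEN - 1]);	/* 0x00 */
	return 0;
}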
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 9a06e61df0a2..e2982a83ae42 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -939,9 +939,9 @@ static int bcm43xx_sprom_extract(struct bcm43xx_private *bcm)
return 0;
}
-static void bcm43xx_geo_init(struct bcm43xx_private *bcm)
+static int bcm43xx_geo_init(struct bcm43xx_private *bcm)
{
- struct ieee80211_geo geo;
+ struct ieee80211_geo *geo;
struct ieee80211_channel *chan;
int have_a = 0, have_bg = 0;
int i;
@@ -949,7 +949,10 @@ static void bcm43xx_geo_init(struct bcm43xx_private *bcm)
struct bcm43xx_phyinfo *phy;
const char *iso_country;
- memset(&geo, 0, sizeof(geo));
+ geo = kzalloc(sizeof(*geo), GFP_KERNEL);
+ if (!geo)
+ return -ENOMEM;
+
for (i = 0; i < bcm->nr_80211_available; i++) {
phy = &(bcm->core_80211_ext[i].phy);
switch (phy->type) {
@@ -967,31 +970,36 @@ static void bcm43xx_geo_init(struct bcm43xx_private *bcm)
iso_country = bcm43xx_locale_iso(bcm->sprom.locale);
if (have_a) {
- for (i = 0, channel = 0; channel < 201; channel++) {
- chan = &geo.a[i++];
+ for (i = 0, channel = IEEE80211_52GHZ_MIN_CHANNEL;
+ channel <= IEEE80211_52GHZ_MAX_CHANNEL; channel++) {
+ chan = &geo->a[i++];
chan->freq = bcm43xx_channel_to_freq_a(channel);
chan->channel = channel;
}
- geo.a_channels = i;
+ geo->a_channels = i;
}
if (have_bg) {
- for (i = 0, channel = 1; channel < 15; channel++) {
- chan = &geo.bg[i++];
+ for (i = 0, channel = IEEE80211_24GHZ_MIN_CHANNEL;
+ channel <= IEEE80211_24GHZ_MAX_CHANNEL; channel++) {
+ chan = &geo->bg[i++];
chan->freq = bcm43xx_channel_to_freq_bg(channel);
chan->channel = channel;
}
- geo.bg_channels = i;
+ geo->bg_channels = i;
}
- memcpy(geo.name, iso_country, 2);
+ memcpy(geo->name, iso_country, 2);
if (0 /*TODO: Outdoor use only */)
- geo.name[2] = 'O';
+ geo->name[2] = 'O';
else if (0 /*TODO: Indoor use only */)
- geo.name[2] = 'I';
+ geo->name[2] = 'I';
else
- geo.name[2] = ' ';
- geo.name[3] = '\0';
+ geo->name[2] = ' ';
+ geo->name[3] = '\0';
+
+ ieee80211_set_geo(bcm->ieee, geo);
+ kfree(geo);
- ieee80211_set_geo(bcm->ieee, &geo);
+ return 0;
}
/* DummyTransmission function, as documented on
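
bcm43xx_geo_init() stops building struct ieee80211_geo on the stack (it carries full per-band channel arrays and is sizeable) and instead kzalloc()s it, hands it to ieee80211_set_geo(), frees it, and returns -ENOMEM on allocation failure so bcm43xx_attach_board() can unwind. A userspace analogue of the pattern, using calloc() in place of kzalloc() and a much smaller stand-in structure:

/* Heap-instead-of-stack sketch: a large scratch structure is allocated,
 * filled, handed off, then freed, and allocation failure is reported. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct geo {				/* stand-in for struct ieee80211_geo */
	char name[4];
	int bg_channels;
	int bg[14];
};

static void set_geo(const struct geo *g)	/* stand-in consumer */
{
	printf("%s: %d channels\n", g->name, g->bg_channels);
}

static int geo_init(void)
{
	struct geo *geo = calloc(1, sizeof(*geo));	/* kzalloc() analogue */
	int ch;

	if (!geo)
		return -1;			/* -ENOMEM in the driver */

	for (ch = 1; ch <= 14; ch++)
		geo->bg[geo->bg_channels++] = ch;
	memcpy(geo->name, "DE ", 4);

	set_geo(geo);				/* consumer keeps its own copy */
	free(geo);
	return 0;
}

int main(void)
{
	return geo_init() ? 1 : 0;
}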
@@ -3479,16 +3487,17 @@ static int bcm43xx_attach_board(struct bcm43xx_private *bcm)
goto err_80211_unwind;
bcm43xx_wireless_core_disable(bcm);
}
+ err = bcm43xx_geo_init(bcm);
+ if (err)
+ goto err_80211_unwind;
bcm43xx_pctl_set_crystal(bcm, 0);
/* Set the MAC address in the networking subsystem */
- if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_A)
+ if (is_valid_ether_addr(bcm->sprom.et1macaddr))
memcpy(bcm->net_dev->dev_addr, bcm->sprom.et1macaddr, 6);
else
memcpy(bcm->net_dev->dev_addr, bcm->sprom.il0macaddr, 6);
- bcm43xx_geo_init(bcm);
-
snprintf(bcm->nick, IW_ESSID_MAX_SIZE,
"Broadcom %04X", bcm->chip_id);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
index eca79a38594a..30a202b258b5 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
@@ -118,12 +118,14 @@ int bcm43xx_channel_to_freq(struct bcm43xx_private *bcm,
static inline
int bcm43xx_is_valid_channel_a(u8 channel)
{
- return (channel <= 200);
+ return (channel >= IEEE80211_52GHZ_MIN_CHANNEL
+ && channel <= IEEE80211_52GHZ_MAX_CHANNEL);
}
static inline
int bcm43xx_is_valid_channel_bg(u8 channel)
{
- return (channel >= 1 && channel <= 14);
+ return (channel >= IEEE80211_24GHZ_MIN_CHANNEL
+ && channel <= IEEE80211_24GHZ_MAX_CHANNEL);
}
static inline
int bcm43xx_is_valid_channel(struct bcm43xx_private *bcm,
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
index 33137165727f..b0abac515530 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
@@ -1287,7 +1287,7 @@ static void bcm43xx_phy_initg(struct bcm43xx_private *bcm)
if (radio->revision == 8)
bcm43xx_phy_write(bcm, 0x0805, 0x3230);
bcm43xx_phy_init_pctl(bcm);
- if (bcm->chip_id == 0x4306 && bcm->chip_package != 2) {
+ if (bcm->chip_id == 0x4306 && bcm->chip_package == 2) {
bcm43xx_phy_write(bcm, 0x0429,
bcm43xx_phy_read(bcm, 0x0429) & 0xBFFF);
bcm43xx_phy_write(bcm, 0x04C3,
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
index 3edbb481a0a0..b45063974ae9 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
@@ -182,8 +182,11 @@ static int bcm43xx_wx_set_mode(struct net_device *net_dev,
mode = BCM43xx_INITIAL_IWMODE;
bcm43xx_lock_mmio(bcm, flags);
- if (bcm->ieee->iw_mode != mode)
- bcm43xx_set_iwmode(bcm, mode);
+ if (bcm->initialized) {
+ if (bcm->ieee->iw_mode != mode)
+ bcm43xx_set_iwmode(bcm, mode);
+ } else
+ bcm->ieee->iw_mode = mode;
bcm43xx_unlock_mmio(bcm, flags);
return 0;