author     Rob Herring <r.herring@freescale.com>    2009-01-25 19:43:26 -0500
committer  Rob Herring <r.herring@freescale.com>    2009-02-17 11:32:54 -0600
commit     aa2643ed8f7c03f8dc68b7f6f5b290f490385a43 (patch)
tree       de6fe3e0850e5195b3f33e694c6345faabd1754f /drivers/net
parent     484ecd8c05117a795d856e57e45be48ecea07eae (diff)
ENGR00107731-2: Port imx 3.3.0 release to 2.6.28
Port rel_imx_2.6.26_3.3.0 to 2.6.28. PMIC Regulator and ASoC drivers are removed and not yet ported.

Updated asm/arch headers for move to plat-mxc/include/mach.
device_create parameters changed.
sysdev attribute functions changed.
Adopt mainline MX3 timer code and update clock init flow.

Signed-off-by: Rob Herring <r.herring@freescale.com>
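The device_create note above refers to the API change between 2.6.26 and 2.6.28; a minimal sketch of the adjustment ported drivers needed (my_class, parent, devt, priv, id and the name format are placeholders, not taken from this patch):

    /* 2.6.26-era call: no drvdata argument, private data attached afterwards */
    dev = device_create(my_class, parent, devt, "mydev%d", id);
    dev_set_drvdata(dev, priv);

    /* 2.6.28: drvdata is passed directly to device_create() */
    dev = device_create(my_class, parent, devt, priv, "mydev%d", id);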
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig                |   52
-rw-r--r--  drivers/net/Makefile               |    1
-rw-r--r--  drivers/net/can/Kconfig            |    9
-rw-r--r--  drivers/net/can/Makefile           |    1
-rw-r--r--  drivers/net/can/flexcan/Makefile   |    3
-rw-r--r--  drivers/net/can/flexcan/dev.c      |  619
-rw-r--r--  drivers/net/can/flexcan/drv.c      |  624
-rw-r--r--  drivers/net/can/flexcan/flexcan.h  |  223
-rw-r--r--  drivers/net/can/flexcan/mbm.c      |  347
-rw-r--r--  drivers/net/cs89x0.c               |   21
-rw-r--r--  drivers/net/fec.c                  |  695
-rw-r--r--  drivers/net/fec.h                  |   18
-rw-r--r--  drivers/net/irda/Kconfig           |    4
-rw-r--r--  drivers/net/irda/Makefile          |    1
-rw-r--r--  drivers/net/irda/mxc_ir.c          | 1777
-rw-r--r--  drivers/net/irda/mxc_ir.h          |  133
-rw-r--r--  drivers/net/smc911x.h              |    8
-rw-r--r--  drivers/net/smsc911x.c             | 2198
-rw-r--r--  drivers/net/smsc911x.h             |  395
19 files changed, 7019 insertions, 110 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 231eeaf1d552..515f86fbccc9 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -970,14 +970,26 @@ config SMC911X
help
This is a driver for SMSC's LAN911x series of Ethernet chipsets
including the new LAN9115, LAN9116, LAN9117, and LAN9118.
- Say Y if you want it compiled into the kernel,
+ Say Y if you want it compiled into the kernel,
and read the Ethernet-HOWTO, available from
<http://www.linuxdoc.org/docs.html#howto>.
- This driver is also available as a module. The module will be
- called smc911x. If you want to compile it as a module, say M
+ This driver is also available as a module. The module will be
+ called smc911x. If you want to compile it as a module, say M
here and read <file:Documentation/kbuild/modules.txt>
+config SMSC911X
+ tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
+ depends on NET_ETHERNET
+ select CRC32
+ select MII
+ ---help---
+ Say Y here if you want support for SMSC LAN911x and LAN921x families
+ of ethernet controllers.
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/networking/net-modules.txt>. The module
+ will be called smsc911x.
+
config NET_VENDOR_RACAL
bool "Racal-Interlan (Micom) NI cards"
depends on ISA
@@ -1393,7 +1405,7 @@ config FORCEDETH_NAPI
config CS89x0
tristate "CS89x0 support"
depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \
- || ARCH_IXDP2X01 || ARCH_PNX010X || MACH_MX31ADS)
+ || ARCH_IXDP2X01 || ARCH_PNX010X || ARCH_MXC)
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the
@@ -1407,7 +1419,7 @@ config CS89x0
config CS89x0_NONISA_IRQ
def_bool y
depends on CS89x0 != n
- depends on MACH_IXDP2351 || ARCH_IXDP2X01 || ARCH_PNX010X || MACH_MX31ADS
+ depends on MACH_IXDP2351 || ARCH_IXDP2X01 || ARCH_PNX010X || ARCH_MXC
config TC35815
tristate "TOSHIBA TC35815 Ethernet support"
@@ -1433,9 +1445,9 @@ config E100
select MII
---help---
This driver supports Intel(R) PRO/100 family of adapters.
- To verify that your adapter is supported, find the board ID number
- on the adapter. Look for a label that has a barcode and a number
- in the format 123456-001 (six digits hyphen three digits).
+ To verify that your adapter is supported, find the board ID number
+ on the adapter. Look for a label that has a barcode and a number
+ in the format 123456-001 (six digits hyphen three digits).
Use the above information and the Adapter & Driver ID Guide at:
@@ -1447,7 +1459,7 @@ config E100
<http://appsr.intel.com/scripts-df/support_intel.asp>
- More specific information on configuring the driver is in
+ More specific information on configuring the driver is in
<file:Documentation/networking/e100.txt>.
To compile this driver as a module, choose M here. The module
@@ -1810,11 +1822,11 @@ config 68360_ENET
the Motorola 68360 processor.
config FEC
- bool "FEC ethernet controller (of ColdFire CPUs)"
- depends on M523x || M527x || M5272 || M528x || M520x
+ tristate "FEC ethernet controller"
+ depends on M523x || M527x || M5272 || M528x || M520x || ARCH_MX27 || ARCH_MX37 || ARCH_MX35 || ARCH_MX51 || ARCH_MX25
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
- controller on some Motorola ColdFire processors.
+ controller on some Motorola/Freescale processors.
config FEC2
bool "Second FEC ethernet controller (on some ColdFire CPUs)"
@@ -1935,7 +1947,7 @@ config E1000
depends on PCI
---help---
This driver supports Intel(R) PRO/1000 gigabit ethernet family of
- adapters. For more information on how to identify your adapter, go
+ adapters. For more information on how to identify your adapter, go
to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
@@ -1945,7 +1957,7 @@ config E1000
<http://support.intel.com>
- More specific information on configuring the driver is in
+ More specific information on configuring the driver is in
<file:Documentation/networking/e1000.txt>.
To compile this driver as a module, choose M here. The module
@@ -2122,7 +2134,7 @@ config SKGE
and related Gigabit Ethernet adapters. It is a new smaller driver
with better performance and more complete ethtool support.
- It does not support the link failover and network management
+ It does not support the link failover and network management
features that "portable" vendor supplied sk98lin driver does.
This driver supports adapters based on the original Yukon chipset:
@@ -2466,7 +2478,7 @@ config IXGB
<http://support.intel.com>
- More specific information on configuring the driver is in
+ More specific information on configuring the driver is in
<file:Documentation/networking/ixgb.txt>.
To compile this driver as a module, choose M here. The module
@@ -2476,8 +2488,8 @@ config S2IO
tristate "S2IO 10Gbe XFrame NIC"
depends on PCI
---help---
- This driver supports the 10Gbe XFrame NIC of S2IO.
- More specific information on configuring the driver is in
+ This driver supports the 10Gbe XFrame NIC of S2IO.
+ More specific information on configuring the driver is in
<file:Documentation/networking/s2io.txt>.
config MYRI10GE
@@ -2897,9 +2909,9 @@ config PPPOE
Support for PPP over Ethernet.
This driver requires the latest version of pppd from the CVS
- repository at cvs.samba.org. Alternatively, see the
+ repository at cvs.samba.org. Alternatively, see the
RoaringPenguin package (<http://www.roaringpenguin.com/pppoe>)
- which contains instruction on how to use this driver (under
+ which contains instruction on how to use this driver (under
the heading "Kernel mode PPPoE").
config PPPOATM
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 017383ad5ec6..e77876c28738 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -220,6 +220,7 @@ obj-$(CONFIG_S2IO) += s2io.o
obj-$(CONFIG_MYRI10GE) += myri10ge/
obj-$(CONFIG_SMC91X) += smc91x.o
obj-$(CONFIG_SMC911X) += smc911x.o
+obj-$(CONFIG_SMSC911X) += smsc911x.o
obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
obj-$(CONFIG_DM9000) += dm9000.o
obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 57def0d57371..8df620046fbf 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -22,4 +22,13 @@ config CAN_DEBUG_DEVICES
a problem with CAN support and want to see more of what is going
on.
+config CAN_FLEXCAN
+ tristate "Freescale FlexCAN"
+ depends on CAN && ARCH_MX35
+ default m
+ ---help---
+ This selects support for the Freescale CAN controller (FlexCAN).
+ This driver can also be built as a module.
+ If unsure, say N.
+
endmenu
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index c4bead705cd9..3bc75bf32592 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_CAN_VCAN) += vcan.o
+obj-$(CONFIG_CAN_FLEXCAN) += flexcan/
diff --git a/drivers/net/can/flexcan/Makefile b/drivers/net/can/flexcan/Makefile
new file mode 100644
index 000000000000..b2dbb4fb2793
--- /dev/null
+++ b/drivers/net/can/flexcan/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
+
+flexcan-y := dev.o drv.o mbm.o
diff --git a/drivers/net/can/flexcan/dev.c b/drivers/net/can/flexcan/dev.c
new file mode 100644
index 000000000000..fc7b6a899e53
--- /dev/null
+++ b/drivers/net/can/flexcan/dev.c
@@ -0,0 +1,619 @@
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*!
+ * @file dev.c
+ *
+ * @brief Driver for Freescale CAN Controller FlexCAN.
+ *
+ * @ingroup can
+ */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+
+#include <linux/module.h>
+#include "flexcan.h"
+
+enum {
+ FLEXCAN_ATTR_STATE = 0,
+ FLEXCAN_ATTR_BITRATE,
+ FLEXCAN_ATTR_BR_PRESDIV,
+ FLEXCAN_ATTR_BR_RJW,
+ FLEXCAN_ATTR_BR_PROPSEG,
+ FLEXCAN_ATTR_BR_PSEG1,
+ FLEXCAN_ATTR_BR_PSEG2,
+ FLEXCAN_ATTR_BR_CLKSRC,
+ FLEXCAN_ATTR_MAXMB,
+ FLEXCAN_ATTR_XMIT_MAXMB,
+ FLEXCAN_ATTR_FIFO,
+ FLEXCAN_ATTR_WAKEUP,
+ FLEXCAN_ATTR_SRX_DIS,
+ FLEXCAN_ATTR_WAK_SRC,
+ FLEXCAN_ATTR_BCC,
+ FLEXCAN_ATTR_LOCAL_PRIORITY,
+ FLEXCAN_ATTR_ABORT,
+ FLEXCAN_ATTR_LOOPBACK,
+ FLEXCAN_ATTR_SMP,
+ FLEXCAN_ATTR_BOFF_REC,
+ FLEXCAN_ATTR_TSYN,
+ FLEXCAN_ATTR_LISTEN,
+ FLEXCAN_ATTR_EXTEND_MSG,
+ FLEXCAN_ATTR_STANDARD_MSG,
+#ifdef CONFIG_CAN_DEBUG_DEVICES
+ FLEXCAN_ATTR_DUMP_REG,
+ FLEXCAN_ATTR_DUMP_XMIT_MB,
+ FLEXCAN_ATTR_DUMP_RX_MB,
+#endif
+ FLEXCAN_ATTR_MAX
+};
+
+static ssize_t flexcan_show_attr(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t flexcan_set_attr(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count);
+
+static struct device_attribute flexcan_dev_attr[FLEXCAN_ATTR_MAX] = {
+ [FLEXCAN_ATTR_STATE] = __ATTR(state, 0444, flexcan_show_attr, NULL),
+ [FLEXCAN_ATTR_BITRATE] =
+ __ATTR(bitrate, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_BR_PRESDIV] =
+ __ATTR(br_presdiv, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_BR_RJW] =
+ __ATTR(br_rjw, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_BR_PROPSEG] =
+ __ATTR(br_propseg, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_BR_PSEG1] =
+ __ATTR(br_pseg1, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_BR_PSEG2] =
+ __ATTR(br_pseg2, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_BR_CLKSRC] =
+ __ATTR(br_clksrc, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_MAXMB] =
+ __ATTR(maxmb, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_XMIT_MAXMB] =
+ __ATTR(xmit_maxmb, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_FIFO] =
+ __ATTR(fifo, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_WAKEUP] =
+ __ATTR(wakeup, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_SRX_DIS] =
+ __ATTR(srx_dis, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_WAK_SRC] =
+ __ATTR(wak_src, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_BCC] =
+ __ATTR(bcc, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_LOCAL_PRIORITY] =
+ __ATTR(local_priority, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_ABORT] =
+ __ATTR(abort, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_LOOPBACK] =
+ __ATTR(loopback, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_SMP] =
+ __ATTR(smp, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_BOFF_REC] =
+ __ATTR(boff_rec, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_TSYN] =
+ __ATTR(tsyn, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_LISTEN] =
+ __ATTR(listen, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_EXTEND_MSG] =
+ __ATTR(ext_msg, 0644, flexcan_show_attr, flexcan_set_attr),
+ [FLEXCAN_ATTR_STANDARD_MSG] =
+ __ATTR(std_msg, 0644, flexcan_show_attr, flexcan_set_attr),
+#ifdef CONFIG_CAN_DEBUG_DEVICES
+ [FLEXCAN_ATTR_DUMP_REG] =
+ __ATTR(dump_reg, 0444, flexcan_show_attr, NULL),
+ [FLEXCAN_ATTR_DUMP_XMIT_MB] =
+ __ATTR(dump_xmit_mb, 0444, flexcan_show_attr, NULL),
+ [FLEXCAN_ATTR_DUMP_RX_MB] =
+ __ATTR(dump_rx_mb, 0444, flexcan_show_attr, NULL),
+#endif
+};
+
+static void flexcan_set_bitrate(struct flexcan_device *flexcan, int bitrate)
+{
+ /* TODO:: implement in future
+ * based on the bitrate to get the timing of
+ * presdiv, pseg1, pseg2, propseg
+ */
+}
+
+static void flexcan_update_bitrate(struct flexcan_device *flexcan)
+{
+ int rate, div;
+
+ if (flexcan->br_clksrc)
+ rate = clk_get_rate(flexcan->clk);
+ else {
+ struct clk *clk;
+ clk = clk_get(NULL, "ckih");
+ if (!clk)
+ return;
+ rate = clk_get_rate(clk);
+ clk_put(clk);
+ }
+ if (!rate)
+ return;
+
+ div = (flexcan->br_presdiv + 1);
+ div *=
+ (flexcan->br_propseg + flexcan->br_pseg1 + flexcan->br_pseg2 + 4);
+ flexcan->bitrate = (rate + div - 1) / div;
+}
+
+#ifdef CONFIG_CAN_DEBUG_DEVICES
+static int flexcan_dump_reg(struct flexcan_device *flexcan, char *buf)
+{
+ int ret = 0;
+ unsigned int reg;
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_MCR);
+ ret += sprintf(buf + ret, "MCR::0x%x\n", reg);
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_CTRL);
+ ret += sprintf(buf + ret, "CTRL::0x%x\n", reg);
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_RXGMASK);
+ ret += sprintf(buf + ret, "RXGMASK::0x%x\n", reg);
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_RX14MASK);
+ ret += sprintf(buf + ret, "RX14MASK::0x%x\n", reg);
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_RX15MASK);
+ ret += sprintf(buf + ret, "RX15MASK::0x%x\n", reg);
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_ECR);
+ ret += sprintf(buf + ret, "ECR::0x%x\n", reg);
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_ESR);
+ ret += sprintf(buf + ret, "ESR::0x%x\n", reg);
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_IMASK2);
+ ret += sprintf(buf + ret, "IMASK2::0x%x\n", reg);
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_IMASK1);
+ ret += sprintf(buf + ret, "IMASK1::0x%x\n", reg);
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_IFLAG2);
+ ret += sprintf(buf + ret, "IFLAG2::0x%x\n", reg);
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_IFLAG1);
+ ret += sprintf(buf + ret, "IFLAG1::0x%x\n", reg);
+ return ret;
+}
+
+static int flexcan_dump_xmit_mb(struct flexcan_device *flexcan, char *buf)
+{
+ int ret = 0, i;
+ i = flexcan->xmit_maxmb + 1;
+ for (; i <= flexcan->maxmb; i++)
+ ret +=
+ sprintf(buf + ret,
+ "mb[%d]::CS:0x%x ID:0x%x DATA[1~2]:0x%02x,0x%02x\n",
+ i, flexcan->hwmb[i].mb_cs.data,
+ flexcan->hwmb[i].mb_id, flexcan->hwmb[i].mb_data[1],
+ flexcan->hwmb[i].mb_data[2]);
+ return ret;
+}
+
+static int flexcan_dump_rx_mb(struct flexcan_device *flexcan, char *buf)
+{
+ int ret = 0, i;
+ for (i = 0; i <= flexcan->xmit_maxmb; i++)
+ ret +=
+ sprintf(buf + ret,
+ "mb[%d]::CS:0x%x ID:0x%x DATA[1~2]:0x%02x,0x%02x\n",
+ i, flexcan->hwmb[i].mb_cs.data,
+ flexcan->hwmb[i].mb_id, flexcan->hwmb[i].mb_data[1],
+ flexcan->hwmb[i].mb_data[2]);
+ return ret;
+}
+#endif
+
+static ssize_t flexcan_show_state(struct net_device *net, char *buf)
+{
+ int ret, esr;
+ struct flexcan_device *flexcan = netdev_priv(net);
+ ret = sprintf(buf, "%s::", netif_running(net) ? "Start" : "Stop");
+ if (netif_carrier_ok(net)) {
+ esr = __raw_readl(flexcan->io_base + CAN_HW_REG_ESR);
+ switch ((esr & __ESR_FLT_CONF_MASK) >> __ESR_FLT_CONF_OFF) {
+ case 0:
+ ret += sprintf(buf + ret, "normal\n");
+ break;
+ case 1:
+ ret += sprintf(buf + ret, "error passive\n");
+ break;
+ default:
+ ret += sprintf(buf + ret, "bus off\n");
+ }
+ } else
+ ret += sprintf(buf + ret, "bus off\n");
+ return ret;
+}
+
+static ssize_t flexcan_show_attr(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int attr_id;
+ struct net_device *net;
+ struct flexcan_device *flexcan;
+
+ net = dev_get_drvdata(dev);
+ BUG_ON(!net);
+ flexcan = netdev_priv(net);
+ BUG_ON(!flexcan);
+
+ attr_id = attr - flexcan_dev_attr;
+ switch (attr_id) {
+ case FLEXCAN_ATTR_STATE:
+ return flexcan_show_state(net, buf);
+ case FLEXCAN_ATTR_BITRATE:
+ return sprintf(buf, "%d\n", flexcan->bitrate);
+ case FLEXCAN_ATTR_BR_PRESDIV:
+ return sprintf(buf, "%d\n", flexcan->br_presdiv + 1);
+ case FLEXCAN_ATTR_BR_RJW:
+ return sprintf(buf, "%d\n", flexcan->br_rjw);
+ case FLEXCAN_ATTR_BR_PROPSEG:
+ return sprintf(buf, "%d\n", flexcan->br_propseg + 1);
+ case FLEXCAN_ATTR_BR_PSEG1:
+ return sprintf(buf, "%d\n", flexcan->br_pseg1 + 1);
+ case FLEXCAN_ATTR_BR_PSEG2:
+ return sprintf(buf, "%d\n", flexcan->br_pseg2 + 1);
+ case FLEXCAN_ATTR_BR_CLKSRC:
+ return sprintf(buf, "%s\n", flexcan->br_clksrc ? "bus" : "osc");
+ case FLEXCAN_ATTR_MAXMB:
+ return sprintf(buf, "%d\n", flexcan->maxmb + 1);
+ case FLEXCAN_ATTR_XMIT_MAXMB:
+ return sprintf(buf, "%d\n", flexcan->xmit_maxmb + 1);
+ case FLEXCAN_ATTR_FIFO:
+ return sprintf(buf, "%d\n", flexcan->fifo);
+ case FLEXCAN_ATTR_WAKEUP:
+ return sprintf(buf, "%d\n", flexcan->wakeup);
+ case FLEXCAN_ATTR_SRX_DIS:
+ return sprintf(buf, "%d\n", flexcan->srx_dis);
+ case FLEXCAN_ATTR_WAK_SRC:
+ return sprintf(buf, "%d\n", flexcan->wak_src);
+ case FLEXCAN_ATTR_BCC:
+ return sprintf(buf, "%d\n", flexcan->bcc);
+ case FLEXCAN_ATTR_LOCAL_PRIORITY:
+ return sprintf(buf, "%d\n", flexcan->lprio);
+ case FLEXCAN_ATTR_ABORT:
+ return sprintf(buf, "%d\n", flexcan->abort);
+ case FLEXCAN_ATTR_LOOPBACK:
+ return sprintf(buf, "%d\n", flexcan->loopback);
+ case FLEXCAN_ATTR_SMP:
+ return sprintf(buf, "%d\n", flexcan->smp);
+ case FLEXCAN_ATTR_BOFF_REC:
+ return sprintf(buf, "%d\n", flexcan->boff_rec);
+ case FLEXCAN_ATTR_TSYN:
+ return sprintf(buf, "%d\n", flexcan->tsyn);
+ case FLEXCAN_ATTR_LISTEN:
+ return sprintf(buf, "%d\n", flexcan->listen);
+ case FLEXCAN_ATTR_EXTEND_MSG:
+ return sprintf(buf, "%d\n", flexcan->ext_msg);
+ case FLEXCAN_ATTR_STANDARD_MSG:
+ return sprintf(buf, "%d\n", flexcan->std_msg);
+#ifdef CONFIG_CAN_DEBUG_DEVICES
+ case FLEXCAN_ATTR_DUMP_REG:
+ return flexcan_dump_reg(flexcan, buf);
+ case FLEXCAN_ATTR_DUMP_XMIT_MB:
+ return flexcan_dump_xmit_mb(flexcan, buf);
+ case FLEXCAN_ATTR_DUMP_RX_MB:
+ return flexcan_dump_rx_mb(flexcan, buf);
+#endif
+ default:
+ return sprintf(buf, "%s:%p->%p\n", __func__, flexcan_dev_attr,
+ attr);
+ }
+}
+
+static ssize_t flexcan_set_attr(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ int attr_id, tmp;
+ struct net_device *net;
+ struct flexcan_device *flexcan;
+
+ net = dev_get_drvdata(dev);
+ BUG_ON(!net);
+ flexcan = netdev_priv(net);
+ BUG_ON(!flexcan);
+
+ attr_id = attr - flexcan_dev_attr;
+
+ if (mutex_lock_interruptible(&flexcan->mutex))
+ return count;
+
+ if (netif_running(net))
+ goto set_finish;
+
+ if (attr_id == FLEXCAN_ATTR_BR_CLKSRC) {
+ if (!strcasecmp(buf, "bus"))
+ flexcan->br_clksrc = 1;
+ else if (!strcasecmp(buf, "osc"))
+ flexcan->br_clksrc = 0;
+ goto set_finish;
+ }
+
+ tmp = simple_strtoul(buf, NULL, 0);
+ switch (attr_id) {
+ case FLEXCAN_ATTR_BITRATE:
+ flexcan_set_bitrate(flexcan, tmp);
+ break;
+ case FLEXCAN_ATTR_BR_PRESDIV:
+ if ((tmp > 0) && (tmp <= FLEXCAN_MAX_PRESDIV)) {
+ flexcan->br_presdiv = tmp - 1;
+ flexcan_update_bitrate(flexcan);
+ }
+ break;
+ case FLEXCAN_ATTR_BR_RJW:
+ if ((tmp > 0) && (tmp <= FLEXCAN_MAX_RJW))
+ flexcan->br_rjw = tmp - 1;
+ break;
+ case FLEXCAN_ATTR_BR_PROPSEG:
+ if ((tmp > 0) && (tmp <= FLEXCAN_MAX_PROPSEG)) {
+ flexcan->br_propseg = tmp - 1;
+ flexcan_update_bitrate(flexcan);
+ }
+ break;
+ case FLEXCAN_ATTR_BR_PSEG1:
+ if ((tmp > 0) && (tmp <= FLEXCAN_MAX_PSEG1)) {
+ flexcan->br_pseg1 = tmp - 1;
+ flexcan_update_bitrate(flexcan);
+ }
+ break;
+ case FLEXCAN_ATTR_BR_PSEG2:
+ if ((tmp > 0) && (tmp <= FLEXCAN_MAX_PSEG2)) {
+ flexcan->br_pseg2 = tmp - 1;
+ flexcan_update_bitrate(flexcan);
+ }
+ break;
+ case FLEXCAN_ATTR_MAXMB:
+ if ((tmp > 0) && (tmp <= FLEXCAN_MAX_MB)) {
+ if (flexcan->maxmb != (tmp - 1)) {
+ flexcan->maxmb = tmp - 1;
+ if (flexcan->xmit_maxmb < flexcan->maxmb)
+ flexcan->xmit_maxmb = flexcan->maxmb;
+ }
+ }
+ break;
+ case FLEXCAN_ATTR_XMIT_MAXMB:
+ if ((tmp > 0) && (tmp <= (flexcan->maxmb + 1))) {
+ if (flexcan->xmit_maxmb != (tmp - 1))
+ flexcan->xmit_maxmb = tmp - 1;
+ }
+ break;
+ case FLEXCAN_ATTR_FIFO:
+ flexcan->fifo = tmp ? 1 : 0;
+ break;
+ case FLEXCAN_ATTR_WAKEUP:
+ flexcan->wakeup = tmp ? 1 : 0;
+ break;
+ case FLEXCAN_ATTR_SRX_DIS:
+ flexcan->srx_dis = tmp ? 1 : 0;
+ break;
+ case FLEXCAN_ATTR_WAK_SRC:
+ flexcan->wak_src = tmp ? 1 : 0;
+ break;
+ case FLEXCAN_ATTR_BCC:
+ flexcan->bcc = tmp ? 1 : 0;
+ break;
+ case FLEXCAN_ATTR_LOCAL_PRIORITY:
+ flexcan->lprio = tmp ? 1 : 0;
+ break;
+ case FLEXCAN_ATTR_ABORT:
+ flexcan->abort = tmp ? 1 : 0;
+ break;
+ case FLEXCAN_ATTR_LOOPBACK:
+ flexcan->loopback = tmp ? 1 : 0;
+ break;
+ case FLEXCAN_ATTR_SMP:
+ flexcan->smp = tmp ? 1 : 0;
+ break;
+ case FLEXCAN_ATTR_BOFF_REC:
+ flexcan->boff_rec = tmp ? 1 : 0;
+ break;
+ case FLEXCAN_ATTR_TSYN:
+ flexcan->tsyn = tmp ? 1 : 0;
+ break;
+ case FLEXCAN_ATTR_LISTEN:
+ flexcan->listen = tmp ? 1 : 0;
+ break;
+ case FLEXCAN_ATTR_EXTEND_MSG:
+ flexcan->ext_msg = tmp ? 1 : 0;
+ break;
+ case FLEXCAN_ATTR_STANDARD_MSG:
+ flexcan->std_msg = tmp ? 1 : 0;
+ break;
+ }
+ set_finish:
+ mutex_unlock(&flexcan->mutex);
+ return count;
+}
+
+static void flexcan_device_default(struct flexcan_device *dev)
+{
+ dev->br_clksrc = 1;
+ dev->br_rjw = 2;
+ dev->br_presdiv = 6;
+ dev->br_propseg = 4;
+ dev->br_pseg1 = 4;
+ dev->br_pseg2 = 7;
+
+ dev->bcc = 1;
+ dev->srx_dis = 1;
+ dev->smp = 1;
+ dev->boff_rec = 1;
+
+ dev->maxmb = FLEXCAN_MAX_MB - 1;
+ dev->xmit_maxmb = (FLEXCAN_MAX_MB >> 1) - 1;
+ dev->xmit_mb = dev->maxmb - dev->xmit_maxmb;
+
+ dev->ext_msg = 1;
+ dev->std_msg = 1;
+}
+
+static int flexcan_device_attach(struct flexcan_device *flexcan)
+{
+ int ret;
+ struct resource *res;
+ struct platform_device *pdev = flexcan->dev;
+ struct flexcan_platform_data *plat_data = (pdev->dev).platform_data;
+
+ res = platform_get_resource(flexcan->dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ flexcan->io_base = ioremap(res->start, res->end - res->start + 1);
+ if (!flexcan->io_base)
+ return -ENOMEM;
+
+ flexcan->irq = platform_get_irq(flexcan->dev, 0);
+ if (!flexcan->irq) {
+ ret = -ENODEV;
+ goto no_irq_err;
+ }
+
+ ret = -EINVAL;
+ if (plat_data) {
+ if (plat_data->core_reg) {
+ flexcan->core_reg = regulator_get(&pdev->dev,
+ plat_data->core_reg);
+ if (!flexcan->core_reg)
+ goto plat_err;
+ }
+
+ if (plat_data->io_reg) {
+ flexcan->io_reg = regulator_get(&pdev->dev,
+ plat_data->io_reg);
+ if (!flexcan->io_reg)
+ goto plat_err;
+ }
+ }
+ flexcan->clk = clk_get(&(flexcan->dev)->dev, "can_clk");
+ flexcan->hwmb = (struct can_hw_mb *)(flexcan->io_base + CAN_MB_BASE);
+ flexcan->rx_mask = (unsigned int *)(flexcan->io_base + CAN_RXMASK_BASE);
+
+ return 0;
+ plat_err:
+ if (flexcan->core_reg) {
+ regulator_put(flexcan->core_reg, &pdev->dev);
+ flexcan->core_reg = NULL;
+ }
+ no_irq_err:
+ if (flexcan->io_base)
+ iounmap(flexcan->io_base);
+ return ret;
+}
+
+static void flexcan_device_detach(struct flexcan_device *flexcan)
+{
+ struct platform_device *pdev = flexcan->dev;
+ if (flexcan->clk) {
+ clk_put(flexcan->clk);
+ flexcan->clk = NULL;
+ }
+
+ if (flexcan->io_reg) {
+ regulator_put(flexcan->io_reg, &pdev->dev);
+ flexcan->io_reg = NULL;
+ }
+
+ if (flexcan->core_reg) {
+ regulator_put(flexcan->core_reg, &pdev->dev);
+ flexcan->core_reg = NULL;
+ }
+
+ if (flexcan->io_base)
+ iounmap(flexcan->io_base);
+}
+
+/*!
+ * @brief The function allocates can device.
+ *
+ * @param pdev the pointer of platform device.
+ * @param setup the initial function pointer of network device.
+ *
+ * @return none
+ */
+struct net_device *flexcan_device_alloc(struct platform_device *pdev,
+ void (*setup) (struct net_device *dev))
+{
+ struct flexcan_device *flexcan;
+ struct net_device *net;
+ int i, num;
+
+ net = alloc_netdev(sizeof(*flexcan), "can%d", setup);
+ if (net == NULL) {
+ printk(KERN_ERR "Allocate netdevice for FlexCAN fail!\n");
+ return NULL;
+ }
+ flexcan = netdev_priv(net);
+ memset(flexcan, 0, sizeof(*flexcan));
+
+ mutex_init(&flexcan->mutex);
+ init_timer(&flexcan->timer);
+
+ flexcan->dev = pdev;
+ if (flexcan_device_attach(flexcan)) {
+ printk(KERN_ERR "Attach FlexCAN fail!\n");
+ free_netdev(net);
+ return NULL;
+ }
+ flexcan_device_default(flexcan);
+ flexcan_update_bitrate(flexcan);
+
+ num = ARRAY_SIZE(flexcan_dev_attr);
+
+ for (i = 0; i < num; i++) {
+ if (device_create_file(&pdev->dev, flexcan_dev_attr + i)) {
+ printk(KERN_ERR "Create attribute file fail!\n");
+ break;
+ }
+ }
+
+ if (i != num) {
+ for (; i >= 0; i--)
+ device_remove_file(&pdev->dev, flexcan_dev_attr + i);
+ free_netdev(net);
+ return NULL;
+ }
+ dev_set_drvdata(&pdev->dev, net);
+ return net;
+}
+
+/*!
+ * @brief The function frees can device.
+ *
+ * @param pdev the pointer of platform device.
+ *
+ * @return none
+ */
+void flexcan_device_free(struct platform_device *pdev)
+{
+ struct net_device *net;
+ struct flexcan_device *flexcan;
+ int i, num;
+ net = (struct net_device *)dev_get_drvdata(&pdev->dev);
+
+ unregister_netdev(net);
+ flexcan = netdev_priv(net);
+ del_timer(&flexcan->timer);
+
+ num = ARRAY_SIZE(flexcan_dev_attr);
+
+ for (i = 0; i < num; i++)
+ device_remove_file(&pdev->dev, flexcan_dev_attr + i);
+
+ flexcan_device_detach(netdev_priv(net));
+ free_netdev(net);
+}
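flexcan_update_bitrate() above computes the bit rate as clock / ((presdiv + 1) * (propseg + pseg1 + pseg2 + 4)), with the stored fields already holding value - 1. A worked sketch using the defaults from flexcan_device_default() and an assumed 66.5 MHz can_clk (the real rate depends on the board's clock tree):

    #include <stdio.h>

    int main(void)
    {
        /* defaults from flexcan_device_default(); fields are stored as value - 1 */
        unsigned int presdiv = 6, propseg = 4, pseg1 = 4, pseg2 = 7;
        unsigned int rate = 66500000;   /* assumed can_clk rate, board specific */

        unsigned int div = (presdiv + 1) * (propseg + pseg1 + pseg2 + 4);
        unsigned int bitrate = (rate + div - 1) / div;  /* same rounding as the driver */

        printf("div=%u bitrate=%u\n", div, bitrate);    /* div=133 bitrate=500000 */
        return 0;
    }

These are the same fields the sysfs attributes in flexcan_dev_attr expose per device, which is why flexcan_set_attr() stores tmp - 1 and flexcan_show_attr() adds 1 back for most of them.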
diff --git a/drivers/net/can/flexcan/drv.c b/drivers/net/can/flexcan/drv.c
new file mode 100644
index 000000000000..0e8445e59022
--- /dev/null
+++ b/drivers/net/can/flexcan/drv.c
@@ -0,0 +1,624 @@
+/*
+ * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*!
+ * @file drv.c
+ *
+ * @brief Driver for Freescale CAN Controller FlexCAN.
+ *
+ * @ingroup can
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/regulator.h>
+#include <linux/clk.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <mach/hardware.h>
+#include "flexcan.h"
+
+static void flexcan_hw_start(struct flexcan_device *flexcan)
+{
+ unsigned int reg;
+ if ((flexcan->maxmb + 1) > 32) {
+ __raw_writel(0xFFFFFFFF, flexcan->io_base + CAN_HW_REG_IMASK1);
+ reg = (1 << (flexcan->maxmb - 31)) - 1;
+ __raw_writel(reg, flexcan->io_base + CAN_HW_REG_IMASK2);
+ } else {
+ reg = (1 << (flexcan->maxmb + 1)) - 1;
+ __raw_writel(reg, flexcan->io_base + CAN_HW_REG_IMASK1);
+ __raw_writel(0, flexcan->io_base + CAN_HW_REG_IMASK2);
+ }
+
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_MCR) & (~__MCR_HALT);
+ __raw_writel(reg, flexcan->io_base + CAN_HW_REG_MCR);
+}
+
+static void flexcan_hw_stop(struct flexcan_device *flexcan)
+{
+ unsigned int reg;
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_MCR);
+ __raw_writel(reg | __MCR_HALT, flexcan->io_base + CAN_HW_REG_MCR);
+}
+
+static int flexcan_hw_reset(struct flexcan_device *flexcan)
+{
+ unsigned int reg;
+ int timeout = 100000;
+
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_MCR);
+ __raw_writel(reg | __MCR_MDIS, flexcan->io_base + CAN_HW_REG_MCR);
+
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_CTRL);
+ if (flexcan->br_clksrc)
+ reg |= __CTRL_CLK_SRC;
+ else
+ reg &= ~__CTRL_CLK_SRC;
+ __raw_writel(reg, flexcan->io_base + CAN_HW_REG_CTRL);
+
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_MCR) & (~__MCR_MDIS);
+ __raw_writel(reg, flexcan->io_base + CAN_HW_REG_MCR);
+ reg |= __MCR_SOFT_RST;
+ __raw_writel(reg, flexcan->io_base + CAN_HW_REG_MCR);
+
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_MCR);
+ while (reg & __MCR_SOFT_RST) {
+ if (--timeout <= 0) {
+ printk(KERN_ERR "Flexcan software Reset Timeouted\n");
+ return -1;
+ }
+ udelay(10);
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_MCR);
+ }
+ return 0;
+}
+
+static inline void flexcan_mcr_setup(struct flexcan_device *flexcan)
+{
+ unsigned int reg;
+
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_MCR);
+ reg &= ~(__MCR_MAX_MB_MASK | __MCR_WAK_MSK | __MCR_MAX_IDAM_MASK);
+
+ if (flexcan->fifo)
+ reg |= __MCR_FEN;
+ else
+ reg &= ~__MCR_FEN;
+
+ if (flexcan->wakeup)
+ reg |= __MCR_SLF_WAK | __MCR_WAK_MSK;
+ else
+ reg &= ~(__MCR_SLF_WAK | __MCR_WAK_MSK);
+
+ if (flexcan->wak_src)
+ reg |= __MCR_WAK_SRC;
+ else
+ reg &= ~__MCR_WAK_SRC;
+
+ if (flexcan->srx_dis)
+ reg |= __MCR_SRX_DIS;
+ else
+ reg &= ~__MCR_SRX_DIS;
+
+ if (flexcan->bcc)
+ reg |= __MCR_BCC;
+ else
+ reg &= ~__MCR_BCC;
+
+ if (flexcan->lprio)
+ reg |= __MCR_LPRIO_EN;
+ else
+ reg &= ~__MCR_LPRIO_EN;
+
+ if (flexcan->abort)
+ reg |= __MCR_AEN;
+ else
+ reg &= ~__MCR_AEN;
+
+ reg |= (flexcan->maxmb << __MCR_MAX_MB_OFFSET);
+ reg |= __MCR_DOZE | __MCR_MAX_IDAM_C;
+ __raw_writel(reg, flexcan->io_base + CAN_HW_REG_MCR);
+}
+
+static inline void flexcan_ctrl_setup(struct flexcan_device *flexcan)
+{
+ unsigned int reg;
+
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_CTRL);
+ reg &= ~(__CTRL_PRESDIV_MASK | __CTRL_RJW_MASK | __CTRL_PSEG1_MASK |
+ __CTRL_PSEG2_MASK | __CTRL_PROPSEG_MASK);
+
+ if (flexcan->loopback)
+ reg |= __CTRL_LPB;
+ else
+ reg &= ~__CTRL_LPB;
+
+ if (flexcan->smp)
+ reg |= __CTRL_SMP;
+ else
+ reg &= ~__CTRL_SMP;
+
+ if (flexcan->boff_rec)
+ reg |= __CTRL_BOFF_REC;
+ else
+ reg &= ~__CTRL_BOFF_REC;
+
+ if (flexcan->tsyn)
+ reg |= __CTRL_TSYN;
+ else
+ reg &= ~__CTRL_TSYN;
+
+ if (flexcan->listen)
+ reg |= __CTRL_LOM;
+ else
+ reg &= ~__CTRL_LOM;
+
+ reg |= (flexcan->br_presdiv << __CTRL_PRESDIV_OFFSET) |
+ (flexcan->br_rjw << __CTRL_RJW_OFFSET) |
+ (flexcan->br_pseg1 << __CTRL_PSEG1_OFFSET) |
+ (flexcan->br_pseg2 << __CTRL_PSEG2_OFFSET) |
+ (flexcan->br_propseg << __CTRL_PROPSEG_OFFSET);
+
+ reg &= ~__CTRL_LBUF;
+
+ reg |= __CTRL_TWRN_MSK | __CTRL_RWRN_MSK | __CTRL_BOFF_MSK |
+ __CTRL_ERR_MSK;
+
+ __raw_writel(reg, flexcan->io_base + CAN_HW_REG_CTRL);
+}
+
+static int flexcan_hw_restart(struct net_device *dev)
+{
+ unsigned int reg;
+ struct flexcan_device *flexcan = netdev_priv(dev);
+
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_MCR);
+ if (reg & __MCR_SOFT_RST)
+ return 1;
+
+ flexcan_mcr_setup(flexcan);
+
+ __raw_writel(0, flexcan->io_base + CAN_HW_REG_IMASK2);
+ __raw_writel(0, flexcan->io_base + CAN_HW_REG_IMASK1);
+
+ __raw_writel(0xFFFFFFFF, flexcan->io_base + CAN_HW_REG_IFLAG2);
+ __raw_writel(0xFFFFFFFF, flexcan->io_base + CAN_HW_REG_IFLAG1);
+
+ __raw_writel(0, flexcan->io_base + CAN_HW_REG_ECR);
+
+ flexcan_mbm_init(flexcan);
+ netif_carrier_on(dev);
+ flexcan_hw_start(flexcan);
+
+ if (netif_queue_stopped(dev))
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static void flexcan_hw_watch(unsigned long data)
+{
+ unsigned int reg, ecr;
+ struct net_device *dev = (struct net_device *)data;
+ struct flexcan_device *flexcan = dev ? netdev_priv(dev) : NULL;
+
+ BUG_ON(!flexcan);
+
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_MCR);
+ if (reg & __MCR_MDIS) {
+ if (flexcan_hw_restart(dev))
+ mod_timer(&flexcan->timer, HZ / 20);
+ return;
+ }
+ ecr = __raw_readl(flexcan->io_base + CAN_HW_REG_ECR);
+ if (flexcan->boff_rec) {
+ if (((reg & __ESR_FLT_CONF_MASK) >> __ESR_FLT_CONF_OFF) > 1) {
+ reg |= __MCR_SOFT_RST;
+ __raw_writel(reg, flexcan->io_base + CAN_HW_REG_MCR);
+ mod_timer(&flexcan->timer, HZ / 20);
+ return;
+ }
+ netif_carrier_on(dev);
+ }
+}
+
+static void flexcan_hw_busoff(struct net_device *dev)
+{
+ struct flexcan_device *flexcan = netdev_priv(dev);
+ unsigned int reg;
+
+ netif_carrier_off(dev);
+
+ flexcan->timer.function = flexcan_hw_watch;
+ flexcan->timer.data = (unsigned long)dev;
+
+ if (flexcan->boff_rec) {
+ mod_timer(&flexcan->timer, HZ / 10);
+ return;
+ }
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_MCR);
+ __raw_writel(reg | __MCR_SOFT_RST, flexcan->io_base + CAN_HW_REG_MCR);
+ mod_timer(&flexcan->timer, HZ / 20);
+}
+
+static int flexcan_hw_open(struct flexcan_device *flexcan)
+{
+ if (flexcan_hw_reset(flexcan))
+ return -EFAULT;
+
+ flexcan_mcr_setup(flexcan);
+ flexcan_ctrl_setup(flexcan);
+
+ __raw_writel(0, flexcan->io_base + CAN_HW_REG_IMASK2);
+ __raw_writel(0, flexcan->io_base + CAN_HW_REG_IMASK1);
+
+ __raw_writel(0xFFFFFFFF, flexcan->io_base + CAN_HW_REG_IFLAG2);
+ __raw_writel(0xFFFFFFFF, flexcan->io_base + CAN_HW_REG_IFLAG1);
+
+ __raw_writel(0, flexcan->io_base + CAN_HW_REG_ECR);
+ return 0;
+}
+
+static void flexcan_err_handler(struct net_device *dev)
+{
+ struct flexcan_device *flexcan = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct can_frame *frame;
+ unsigned int esr, ecr;
+
+ esr = __raw_readl(flexcan->io_base + CAN_HW_REG_ESR);
+ __raw_writel(esr & __ESR_INTERRUPTS, flexcan->io_base + CAN_HW_REG_ESR);
+
+ if (esr & __ESR_WAK_INT)
+ return;
+
+ skb = dev_alloc_skb(sizeof(struct can_frame));
+ if (!skb) {
+ printk(KERN_ERR "%s: allocates skb fail in\n", __func__);
+ return;
+ }
+ frame = (struct can_frame *)skb_put(skb, sizeof(*frame));
+ memset(frame, 0, sizeof(*frame));
+ frame->can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
+ frame->can_dlc = CAN_ERR_DLC;
+
+ if (esr & __ESR_TWRN_INT)
+ frame->data[1] |= CAN_ERR_CRTL_TX_WARNING;
+
+ if (esr & __ESR_RWRN_INT)
+ frame->data[1] |= CAN_ERR_CRTL_RX_WARNING;
+
+ if (esr & __ESR_BOFF_INT)
+ frame->can_id |= CAN_ERR_BUSOFF;
+
+ if (esr & __ESR_ERR_INT) {
+ if (esr & __ESR_BIT1_ERR)
+ frame->data[2] |= CAN_ERR_PROT_BIT1;
+
+ if (esr & __ESR_BIT0_ERR)
+ frame->data[2] |= CAN_ERR_PROT_BIT0;
+
+ if (esr & __ESR_ACK_ERR)
+ frame->can_id |= CAN_ERR_ACK;
+
+ /*TODO:// if (esr & __ESR_CRC_ERR) */
+
+ if (esr & __ESR_FRM_ERR)
+ frame->data[2] |= CAN_ERR_PROT_FORM;
+
+ if (esr & __ESR_STF_ERR)
+ frame->data[2] |= CAN_ERR_PROT_STUFF;
+
+ ecr = __raw_readl(flexcan->io_base + CAN_HW_REG_ECR);
+ switch ((esr & __ESR_FLT_CONF_MASK) >> __ESR_FLT_CONF_OFF) {
+ case 0:
+ if (__ECR_TX_ERR_COUNTER(ecr) >= __ECR_ACTIVE_THRESHOLD)
+ frame->data[1] |= CAN_ERR_CRTL_TX_WARNING;
+ if (__ECR_RX_ERR_COUNTER(ecr) >= __ECR_ACTIVE_THRESHOLD)
+ frame->data[1] |= CAN_ERR_CRTL_RX_WARNING;
+ break;
+ case 1:
+ if (__ECR_TX_ERR_COUNTER(ecr) >=
+ __ECR_PASSIVE_THRESHOLD)
+ frame->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+
+ if (__ECR_RX_ERR_COUNTER(ecr) >=
+ __ECR_PASSIVE_THRESHOLD)
+ frame->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+ break;
+ default:
+ frame->can_id |= CAN_ERR_BUSOFF;
+ }
+ }
+
+ if (frame->can_id & CAN_ERR_BUSOFF)
+ flexcan_hw_busoff(dev);
+
+ skb->dev = dev;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_receive_skb(skb);
+}
+
+static irqreturn_t flexcan_irq_handler(int irq, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct flexcan_device *flexcan = dev ? netdev_priv(dev) : NULL;
+ unsigned int reg;
+
+ BUG_ON(!flexcan);
+
+ reg = __raw_readl(flexcan->io_base + CAN_HW_REG_ESR);
+ if (reg & __ESR_INTERRUPTS) {
+ flexcan_err_handler(dev);
+ return IRQ_HANDLED;
+ }
+
+ flexcan_mbm_isr(dev);
+ return IRQ_HANDLED;
+}
+
+static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct can_frame *frame = (struct can_frame *)skb->data;
+ struct flexcan_device *flexcan = netdev_priv(dev);
+ struct net_device_stats *stats = dev->get_stats(dev);
+
+ BUG_ON(!flexcan);
+
+ if (frame->can_dlc > 8)
+ return -EINVAL;
+
+ if (!flexcan_mbm_xmit(flexcan, frame)) {
+ dev_kfree_skb(skb);
+ stats->tx_bytes += frame->can_dlc;
+ stats->tx_packets++;
+ dev->trans_start = jiffies;
+ return NETDEV_TX_OK;
+ }
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+}
+
+static int flexcan_open(struct net_device *dev)
+{
+ struct flexcan_device *flexcan;
+ struct platform_device *pdev;
+ struct flexcan_platform_data *plat_data;
+
+ flexcan = netdev_priv(dev);
+ BUG_ON(!flexcan);
+
+ pdev = flexcan->dev;
+ plat_data = (pdev->dev).platform_data;
+ if (plat_data && plat_data->active)
+ plat_data->active(pdev->id);
+
+ if (flexcan->clk)
+ if (clk_enable(flexcan->clk))
+ goto clk_err;
+
+ if (flexcan->core_reg)
+ if (regulator_enable(flexcan->core_reg))
+ goto core_reg_err;
+
+ if (flexcan->io_reg)
+ if (regulator_enable(flexcan->io_reg))
+ goto io_reg_err;
+
+ if (plat_data && plat_data->xcvr_enable)
+ plat_data->xcvr_enable(pdev->id, 1);
+
+ if (request_irq(flexcan->irq, flexcan_irq_handler, IRQF_SAMPLE_RANDOM,
+ dev->name, dev))
+ goto irq_err;
+
+ if (flexcan_hw_open(flexcan))
+ goto open_err;
+
+ flexcan_mbm_init(flexcan);
+ netif_carrier_on(dev);
+ flexcan_hw_start(flexcan);
+ return 0;
+ open_err:
+ free_irq(flexcan->irq, dev);
+ irq_err:
+ if (plat_data && plat_data->xcvr_enable)
+ plat_data->xcvr_enable(pdev->id, 0);
+
+ if (flexcan->io_reg)
+ regulator_disable(flexcan->io_reg);
+ io_reg_err:
+ if (flexcan->core_reg)
+ regulator_disable(flexcan->core_reg);
+ core_reg_err:
+ if (flexcan->clk)
+ clk_disable(flexcan->clk);
+ clk_err:
+ if (plat_data && plat_data->inactive)
+ plat_data->inactive(pdev->id);
+ return -ENODEV;
+}
+
+static int flexcan_stop(struct net_device *dev)
+{
+ struct flexcan_device *flexcan;
+ struct platform_device *pdev;
+ struct flexcan_platform_data *plat_data;
+
+ flexcan = netdev_priv(dev);
+
+ BUG_ON(!flexcan);
+
+ pdev = flexcan->dev;
+ plat_data = (pdev->dev).platform_data;
+
+ flexcan_hw_stop(flexcan);
+
+ free_irq(flexcan->irq, dev);
+
+ if (plat_data && plat_data->xcvr_enable)
+ plat_data->xcvr_enable(pdev->id, 0);
+
+ if (flexcan->io_reg)
+ regulator_disable(flexcan->io_reg);
+ if (flexcan->core_reg)
+ regulator_disable(flexcan->core_reg);
+ if (flexcan->clk)
+ clk_disable(flexcan->clk);
+ if (plat_data && plat_data->inactive)
+ plat_data->inactive(pdev->id);
+ return 0;
+}
+
+static void flexcan_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_CAN;
+ dev->mtu = sizeof(struct can_frame);
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->tx_queue_len = FLEXCAN_MAX_MB;
+ dev->flags = IFF_NOARP;
+ dev->features = NETIF_F_NO_CSUM;
+
+ dev->open = flexcan_open;
+ dev->stop = flexcan_stop;
+ dev->hard_start_xmit = flexcan_start_xmit;
+}
+
+static int flexcan_probe(struct platform_device *pdev)
+{
+ struct net_device *net;
+ net = flexcan_device_alloc(pdev, flexcan_setup);
+ if (!net)
+ return -ENOMEM;
+
+ if (register_netdev(net)) {
+ flexcan_device_free(pdev);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int flexcan_remove(struct platform_device *pdev)
+{
+ flexcan_device_free(pdev);
+ return 0;
+}
+
+static int flexcan_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *net;
+ struct flexcan_device *flexcan;
+ struct flexcan_platform_data *plat_data;
+ net = (struct net_device *)dev_get_drvdata(&pdev->dev);
+ flexcan = netdev_priv(net);
+
+ BUG_ON(!flexcan);
+
+ if (!(net->flags & IFF_UP))
+ return 0;
+ if (flexcan->wakeup)
+ set_irq_wake(flexcan->irq, 1);
+ else {
+ plat_data = (pdev->dev).platform_data;
+
+ if (plat_data && plat_data->xcvr_enable)
+ plat_data->xcvr_enable(pdev->id, 0);
+
+ if (flexcan->io_reg)
+ regulator_disable(flexcan->io_reg);
+ if (flexcan->core_reg)
+ regulator_disable(flexcan->core_reg);
+ if (flexcan->clk)
+ clk_disable(flexcan->clk);
+ if (plat_data && plat_data->inactive)
+ plat_data->inactive(pdev->id);
+ }
+ return 0;
+}
+
+static int flexcan_resume(struct platform_device *pdev)
+{
+ struct net_device *net;
+ struct flexcan_device *flexcan;
+ struct flexcan_platform_data *plat_data;
+ net = (struct net_device *)dev_get_drvdata(&pdev->dev);
+ flexcan = netdev_priv(net);
+
+ BUG_ON(!flexcan);
+
+ if (!(net->flags & IFF_UP))
+ return 0;
+
+ if (flexcan->wakeup)
+ set_irq_wake(flexcan->irq, 0);
+ else {
+ plat_data = (pdev->dev).platform_data;
+ if (plat_data && plat_data->active)
+ plat_data->active(pdev->id);
+
+ if (flexcan->clk) {
+ if (clk_enable(flexcan->clk))
+ printk(KERN_ERR "%s:enable clock fail\n",
+ __func__);
+ }
+
+ if (flexcan->core_reg) {
+ if (regulator_enable(flexcan->core_reg))
+ printk(KERN_ERR "%s:enable core voltage\n",
+ __func__);
+ }
+ if (flexcan->io_reg) {
+ if (regulator_enable(flexcan->io_reg))
+ printk(KERN_ERR "%s:enable io voltage\n",
+ __func__);
+ }
+
+ if (plat_data && plat_data->xcvr_enable)
+ plat_data->xcvr_enable(pdev->id, 1);
+ }
+ return 0;
+}
+
+static struct platform_driver flexcan_driver = {
+ .driver = {
+ .name = FLEXCAN_DEVICE_NAME,
+ },
+ .probe = flexcan_probe,
+ .remove = flexcan_remove,
+ .suspend = flexcan_suspend,
+ .resume = flexcan_resume,
+};
+
+static __init int flexcan_init(void)
+{
+ pr_info("Freescale FlexCAN Driver \n");
+ return platform_driver_register(&flexcan_driver);
+}
+
+static __exit void flexcan_exit(void)
+{
+ return platform_driver_unregister(&flexcan_driver);
+}
+
+module_init(flexcan_init);
+module_exit(flexcan_exit);
+
+MODULE_LICENSE("GPL");
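Because drv.c registers the controller as a can%d network device carrying struct can_frame payloads, frames can be injected from user space through a PF_CAN raw socket once the interface is up; a minimal sketch, assuming the device came up as can0 and the kernel's CAN raw protocol (CONFIG_CAN_RAW) is enabled:

    #include <string.h>
    #include <unistd.h>
    #include <net/if.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/can.h>
    #include <linux/can/raw.h>

    int main(void)
    {
        struct sockaddr_can addr = { 0 };
        struct can_frame frame = { 0 };
        struct ifreq ifr;
        int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

        if (s < 0)
            return 1;

        strcpy(ifr.ifr_name, "can0");        /* assumed interface name */
        ioctl(s, SIOCGIFINDEX, &ifr);        /* resolve the name to an ifindex */

        addr.can_family = AF_CAN;
        addr.can_ifindex = ifr.ifr_ifindex;
        bind(s, (struct sockaddr *)&addr, sizeof(addr));

        frame.can_id = 0x123;                /* standard 11-bit identifier */
        frame.can_dlc = 2;
        frame.data[0] = 0xde;
        frame.data[1] = 0xad;
        write(s, &frame, sizeof(frame));     /* reaches flexcan_start_xmit() */

        close(s);
        return 0;
    }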
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
new file mode 100644
index 000000000000..ade22849892f
--- /dev/null
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*!
+ * @file flexcan.h
+ *
+ * @brief FlexCan definitions.
+ *
+ * @ingroup can
+ */
+
+#ifndef __CAN_FLEXCAN_H__
+#define __CAN_FLEXCAN_H__
+
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/regulator.h>
+#include <linux/clk.h>
+#include <linux/can.h>
+#include <linux/can/core.h>
+#include <linux/can/error.h>
+
+#define FLEXCAN_DEVICE_NAME "FlexCAN"
+
+struct can_mb_cs {
+ unsigned int time_stamp:16;
+ unsigned int length:4;
+ unsigned int rtr:1;
+ unsigned int ide:1;
+ unsigned int srr:1;
+ unsigned int nouse1:1;
+ unsigned int code:4;
+ unsigned int nouse2:4;
+};
+
+#define CAN_MB_RX_INACTIVE 0x0
+#define CAN_MB_RX_EMPTY 0x4
+#define CAN_MB_RX_FULL 0x2
+#define CAN_MB_RX_OVERRUN 0x6
+#define CAN_MB_RX_BUSY 0x1
+
+#define CAN_MB_TX_INACTIVE 0x8
+#define CAN_MB_TX_ABORT 0x9
+#define CAN_MB_TX_ONCE 0xC
+#define CAN_MB_TX_REMOTE 0xA
+
+struct can_hw_mb {
+ union {
+ struct can_mb_cs cs;
+ unsigned int data;
+ } mb_cs;
+ unsigned int mb_id;
+ unsigned char mb_data[8];
+};
+
+#define CAN_HW_REG_MCR 0x00
+#define CAN_HW_REG_CTRL 0x04
+#define CAN_HW_REG_TIMER 0x08
+#define CAN_HW_REG_RXGMASK 0x10
+#define CAN_HW_REG_RX14MASK 0x14
+#define CAN_HW_REG_RX15MASK 0x18
+#define CAN_HW_REG_ECR 0x1C
+#define CAN_HW_REG_ESR 0x20
+#define CAN_HW_REG_IMASK2 0x24
+#define CAN_HW_REG_IMASK1 0x28
+#define CAN_HW_REG_IFLAG2 0x2C
+#define CAN_HW_REG_IFLAG1 0x30
+
+#define CAN_MB_BASE 0x0080
+#define CAN_RXMASK_BASE 0x0880
+#define CAN_FIFO_BASE 0xE0
+
+#define __MCR_MDIS (1 << 31)
+#define __MCR_FRZ (1 << 30)
+#define __MCR_FEN (1 << 29)
+#define __MCR_HALT (1 << 28)
+#define __MCR_NOTRDY (1 << 27)
+#define __MCR_WAK_MSK (1 << 26)
+#define __MCR_SOFT_RST (1 << 25)
+#define __MCR_FRZ_ACK (1 << 24)
+#define __MCR_SLF_WAK (1 << 22)
+#define __MCR_WRN_EN (1 << 21)
+#define __MCR_LPM_ACK (1 << 20)
+#define __MCR_WAK_SRC (1 << 19)
+#define __MCR_DOZE (1 << 18)
+#define __MCR_SRX_DIS (1 << 17)
+#define __MCR_BCC (1 << 16)
+#define __MCR_LPRIO_EN (1 << 13)
+#define __MCR_AEN (1 << 12)
+#define __MCR_MAX_IDAM_OFFSET 8
+#define __MCR_MAX_IDAM_MASK (0x3 << __MCR_MAX_IDAM_OFFSET)
+#define __MCR_MAX_IDAM_A (0x0 << __MCR_MAX_IDAM_OFFSET)
+#define __MCR_MAX_IDAM_B (0x1 << __MCR_MAX_IDAM_OFFSET)
+#define __MCR_MAX_IDAM_C (0x2 << __MCR_MAX_IDAM_OFFSET)
+#define __MCR_MAX_IDAM_D (0x3 << __MCR_MAX_IDAM_OFFSET)
+#define __MCR_MAX_MB_OFFSET 0
+#define __MCR_MAX_MB_MASK (0x3F)
+
+#define __CTRL_PRESDIV_OFFSET 24
+#define __CTRL_PRESDIV_MASK (0xFF << __CTRL_PRESDIV_OFFSET)
+#define __CTRL_RJW_OFFSET 22
+#define __CTRL_RJW_MASK (0x3 << __CTRL_RJW_OFFSET)
+#define __CTRL_PSEG1_OFFSET 19
+#define __CTRL_PSEG1_MASK (0x7 << __CTRL_PSEG1_OFFSET)
+#define __CTRL_PSEG2_OFFSET 16
+#define __CTRL_PSEG2_MASK (0x7 << __CTRL_PSEG2_OFFSET)
+#define __CTRL_BOFF_MSK (0x1 << 15)
+#define __CTRL_ERR_MSK (0x1 << 14)
+#define __CTRL_CLK_SRC (0x1 << 13)
+#define __CTRL_LPB (0x1 << 12)
+#define __CTRL_TWRN_MSK (0x1 << 11)
+#define __CTRL_RWRN_MSK (0x1 << 10)
+#define __CTRL_SMP (0x1 << 7)
+#define __CTRL_BOFF_REC (0x1 << 6)
+#define __CTRL_TSYN (0x1 << 5)
+#define __CTRL_LBUF (0x1 << 4)
+#define __CTRL_LOM (0x1 << 3)
+#define __CTRL_PROPSEG_OFFSET 0
+#define __CTRL_PROPSEG_MASK (0x7)
+
+#define __ECR_TX_ERR_COUNTER(x) ((x) & 0xFF)
+#define __ECR_RX_ERR_COUNTER(x) (((x) >> 8) & 0xFF)
+#define __ECR_PASSIVE_THRESHOLD 128
+#define __ECR_ACTIVE_THRESHOLD 96
+
+#define __ESR_TWRN_INT (0x1 << 17)
+#define __ESR_RWRN_INT (0x1 << 16)
+#define __ESR_BIT1_ERR (0x1 << 15)
+#define __ESR_BIT0_ERR (0x1 << 14)
+#define __ESR_ACK_ERR (0x1 << 13)
+#define __ESR_CRC_ERR (0x1 << 12)
+#define __ESR_FRM_ERR (0x1 << 11)
+#define __ESR_STF_ERR (0x1 << 10)
+#define __ESR_TX_WRN (0x1 << 9)
+#define __ESR_RX_WRN (0x1 << 8)
+#define __ESR_IDLE (0x1 << 7)
+#define __ESR_TXRX (0x1 << 6)
+#define __ESR_FLT_CONF_OFF 4
+#define __ESR_FLT_CONF_MASK (0x3 << __ESR_FLT_CONF_OFF)
+#define __ESR_BOFF_INT (0x1 << 2)
+#define __ESR_ERR_INT (0x1 << 1)
+#define __ESR_WAK_INT (0x1)
+
+#define __ESR_INTERRUPTS (__ESR_WAK_INT | __ESR_ERR_INT | \
+ __ESR_BOFF_INT | __ESR_TWRN_INT | \
+ __ESR_RWRN_INT)
+
+#define __FIFO_OV_INT 0x0080
+#define __FIFO_WARN_INT 0x0040
+#define __FIFO_RDY_INT 0x0020
+
+struct flexcan_device {
+ struct mutex mutex;
+ void *io_base;
+ struct can_hw_mb *hwmb;
+ unsigned int *rx_mask;
+ unsigned int xmit_mb;
+ unsigned int bitrate;
+ /* word 1 */
+ unsigned int br_presdiv:8;
+ unsigned int br_rjw:2;
+ unsigned int br_propseg:3;
+ unsigned int br_pseg1:3;
+ unsigned int br_pseg2:3;
+ unsigned int maxmb:6;
+ unsigned int xmit_maxmb:6;
+ unsigned int wd1_resv:1;
+
+ /* word 2 */
+ unsigned int fifo:1;
+ unsigned int wakeup:1;
+ unsigned int srx_dis:1;
+ unsigned int wak_src:1;
+ unsigned int bcc:1;
+ unsigned int lprio:1;
+ unsigned int abort:1;
+ unsigned int br_clksrc:1;
+ unsigned int loopback:1;
+ unsigned int smp:1;
+ unsigned int boff_rec:1;
+ unsigned int tsyn:1;
+ unsigned int listen:1;
+
+ unsigned int ext_msg:1;
+ unsigned int std_msg:1;
+
+ struct timer_list timer;
+ struct platform_device *dev;
+ struct regulator *core_reg;
+ struct regulator *io_reg;
+ struct clk *clk;
+ int irq;
+};
+
+#define FLEXCAN_MAX_FIFO_MB 8
+#define FLEXCAN_MAX_MB 64
+#define FLEXCAN_MAX_PRESDIV 256
+#define FLEXCAN_MAX_RJW 4
+#define FLEXCAN_MAX_PSEG1 8
+#define FLEXCAN_MAX_PSEG2 8
+#define FLEXCAN_MAX_PROPSEG 8
+#define FLEXCAN_MAX_BITRATE 1000000
+
+extern struct net_device *flexcan_device_alloc(struct platform_device *pdev,
+ void (*setup) (struct net_device
+ *dev));
+extern void flexcan_device_free(struct platform_device *pdev);
+
+extern void flexcan_mbm_init(struct flexcan_device *flexcan);
+extern void flexcan_mbm_isr(struct net_device *dev);
+extern int flexcan_mbm_xmit(struct flexcan_device *flexcan,
+ struct can_frame *frame);
+#endif /* __CAN_FLEXCAN_H__ */
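The ESR and ECR helpers above are all that flexcan_show_state() and flexcan_err_handler() use to classify the controller state; a small sketch of the same decoding (the helper name is ours, and it assumes flexcan.h from this patch is included):

    /* mirrors the fault-confinement decode in flexcan_show_state() */
    static const char *flexcan_flt_conf_name(unsigned int esr)
    {
        switch ((esr & __ESR_FLT_CONF_MASK) >> __ESR_FLT_CONF_OFF) {
        case 0:
            return "normal";         /* error active */
        case 1:
            return "error passive";
        default:
            return "bus off";
        }
    }

    /* e.g. esr = 0x10 -> FLT_CONF = 1 -> "error passive";
     * __ECR_TX_ERR_COUNTER(0x00008820) == 0x20, __ECR_RX_ERR_COUNTER(0x00008820) == 0x88 */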
diff --git a/drivers/net/can/flexcan/mbm.c b/drivers/net/can/flexcan/mbm.c
new file mode 100644
index 000000000000..6c0647045e79
--- /dev/null
+++ b/drivers/net/can/flexcan/mbm.c
@@ -0,0 +1,347 @@
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*!
+ * @file mbm.c
+ *
+ * @brief Driver for Freescale CAN Controller FlexCAN.
+ *
+ * @ingroup can
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include "flexcan.h"
+
+#define flexcan_swab32(x) \
+ (((x) << 24) | ((x) >> 24) |\
+ (((x) & (__u32)0x0000ff00UL) << 8) |\
+ (((x) & (__u32)0x00ff0000UL) >> 8))
+
+static inline void flexcan_memcpy(void *dst, void *src, int len)
+{
+ int i;
+ unsigned int *d = (unsigned int *)dst, *s = (unsigned int *)src;
+ len = (len + 3) >> 2;
+ for (i = 0; i < len; i++, s++, d++)
+ *d = flexcan_swab32(*s);
+}
+
+static void flexcan_mb_bottom(struct net_device *dev, int index)
+{
+ struct flexcan_device *flexcan = netdev_priv(dev);
+ struct net_device_stats *stats = dev->get_stats(dev);
+ struct can_hw_mb *hwmb;
+ struct can_frame *frame;
+ struct sk_buff *skb;
+ unsigned int tmp;
+
+ hwmb = flexcan->hwmb + index;
+ if (flexcan->fifo || (index >= (flexcan->maxmb - flexcan->xmit_maxmb))) {
+ if (hwmb->mb_cs.cs.code == CAN_MB_TX_ABORT)
+ hwmb->mb_cs.cs.code = CAN_MB_TX_INACTIVE;
+
+ if (hwmb->mb_cs.cs.code & CAN_MB_TX_INACTIVE) {
+ if (netif_queue_stopped(dev))
+ netif_start_queue(dev);
+ return;
+ }
+ }
+ skb = dev_alloc_skb(sizeof(struct can_frame));
+ if (skb) {
+ frame = (struct can_frame *)skb_put(skb, sizeof(*frame));
+ memset(frame, 0, sizeof(*frame));
+ if (hwmb->mb_cs.cs.ide)
+ frame->can_id =
+ (hwmb->mb_id & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ frame->can_id = (hwmb->mb_id >> 18) & CAN_SFF_MASK;
+
+ if (hwmb->mb_cs.cs.rtr)
+ frame->can_id |= CAN_RTR_FLAG;
+
+ frame->can_dlc = hwmb->mb_cs.cs.length;
+
+ if (frame->can_dlc && (frame->can_dlc <= 8))
+ flexcan_memcpy(frame->data, hwmb->mb_data,
+ frame->can_dlc);
+
+ if (flexcan->fifo
+ || (index >= (flexcan->maxmb - flexcan->xmit_maxmb))) {
+ hwmb->mb_cs.cs.code = CAN_MB_TX_INACTIVE;
+ if (netif_queue_stopped(dev))
+ netif_start_queue(dev);
+ }
+
+ tmp = __raw_readl(flexcan->io_base + CAN_HW_REG_TIMER);
+
+ dev->last_rx = jiffies;
+ stats->rx_packets++;
+ stats->rx_bytes += frame->can_dlc;
+
+ skb->dev = dev;
+ skb->protocol = __constant_htons(ETH_P_CAN);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_receive_skb(skb);
+ } else {
+ tmp = hwmb->mb_cs.data;
+ tmp = hwmb->mb_id;
+ tmp = hwmb->mb_data[0];
+ if (flexcan->fifo
+ || (index >= (flexcan->maxmb - flexcan->xmit_maxmb))) {
+
+ hwmb->mb_cs.cs.code = CAN_MB_TX_INACTIVE;
+ if (netif_queue_stopped(dev))
+ netif_start_queue(dev);
+ }
+ tmp = __raw_readl(flexcan->io_base + CAN_HW_REG_TIMER);
+ stats->rx_dropped++;
+ }
+}
+
+static void flexcan_fifo_isr(struct net_device *dev, unsigned int iflag1)
+{
+ struct flexcan_device *flexcan = dev ? netdev_priv(dev) : NULL;
+ struct net_device_stats *stats = dev->get_stats(dev);
+ struct sk_buff *skb;
+ struct can_hw_mb *hwmb = flexcan->hwmb;
+ struct can_frame *frame;
+ unsigned int tmp;
+
+ if (iflag1 & __FIFO_RDY_INT) {
+ skb = dev_alloc_skb(sizeof(struct can_frame));
+ if (skb) {
+ frame =
+ (struct can_frame *)skb_put(skb, sizeof(*frame));
+ memset(frame, 0, sizeof(*frame));
+ if (hwmb->mb_cs.cs.ide)
+ frame->can_id =
+ (hwmb->mb_id & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ frame->can_id =
+ (hwmb->mb_id >> 18) & CAN_SFF_MASK;
+
+ if (hwmb->mb_cs.cs.rtr)
+ frame->can_id |= CAN_RTR_FLAG;
+
+ frame->can_dlc = hwmb->mb_cs.cs.length;
+
+ if (frame->can_dlc && (frame->can_dlc <= 8))
+ flexcan_memcpy(frame->data, hwmb->mb_data,
+ frame->can_dlc);
+ tmp = __raw_readl(flexcan->io_base + CAN_HW_REG_TIMER);
+
+ dev->last_rx = jiffies;
+
+ stats->rx_packets++;
+ stats->rx_bytes += frame->can_dlc;
+
+ skb->dev = dev;
+ skb->protocol = __constant_htons(ETH_P_CAN);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_receive_skb(skb);
+ } else {
+ tmp = hwmb->mb_cs.data;
+ tmp = hwmb->mb_id;
+ tmp = hwmb->mb_data[0];
+ tmp = __raw_readl(flexcan->io_base + CAN_HW_REG_TIMER);
+ }
+ }
+
+ if (iflag1 & (__FIFO_OV_INT | __FIFO_WARN_INT)) {
+ skb = dev_alloc_skb(sizeof(struct can_frame));
+ if (skb) {
+ frame =
+ (struct can_frame *)skb_put(skb, sizeof(*frame));
+ memset(frame, 0, sizeof(*frame));
+ frame->can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
+ frame->can_dlc = CAN_ERR_DLC;
+ if (iflag1 & __FIFO_WARN_INT)
+ frame->data[1] |=
+ CAN_ERR_CRTL_TX_WARNING |
+ CAN_ERR_CRTL_RX_WARNING;
+ if (iflag1 & __FIFO_OV_INT)
+ frame->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+
+ skb->dev = dev;
+ skb->protocol = __constant_htons(ETH_P_CAN);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_receive_skb(skb);
+ }
+ }
+}
+
+/*!
+ * @brief The function call by CAN ISR to handle mb events.
+ *
+ * @param dev the pointer of network device.
+ *
+ * @return none
+ */
+void flexcan_mbm_isr(struct net_device *dev)
+{
+ int i, iflag1, iflag2, maxmb;
+ struct flexcan_device *flexcan = dev ? netdev_priv(dev) : NULL;
+
+ if (flexcan->maxmb > 31) {
+ maxmb = flexcan->maxmb + 1 - 32;
+ iflag1 = __raw_readl(flexcan->io_base + CAN_HW_REG_IFLAG1) &
+ __raw_readl(flexcan->io_base + CAN_HW_REG_IMASK1);
+ iflag2 = __raw_readl(flexcan->io_base + CAN_HW_REG_IFLAG2) &
+ __raw_readl(flexcan->io_base + CAN_HW_REG_IMASK2);
+ iflag2 &= (1 << maxmb) - 1;
+ maxmb = 32;
+ } else {
+ maxmb = flexcan->maxmb + 1;
+ iflag1 = __raw_readl(flexcan->io_base + CAN_HW_REG_IFLAG1) &
+ __raw_readl(flexcan->io_base + CAN_HW_REG_IMASK1);
+ iflag1 &= (1 << maxmb) - 1;
+ iflag2 = 0;
+ }
+
+ __raw_writel(iflag1, flexcan->io_base + CAN_HW_REG_IFLAG1);
+ __raw_writel(iflag2, flexcan->io_base + CAN_HW_REG_IFLAG2);
+
+ if (flexcan->fifo) {
+ flexcan_fifo_isr(dev, iflag1);
+ iflag1 &= 0xFFFFFF00;
+ }
+ for (i = 0; iflag1 && (i < maxmb); i++) {
+ if (iflag1 & (1 << i)) {
+ iflag1 &= ~(1 << i);
+ flexcan_mb_bottom(dev, i);
+ }
+ }
+
+ for (i = maxmb; iflag2 && (i <= flexcan->maxmb); i++) {
+ if (iflag2 & (1 << (i - 32))) {
+ iflag2 &= ~(1 << (i - 32));
+ flexcan_mb_bottom(dev, i);
+ }
+ }
+}
+
+/*!
+ * @brief function to xmit message buffer
+ *
+ * @param flexcan the pointer of can hardware device.
+ * @param frame the pointer of can message frame.
+ *
+ * @return Returns 0 if xmit is success. otherwise returns non-zero.
+ */
+int flexcan_mbm_xmit(struct flexcan_device *flexcan, struct can_frame *frame)
+{
+ int i = flexcan->xmit_mb;
+ struct can_hw_mb *hwmb = flexcan->hwmb;
+
+ do {
+ if (hwmb[i].mb_cs.cs.code == CAN_MB_TX_INACTIVE)
+ break;
+ if ((++i) > flexcan->maxmb) {
+ if (flexcan->fifo)
+ i = FLEXCAN_MAX_FIFO_MB;
+ else
+ i = flexcan->xmit_maxmb + 1;
+ }
+ if (i == flexcan->xmit_mb)
+ return -1;
+ } while (1);
+
+ flexcan->xmit_mb = i + 1;
+ if (flexcan->xmit_mb > flexcan->maxmb) {
+ if (flexcan->fifo)
+ flexcan->xmit_mb = FLEXCAN_MAX_FIFO_MB;
+ else
+ flexcan->xmit_mb = flexcan->xmit_maxmb + 1;
+ }
+
+ if (frame->can_id & CAN_RTR_FLAG)
+ hwmb[i].mb_cs.cs.rtr = 1;
+ else
+ hwmb[i].mb_cs.cs.rtr = 0;
+
+ if (frame->can_id & CAN_EFF_FLAG) {
+ hwmb[i].mb_cs.cs.ide = 1;
+ hwmb[i].mb_cs.cs.srr = 1;
+ hwmb[i].mb_id = frame->can_id & CAN_EFF_MASK;
+ } else {
+ hwmb[i].mb_cs.cs.ide = 0;
+ hwmb[i].mb_id = (frame->can_id & CAN_SFF_MASK) << 18;
+ }
+
+ hwmb[i].mb_cs.cs.length = frame->can_dlc;
+ flexcan_memcpy(hwmb[i].mb_data, frame->data, frame->can_dlc);
+ hwmb[i].mb_cs.cs.code = CAN_MB_TX_ONCE;
+ return 0;
+}
+
+/*!
+ * @brief function to initial message buffer
+ *
+ * @param flexcan the pointer of can hardware device.
+ *
+ * @return none
+ */
+void flexcan_mbm_init(struct flexcan_device *flexcan)
+{
+ struct can_hw_mb *hwmb;
+ int rx_mb, i;
+
+ /* Set global mask to receive all messages */
+ __raw_writel(0, flexcan->io_base + CAN_HW_REG_RXGMASK);
+ __raw_writel(0, flexcan->io_base + CAN_HW_REG_RX14MASK);
+ __raw_writel(0, flexcan->io_base + CAN_HW_REG_RX15MASK);
+
+ memset(flexcan->hwmb, 0, sizeof(*hwmb) * FLEXCAN_MAX_MB);
+ /* Set individual mask to receive all messages */
+ memset(flexcan->rx_mask, 0, sizeof(unsigned int) * FLEXCAN_MAX_MB);
+
+ if (flexcan->fifo)
+ rx_mb = FLEXCAN_MAX_FIFO_MB;
+ else
+ rx_mb = flexcan->maxmb - flexcan->xmit_maxmb;
+
+ hwmb = flexcan->hwmb;
+ if (flexcan->fifo) {
+ unsigned long *id_table = flexcan->io_base + CAN_FIFO_BASE;
+ for (i = 0; i < rx_mb; i++)
+ id_table[i] = 0;
+ } else {
+ for (i = 0; i < rx_mb; i++) {
+ hwmb[i].mb_cs.cs.code = CAN_MB_RX_EMPTY;
+ /*
+			 * The IDE bit cannot be controlled by the mask
+			 * registers, so configure the message buffer to
+			 * receive either extended or standard messages.
+ */
+ if (flexcan->ext_msg && flexcan->std_msg)
+ hwmb[i].mb_cs.cs.ide = i & 1;
+ else {
+ if (flexcan->ext_msg)
+ hwmb[i].mb_cs.cs.ide = 1;
+ }
+ }
+ }
+
+ for (; i <= flexcan->maxmb; i++)
+ hwmb[i].mb_cs.cs.code = CAN_MB_TX_INACTIVE;
+
+ flexcan->xmit_mb = rx_mb;
+}
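
A minimal sketch (not part of this patch; the function name and the error
handling are assumptions) of how a hard_start_xmit path in drv.c might drive
flexcan_mbm_xmit(), stopping the queue when no TX mailbox is free:

#include <linux/can.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#include "flexcan.h"

static int flexcan_sketch_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct flexcan_device *flexcan = netdev_priv(dev);
	struct can_frame *frame = (struct can_frame *)skb->data;

	if (flexcan_mbm_xmit(flexcan, frame)) {
		/* No TX_INACTIVE mailbox was found; let the stack retry later. */
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	dev->trans_start = jiffies;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}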
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 7107620f615d..d56846d8d5b4 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -193,12 +193,17 @@ static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
#define CIRRUS_DEFAULT_IRQ VH_INTC_INT_NUM_CASCADED_INTERRUPT_1 /* Event inputs bank 1 - ID 35/bit 3 */
static unsigned int netcard_portlist[] __used __initdata = {CIRRUS_DEFAULT_BASE, 0};
static unsigned int cs8900_irq_map[] = {CIRRUS_DEFAULT_IRQ, 0, 0, 0};
-#elif defined(CONFIG_MACH_MX31ADS)
-#include <mach/board-mx31ads.h>
-static unsigned int netcard_portlist[] __used __initdata = {
- PBC_BASE_ADDRESS + PBC_CS8900A_IOBASE + 0x300, 0
-};
-static unsigned cs8900_irq_map[] = {EXPIO_INT_ENET_INT, 0, 0, 0};
+#elif defined(CONFIG_ARCH_MXC)
+/*! Null-terminated port list used to probe for the CS8900A device on the ISA
+ * bus. Add 3 to reset the page window before probing (fixes the Ethernet
+ * probe when deployed using nand_boot).
+ */
+extern unsigned int netcard_portlist[2];
+/*!
+ * The CS8900A has 4 software-selectable IRQ pins; interrupt pin 0 is used
+ * for interrupt generation.
+ */
+extern unsigned int cs8900_irq_map[4];
#else
static unsigned int netcard_portlist[] __used __initdata =
{ 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0};
@@ -1034,7 +1039,7 @@ skip_this_frame:
void __init reset_chip(struct net_device *dev)
{
-#if !defined(CONFIG_MACH_MX31ADS)
+#if !defined(CONFIG_ARCH_MXC)
#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01)
struct net_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
@@ -1063,7 +1068,7 @@ void __init reset_chip(struct net_device *dev)
reset_start_time = jiffies;
while( (readreg(dev, PP_SelfST) & INIT_DONE) == 0 && jiffies - reset_start_time < 2)
;
-#endif /* !CONFIG_MACH_MX31ADS */
+#endif /* !CONFIG_ARCH_MXC */
}
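
Since the MXC case above only declares the probe tables as extern, the board
support code is expected to define them. A minimal sketch (the base address
and IRQ values are placeholders, not taken from this patch):

/* Hypothetical board-level definitions matching the extern declarations;
 * the I/O base and IRQ below are assumed values for illustration only. */
#define CS8900A_SKETCH_IOBASE	0xf4000000	/* assumed static mapping */
#define CS8900A_SKETCH_IRQ	64		/* assumed expansion IRQ  */

unsigned int netcard_portlist[2] = { CS8900A_SKETCH_IOBASE + 0x300 + 3, 0 };
unsigned int cs8900_irq_map[4] = { CS8900A_SKETCH_IRQ, 0, 0, 0 };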
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index ecd5c71a7a8a..ea80b5407a1d 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -18,6 +18,9 @@
* Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
* Copyright (c) 2004-2006 Macq Electronique SA.
*/
+/*
+ * Copyright 2006-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -36,16 +39,29 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
+#include <asm/mach-types.h>
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M532x)
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include "fec.h"
+#define FEC_ALIGNMENT	(0x03)	/* FEC needs 4-byte alignment */
+#elif defined(CONFIG_ARCH_MXC)
+#include <mach/hardware.h>
+#include <mach/iim.h>
+#include "fec.h"
+#define FEC_ALIGNMENT	(0x0F)	/* FEC needs 128-bit (16-byte) alignment */
+#endif
+
+#define FEC_ADDR_ALIGNMENT(x) ((unsigned char *)(((unsigned long )(x) + (FEC_ALIGNMENT)) & (~FEC_ALIGNMENT)))
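+/*
+ * Alignment sketch (address assumed for illustration): with FEC_ALIGNMENT set
+ * to 0x0F, FEC_ADDR_ALIGNMENT(0x80001001) rounds the pointer up to 0x80001010,
+ * the next 16-byte boundary.
+ */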
#if defined(CONFIG_FEC2)
#define FEC_MAX_PORTS 2
@@ -53,7 +69,7 @@
#define FEC_MAX_PORTS 1
#endif
-#if defined(CONFIG_M5272)
+#if defined(CONFIG_M5272) || defined(CONFIG_ARCH_MXC)
#define HAVE_mii_link_interrupt
#endif
@@ -72,6 +88,8 @@ static unsigned int fec_hw[] = {
(MCF_MBAR+0x30000),
#elif defined(CONFIG_M532x)
(MCF_MBAR+0xfc030000),
+#elif defined(CONFIG_ARCH_MXC)
+ (IO_ADDRESS(FEC_BASE_ADDR)),
#else
&(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec),
#endif
@@ -149,6 +167,12 @@ typedef struct {
#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
+#ifndef CONFIG_ARCH_MXC
+#define FEC_ENET_MASK ((uint)0xffc00000)
+#else
+#define FEC_ENET_MASK ((uint)0xfff80000)
+#endif
+
/* The FEC stores dest/src/type, data, and checksum for receive packets.
*/
#define PKT_MAXBUF_SIZE 1518
@@ -162,7 +186,7 @@ typedef struct {
* account when setting it.
*/
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE 0
@@ -185,11 +209,13 @@ struct fec_enet_private {
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
unsigned char *tx_bounce[TX_RING_SIZE];
struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
ushort skb_cur;
ushort skb_dirty;
/* CPM dual port RAM relative addresses.
*/
+	void * cbd_mem_base;		/* saved virtual base address of the RX & TX buffer descriptors */
cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
cbd_t *tx_bd_base;
cbd_t *cur_rx, *cur_tx; /* The next free ring entry */
@@ -206,6 +232,7 @@ struct fec_enet_private {
uint phy_speed;
phy_info_t const *phy;
struct work_struct phy_task;
+ struct net_device *net;
uint sequence_done;
uint mii_phy_task_queued;
@@ -217,6 +244,8 @@ struct fec_enet_private {
int link;
int old_link;
int full_duplex;
+
+ struct clk *clk;
};
static int fec_enet_open(struct net_device *dev);
@@ -231,6 +260,17 @@ static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);
static void fec_set_mac_address(struct net_device *dev);
+static void __inline__ fec_dcache_inv_range(void * start, void * end);
+static void __inline__ fec_dcache_flush_range(void * start, void * end);
+
+/*
+ * fec_copy_threshold controls copying when receiving an Ethernet frame.
+ * If the Ethernet header is 4-byte aligned, the IP header and upper-layer
+ * headers will not be, because the Ethernet header is 14 bytes long.
+ * The combined TCP/IP header size is at most 128 bytes and normally 40 bytes,
+ * so the default value is set between 128 and 256.
+ */
+static int fec_copy_threshold = 192;
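+/*
+ * Decision sketch (frame sizes assumed for illustration): a 64-byte frame
+ * (pkt_len - 4 = 60 < 192) is copied into a small skb after skb_reserve(skb, 2)
+ * so the IP header ends up 4-byte aligned, while a 1514-byte frame is handed
+ * up in place and the ring slot is refilled with a fresh FEC_ENET_RX_FRSIZE skb.
+ */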
/* MII processing. We keep this as simple as possible. Requests are
* placed on the list (if there is room). When the request is finished
@@ -342,10 +382,10 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
* 4-byte boundaries. Use bounce buffers to copy data
* and get it aligned. Ugh.
*/
- if (bdp->cbd_bufaddr & 0x3) {
+ if ((bdp->cbd_bufaddr) & FEC_ALIGNMENT) {
unsigned int index;
index = bdp - fep->tx_bd_base;
- memcpy(fep->tx_bounce[index], (void *) bdp->cbd_bufaddr, bdp->cbd_datlen);
+ memcpy(fep->tx_bounce[index], (void *) skb->data, skb->len);
bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);
}
@@ -359,8 +399,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Push the data cache so the CPM does not get stale memory
* data.
*/
- flush_dcache_range((unsigned long)skb->data,
- (unsigned long)skb->data + skb->len);
+ fec_dcache_flush_range(__va(bdp->cbd_bufaddr), __va(bdp->cbd_bufaddr) +
+ bdp->cbd_datlen);
/* Send it on its way. Tell FEC it's ready, interrupt when done,
* it's the last BD of the frame, and to put the CRC on the end.
@@ -373,7 +413,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
/* Trigger transmission start */
- fecp->fec_x_des_active = 0;
+ fecp->fec_x_des_active = 0x01000000;
/* If this was the last BD in the ring, start at the beginning again.
*/
@@ -460,7 +500,7 @@ fec_enet_interrupt(int irq, void * dev_id)
/* Handle receive event in its own function.
*/
- if (int_events & FEC_ENET_RXF) {
+ if (int_events & (FEC_ENET_RXF | FEC_ENET_RXB)) {
ret = IRQ_HANDLED;
fec_enet_rx(dev);
}
@@ -469,7 +509,7 @@ fec_enet_interrupt(int irq, void * dev_id)
descriptors. FEC handles all errors, we just discover
them as part of the transmit process.
*/
- if (int_events & FEC_ENET_TXF) {
+ if (int_events & (FEC_ENET_TXF | FEC_ENET_TXB)) {
ret = IRQ_HANDLED;
fec_enet_tx(dev);
}
@@ -572,6 +612,7 @@ fec_enet_rx(struct net_device *dev)
struct sk_buff *skb;
ushort pkt_len;
__u8 *data;
+ int rx_index ;
#ifdef CONFIG_M532x
flush_cache_all();
@@ -588,7 +629,7 @@ fec_enet_rx(struct net_device *dev)
bdp = fep->cur_rx;
while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
-
+ rx_index = bdp - fep->rx_bd_base;
#ifndef final_version
/* Since we have allocated space to hold a complete frame,
* the last indicator should be set.
@@ -632,20 +673,36 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
pkt_len = bdp->cbd_datlen;
dev->stats.rx_bytes += pkt_len;
data = (__u8*)__va(bdp->cbd_bufaddr);
+ fec_dcache_inv_range(data, data+pkt_len -4);
/* This does 16 byte alignment, exactly what we need.
* The packet length includes FCS, but we don't want to
* include that when passing upstream as it messes up
* bridging applications.
*/
- skb = dev_alloc_skb(pkt_len-4);
+ if ((pkt_len - 4) < fec_copy_threshold) {
+ skb = dev_alloc_skb(pkt_len);
+ } else {
+ skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
+ }
if (skb == NULL) {
printk("%s: Memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
} else {
- skb_put(skb,pkt_len-4); /* Make room */
+ if ((pkt_len - 4) < fec_copy_threshold) {
+				skb_reserve(skb, 2); /* skip 2 bytes so the IP header is 4-byte aligned */
+ skb_put(skb,pkt_len-4); /* Make room */
skb_copy_to_linear_data(skb, data, pkt_len-4);
+ } else {
+ struct sk_buff * pskb = fep->rx_skbuff[rx_index];
+
+ fep->rx_skbuff[rx_index] = skb;
+ skb->data = FEC_ADDR_ALIGNMENT(skb->data);
+ bdp->cbd_bufaddr = __pa(skb->data);
+ skb_put(pskb,pkt_len-4); /* Make room */
+ skb = pskb;
+ }
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
}
@@ -672,7 +729,7 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
* incoming frames. On a heavily loaded network, we should be
* able to keep up at the expense of system resources.
*/
- fecp->fec_r_des_active = 0;
+ fecp->fec_r_des_active = 0x01000000;
#endif
} /* while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) */
fep->cur_rx = (cbd_t *)bdp;
@@ -1207,6 +1264,26 @@ static phy_info_t phy_info_dp83848= {
},
};
+static phy_info_t phy_info_lan8700 = {
+ 0x0007C0C,
+ "LAN8700",
+ (const phy_cmd_t []) { /* config */
+ { mk_mii_read(MII_REG_CR), mii_parse_cr },
+ { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* startup */
+ { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+ { mk_mii_read(MII_REG_SR), mii_parse_sr },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* act_int */
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* shutdown */
+ { mk_mii_end, }
+ },
+};
/* ------------------------------------------------------------------------- */
static phy_info_t const * const phy_info[] = {
@@ -1216,6 +1293,7 @@ static phy_info_t const * const phy_info[] = {
&phy_info_am79c874,
&phy_info_ks8721bl,
&phy_info_dp83848,
+ &phy_info_lan8700,
NULL
};
@@ -1227,6 +1305,21 @@ mii_link_interrupt(int irq, void * dev_id);
#if defined(CONFIG_M5272)
/*
+ * do architecture-specific initialization for this chip
+ */
+static void __inline__ fec_arch_init(void)
+{
+ return;
+}
+/*
+ * do architecture-specific cleanup for this chip
+ */
+static void __inline__ fec_arch_exit(void)
+{
+ return;
+}
+
+/*
* Code specific to Coldfire 5272 setup.
*/
static void __inline__ fec_request_intrs(struct net_device *dev)
@@ -1327,15 +1420,40 @@ static void __inline__ fec_phy_ack_intr(void)
*icrp = 0x0d000000;
}
-static void __inline__ fec_localhw_setup(void)
+static void __inline__ fec_localhw_setup(struct net_device *dev)
{
}
/*
- * Do not need to make region uncached on 5272.
+ * invalidate the dcache over the virtual memory range (start, end)
*/
-static void __inline__ fec_uncache(unsigned long addr)
+static void __inline__ fec_dcache_inv_range(void * start, void * end)
{
+ return ;
+}
+
+/*
+ * flush the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_flush_range(void * start, void * end)
+{
+ return ;
+}
+
+/*
+ * map the memory region (addr, addr+size) as uncacheable.
+ */
+static unsigned long __inline__ fec_map_uncache(unsigned long addr, int size)
+{
+ return addr;
+}
+
+/*
+ * unmap the uncacheable memory region starting at addr.
+ */
+static void __inline__ fec_unmap_uncache(void * addr)
+{
+ return ;
}
/* ------------------------------------------------------------------------- */
@@ -1343,6 +1461,22 @@ static void __inline__ fec_uncache(unsigned long addr)
#elif defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
/*
+ * do architecture-specific initialization for this chip
+ */
+static void __inline__ fec_arch_init(void)
+{
+ return;
+}
+
+/*
+ * do architecture-specific cleanup for this chip
+ */
+static void __inline__ fec_arch_exit(void)
+{
+ return;
+}
+
+/*
* Code specific to Coldfire 5230/5231/5232/5234/5235,
* the 5270/5271/5274/5275 and 5280/5282 setups.
*/
@@ -1489,20 +1623,58 @@ static void __inline__ fec_phy_ack_intr(void)
{
}
-static void __inline__ fec_localhw_setup(void)
+static void __inline__ fec_localhw_setup(struct net_device *dev)
+{
+}
+/*
+ * invalidate the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_inv_range(void * start, void * end)
+{
+ return ;
+}
+
+/*
+ * flush the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_flush_range(void * start, void * end)
+{
+ return ;
+}
+
+/*
+ * map the memory region (addr, addr+size) as uncacheable.
+ */
+static unsigned long __inline__ fec_map_uncache(unsigned long addr, int size)
{
+ return addr;
}
/*
- * Do not need to make region uncached on 5272.
+ * unmap the uncacheable memory region starting at addr.
*/
-static void __inline__ fec_uncache(unsigned long addr)
+static void __inline__ fec_unmap_uncache(void * addr)
{
+ return ;
}
/* ------------------------------------------------------------------------- */
#elif defined(CONFIG_M520x)
+/*
+ * do architecture-specific initialization for this chip
+ */
+static void __inline__ fec_arch_init(void)
+{
+ return;
+}
+/*
+ * do architecture-specific cleanup for this chip
+ */
+static void __inline__ fec_arch_exit(void)
+{
+ return;
+}
/*
* Code specific to Coldfire 520x
@@ -1610,17 +1782,63 @@ static void __inline__ fec_phy_ack_intr(void)
{
}
-static void __inline__ fec_localhw_setup(void)
+static void __inline__ fec_localhw_setup(struct net_device *dev)
{
}
-static void __inline__ fec_uncache(unsigned long addr)
+/*
+ * invalidate the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_inv_range(void * start, void * end)
{
+ return ;
}
+/*
+ * flush the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_flush_range(void * start, void * end)
+{
+ return ;
+}
+
+/*
+ * map the memory region (addr, addr+size) as uncacheable.
+ */
+static unsigned long __inline__ fec_map_uncache(unsigned long addr, int size)
+{
+ return addr;
+}
+
+/*
+ * unmap the uncacheable memory region starting at addr.
+ */
+static void __inline__ fec_unmap_uncache(void * addr)
+{
+ return ;
+}
+
+
/* ------------------------------------------------------------------------- */
#elif defined(CONFIG_M532x)
+
+/*
+ * do architecture-specific initialization for this chip
+ */
+static void __inline__ fec_arch_init(void)
+{
+ return;
+}
+
+/*
+ * do architecture-specific cleanup for this chip
+ */
+static void __inline__ fec_arch_exit(void)
+{
+ return;
+}
+
/*
* Code specific for M532x
*/
@@ -1749,21 +1967,294 @@ static void __inline__ fec_phy_ack_intr(void)
{
}
-static void __inline__ fec_localhw_setup(void)
+static void __inline__ fec_localhw_setup(struct net_device *dev)
+{
+}
+
+/*
+ * invalidate the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_inv_range(void * start, void * end)
+{
+ return ;
+}
+
+/*
+ * flush the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_flush_range(void * start, void * end)
{
+ return ;
}
/*
- * Do not need to make region uncached on 532x.
+ * map the memory region (addr, addr+size) as uncacheable.
*/
-static void __inline__ fec_uncache(unsigned long addr)
+static unsigned long __inline__ fec_map_uncache(unsigned long addr, int size)
{
+ return addr;
+}
+
+/*
+ * unmap the uncacheable memory region starting at addr.
+ */
+static void __inline__ fec_unmap_uncache(void * addr)
+{
+ return ;
}
/* ------------------------------------------------------------------------- */
+#elif defined(CONFIG_ARCH_MXC)
+
+extern void gpio_fec_active(void);
+extern void gpio_fec_inactive(void);
+extern unsigned int expio_intr_fec;
+
+/*
+ * do architecture-specific initialization for this chip
+ */
+static void __inline__ fec_arch_init(void)
+{
+ struct clk *clk;
+ gpio_fec_active();
+ clk = clk_get(NULL, "fec_clk");
+ clk_enable(clk);
+ clk_put(clk);
+ return;
+}
+/*
+ * do architecture-specific cleanup for this chip
+ */
+static void __inline__ fec_arch_exit(void)
+{
+ struct clk *clk;
+ clk = clk_get(NULL, "fec_clk");
+ clk_disable(clk);
+ clk_put(clk);
+ gpio_fec_inactive();
+ return;
+}
+
+/*
+ * Code specific to Freescale i.MXC
+ */
+static void __inline__ fec_request_intrs(struct net_device *dev)
+{
+ /* Setup interrupt handlers. */
+ if (request_irq(MXC_INT_FEC, fec_enet_interrupt, 0, "fec", dev) != 0)
+ panic("FEC: Could not allocate FEC IRQ(%d)!\n", MXC_INT_FEC);
+ /* TODO: disable now due to CPLD issue */
+ if ((expio_intr_fec > 0) &&
+ (request_irq(expio_intr_fec, mii_link_interrupt, 0, "fec(MII)", dev) != 0))
+ panic("FEC: Could not allocate FEC(MII) IRQ(%d)!\n", expio_intr_fec);
+ disable_irq(expio_intr_fec);
+}
+
+static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
+{
+ u32 rate;
+ struct clk *clk;
+ volatile fec_t *fecp;
+ fecp = fep->hwp;
+ fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
+ fecp->fec_x_cntrl = 0x00;
+
+ /*
+ * Set MII speed to 2.5 MHz
+ */
+ clk = clk_get(NULL, "fec_clk");
+ rate = clk_get_rate(clk);
+ clk_put(clk);
+
+ fep->phy_speed =
+ ((((rate / 2 + 4999999) / 2500000) / 2) & 0x3F) << 1;
+ fecp->fec_mii_speed = fep->phy_speed;
+ fec_restart(dev, 0);
+}
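+/*
+ * Worked example (clock rate assumed for illustration): with a 66 MHz fec_clk,
+ * (66000000 / 2 + 4999999) / 2500000 = 15, 15 / 2 = 7 and 7 << 1 = 14, so 14
+ * is written to fec_mii_speed to honour the 2.5 MHz MII speed target noted in
+ * fec_set_mii() above.
+ */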
+
+#define FEC_IIM_BASE IO_ADDRESS(IIM_BASE_ADDR)
+static void __inline__ fec_get_mac(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ volatile fec_t *fecp;
+ unsigned char *iap, tmpaddr[ETH_ALEN];
+ int i;
+ unsigned long fec_mac_base = FEC_IIM_BASE + MXC_IIMKEY0;
+ fecp = fep->hwp;
+
+ if (fecp->fec_addr_low || fecp->fec_addr_high) {
+ *((unsigned long *) &tmpaddr[0]) =
+ be32_to_cpu(fecp->fec_addr_low);
+ *((unsigned short *) &tmpaddr[4]) =
+ be32_to_cpu(fecp->fec_addr_high);
+ iap = &tmpaddr[0];
+ } else {
+ if (cpu_is_mx27_rev(CHIP_REV_2_0) > 0)
+ fec_mac_base = FEC_IIM_BASE + MXC_IIMMAC;
+
+ memset(tmpaddr, 0, ETH_ALEN);
+ if (!(machine_is_mx35_3ds() || cpu_is_mx51())) {
+ /*
+ * Get MAC address from IIM.
+ * If it is all 1's or 0's, use the default.
+ */
+ for (i = 0; i < ETH_ALEN; i++)
+ tmpaddr[ETH_ALEN-1-i] =
+ __raw_readb(fec_mac_base + i * 4);
+ }
+ iap = &tmpaddr[0];
+
+ if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
+ (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
+ iap = fec_mac_default;
+ if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
+ (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
+ iap = fec_mac_default;
+ }
+
+ memcpy(dev->dev_addr, iap, ETH_ALEN);
+
+ /* Adjust MAC if using default MAC address */
+ if (iap == fec_mac_default)
+ dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
+}
+
+#ifndef MODULE
+static int fec_mac_setup(char *new_mac)
+{
+ char *ptr, *p = new_mac;
+ int i = 0;
+
+ while (p && (*p) && i < 6) {
+ ptr = strchr(p, ':');
+ if (ptr)
+ *ptr++ = '\0';
+
+ if (strlen(p)) {
+ unsigned long tmp = simple_strtoul(p, NULL, 16);
+ if (tmp > 0xff)
+ break;
+ fec_mac_default[i++] = tmp;
+ }
+ p = ptr;
+ }
+
+ return 0;
+}
+
+__setup("fec_mac=", fec_mac_setup);
+#endif
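+/*
+ * Usage sketch (MAC value assumed for illustration): booting a built-in driver
+ * with "fec_mac=00:04:9f:01:02:03" on the kernel command line fills
+ * fec_mac_default[], which fec_get_mac() falls back to when no valid address
+ * is found in the IIM fuses or the FEC address registers.
+ */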
+
+static void __inline__ fec_enable_phy_intr(void)
+{
+ if (expio_intr_fec > 0)
+ enable_irq(expio_intr_fec);
+}
+
+static void __inline__ fec_disable_phy_intr(void)
+{
+ if (expio_intr_fec > 0)
+ disable_irq(expio_intr_fec);
+}
+
+static void __inline__ fec_phy_ack_intr(void)
+{
+ if (expio_intr_fec > 0)
+ disable_irq(expio_intr_fec);
+}
+
+#ifdef CONFIG_ARCH_MX25
+/*
+ * i.MX25 allows RMII mode to be configured via a gasket
+ */
+#define FEC_MIIGSK_CFGR_FRCONT (1 << 6)
+#define FEC_MIIGSK_CFGR_LBMODE (1 << 4)
+#define FEC_MIIGSK_CFGR_EMODE (1 << 3)
+#define FEC_MIIGSK_CFGR_IF_MODE_MASK (3 << 0)
+#define FEC_MIIGSK_CFGR_IF_MODE_MII (0 << 0)
+#define FEC_MIIGSK_CFGR_IF_MODE_RMII (1 << 0)
+
+#define FEC_MIIGSK_ENR_READY (1 << 2)
+#define FEC_MIIGSK_ENR_EN (1 << 1)
+
+static void __inline__ fec_localhw_setup(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ volatile fec_t *fecp = fep->hwp;
+ /*
+ * Set up the MII gasket for RMII mode
+ */
+ printk("%s: enable RMII gasket\n", dev->name);
+
+ /* disable the gasket and wait */
+ fecp->fec_miigsk_enr = 0;
+ while (fecp->fec_miigsk_enr & FEC_MIIGSK_ENR_READY)
+ udelay(1);
+
+ /* configure the gasket for RMII, 50 MHz, no loopback, no echo */
+ fecp->fec_miigsk_cfgr = FEC_MIIGSK_CFGR_IF_MODE_RMII;
+
+ /* re-enable the gasket */
+ fecp->fec_miigsk_enr = FEC_MIIGSK_ENR_EN;
+}
+#else
+static void __inline__ fec_localhw_setup(struct net_device *dev)
+{
+}
+#endif
+
+/*
+ * invalidate the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_inv_range(void * start, void * end)
+{
+ dma_sync_single(NULL, (unsigned long)__pa(start), (unsigned long) (end-start), DMA_FROM_DEVICE);
+ return ;
+}
+
+/*
+ * flush the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_flush_range(void * start, void * end)
+{
+ dma_sync_single(NULL, (unsigned long)__pa(start), (unsigned long) (end-start), DMA_BIDIRECTIONAL);
+ return ;
+}
+
+/*
+ * map the memory region (addr, addr+size) as uncacheable.
+ */
+static unsigned long __inline__ fec_map_uncache(unsigned long addr, int size)
+{
+ return (unsigned long)ioremap(__pa(addr), size);
+}
+
+/*
+ * unmap the uncacheable memory region starting at addr.
+ */
+static void __inline__ fec_unmap_uncache(void * addr)
+{
+ return iounmap(addr);
+}
+
+/* ------------------------------------------------------------------------- */
#else
+/*
+ * do architecture-specific initialization for this chip
+ */
+static void __inline__ fec_arch_init(void)
+{
+ return;
+}
+/*
+ * do architecture-specific cleanup for this chip
+ */
+static void __inline__ fec_arch_exit(void)
+{
+ return;
+}
/*
* Code specific to the MPC860T setup.
@@ -1831,7 +2322,7 @@ static void __inline__ fec_phy_ack_intr(void)
{
}
-static void __inline__ fec_localhw_setup(void)
+static void __inline__ fec_localhw_setup(struct net_device *dev)
{
volatile fec_t *fecp;
@@ -1842,12 +2333,40 @@ static void __inline__ fec_localhw_setup(void)
fecp->fec_fun_code = 0x78000000;
}
-static void __inline__ fec_uncache(unsigned long addr)
+/*
+ * invalidate the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_inv_range(void * start, void * end)
+{
+ return ;
+}
+
+/*
+ * flush the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_flush_range(void * start, void * end)
+{
+ return ;
+}
+
+/*
+ * map the memory region (addr, addr+size) as uncacheable.
+ */
+static unsigned long __inline__ fec_map_uncache(unsigned long addr, int size)
{
pte_t *pte;
pte = va_to_pte(mem_addr);
pte_val(*pte) |= _PAGE_NO_CACHE;
flush_tlb_page(init_mm.mmap, mem_addr);
+ return addr;
+}
+
+/*
+ * unmap the uncacheable memory region starting at addr.
+ */
+static void __inline__ fec_unmap_uncache(void * addr)
+{
+ return ;
}
#endif
@@ -2073,10 +2592,14 @@ mii_link_interrupt(int irq, void * dev_id)
#if 0
disable_irq(fep->mii_irq); /* disable now, enable later */
#endif
-
- mii_do_cmd(dev, fep->phy->ack_int);
- mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */
-
+ /*
+	 * Some boards trigger the PHY interrupt before the PHY is enabled,
+	 * and at that moment fep->phy is not yet initialized.
+ */
+ if (fep->phy) {
+ mii_do_cmd(dev, fep->phy->ack_int);
+ mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */
+ }
return IRQ_HANDLED;
}
#endif
@@ -2094,6 +2617,8 @@ fec_enet_open(struct net_device *dev)
fep->sequence_done = 0;
fep->link = 0;
+ /* no phy, go full duplex, it's most likely a hub chip */
+ fec_restart(dev, 1);
if (fep->phy) {
mii_do_cmd(dev, fep->phy->ack_int);
mii_do_cmd(dev, fep->phy->config);
@@ -2111,19 +2636,14 @@ fec_enet_open(struct net_device *dev)
mii_do_cmd(dev, fep->phy->startup);
- /* Set the initial link state to true. A lot of hardware
- * based on this device does not implement a PHY interrupt,
- * so we are never notified of link change.
- */
- fep->link = 1;
- } else {
- fep->link = 1; /* lets just try it and see */
- /* no phy, go full duplex, it's most likely a hub chip */
- fec_restart(dev, 1);
}
-
- netif_start_queue(dev);
+ /* Set the initial link state to true. A lot of hardware
+ * based on this device does not implement a PHY interrupt,
+ * so we are never notified of link change.
+ */
+ fep->link = 1;
fep->opened = 1;
+ netif_start_queue(dev);
return 0; /* Success */
}
@@ -2135,9 +2655,9 @@ fec_enet_close(struct net_device *dev)
/* Don't know what to do yet.
*/
fep->opened = 0;
- netif_stop_queue(dev);
- fec_stop(dev);
-
+ if (fep->link) {
+ fec_stop(dev);
+ }
return 0;
}
@@ -2248,6 +2768,7 @@ int __init fec_enet_init(struct net_device *dev)
unsigned long mem_addr;
volatile cbd_t *bdp;
cbd_t *cbd_base;
+ struct sk_buff* pskb;
volatile fec_t *fecp;
int i, j;
static int index = 0;
@@ -2256,6 +2777,8 @@ int __init fec_enet_init(struct net_device *dev)
if (index >= FEC_MAX_PORTS)
return -ENXIO;
+ fep->net = dev;
+
/* Allocate memory for buffer descriptors.
*/
mem_addr = __get_free_page(GFP_KERNEL);
@@ -2264,6 +2787,7 @@ int __init fec_enet_init(struct net_device *dev)
return -ENOMEM;
}
+ fep->cbd_mem_base = (void *)mem_addr;
spin_lock_init(&fep->hw_lock);
spin_lock_init(&fep->mii_lock);
@@ -2288,10 +2812,14 @@ int __init fec_enet_init(struct net_device *dev)
*/
fec_get_mac(dev);
- cbd_base = (cbd_t *)mem_addr;
- /* XXX: missing check for allocation failure */
+ cbd_base = (cbd_t *)fec_map_uncache(mem_addr, PAGE_SIZE);
+ if (cbd_base == NULL) {
+ free_page(mem_addr);
+		printk("FEC: failed to map descriptor memory as uncacheable\n");
+ return -ENOMEM;
+ }
- fec_uncache(mem_addr);
+ /* XXX: missing check for allocation failure */
/* Set receive and transmit descriptor base.
*/
@@ -2306,25 +2834,24 @@ int __init fec_enet_init(struct net_device *dev)
/* Initialize the receive buffer descriptors.
*/
bdp = fep->rx_bd_base;
- for (i=0; i<FEC_ENET_RX_PAGES; i++) {
-
- /* Allocate a page.
- */
- mem_addr = __get_free_page(GFP_KERNEL);
- /* XXX: missing check for allocation failure */
-
- fec_uncache(mem_addr);
-
- /* Initialize the BD for every fragment in the page.
- */
- for (j=0; j<FEC_ENET_RX_FRPPG; j++) {
- bdp->cbd_sc = BD_ENET_RX_EMPTY;
- bdp->cbd_bufaddr = __pa(mem_addr);
- mem_addr += FEC_ENET_RX_FRSIZE;
- bdp++;
+ for (i=0; i<RX_RING_SIZE; i++, bdp++) {
+ pskb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
+ if(pskb == NULL) {
+ for(; i>0; i--) {
+ if( fep->rx_skbuff[i-1] ) {
+ kfree_skb(fep->rx_skbuff[i-1]);
+ fep->rx_skbuff[i-1] = NULL;
+ }
+ }
+			printk("FEC: failed to allocate skb while initializing the RX ring\n");
+ free_page(mem_addr);
+ return -ENOMEM;
}
+ fep->rx_skbuff[i] = pskb;
+ pskb->data = FEC_ADDR_ALIGNMENT(pskb->data);
+ bdp->cbd_sc = BD_ENET_RX_EMPTY;
+ bdp->cbd_bufaddr = __pa(pskb->data);
}
-
/* Set the last buffer to wrap.
*/
bdp--;
@@ -2357,19 +2884,23 @@ int __init fec_enet_init(struct net_device *dev)
/* Set receive and transmit descriptor base.
*/
- fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base));
- fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base));
+ fecp->fec_r_des_start = __pa((uint)(fep->cbd_mem_base));
+ fecp->fec_x_des_start = __pa((uint)(fep->cbd_mem_base + RX_RING_SIZE*sizeof(cbd_t)));
/* Install our interrupt handlers. This varies depending on
* the architecture.
*/
fec_request_intrs(dev);
+ /* Clear and enable interrupts */
+ fecp->fec_ievent = FEC_ENET_MASK;
+ fecp->fec_imask = FEC_ENET_TXF | FEC_ENET_TXB | FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII;
+
fecp->fec_grp_hash_table_high = 0;
fecp->fec_grp_hash_table_low = 0;
fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
fecp->fec_ecntrl = 2;
- fecp->fec_r_des_active = 0;
+ fecp->fec_r_des_active = 0x01000000;
#ifndef CONFIG_M5272
fecp->fec_hash_table_high = 0;
fecp->fec_hash_table_low = 0;
@@ -2392,10 +2923,6 @@ int __init fec_enet_init(struct net_device *dev)
/* setup MII interface */
fec_set_mii(dev, fep);
- /* Clear and enable interrupts */
- fecp->fec_ievent = 0xffc00000;
- fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII);
-
/* Queue up command to detect the PHY and initialize the
* remainder of the interface.
*/
@@ -2427,9 +2954,15 @@ fec_restart(struct net_device *dev, int duplex)
fecp->fec_ecntrl = 1;
udelay(10);
+ /* Enable interrupts we wish to service.
+ */
+ fecp->fec_imask = FEC_ENET_TXF | FEC_ENET_TXB | FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII;
+
/* Clear any outstanding interrupt.
- */
- fecp->fec_ievent = 0xffc00000;
+ *
+ */
+ fecp->fec_ievent = FEC_ENET_MASK;
+
fec_enable_phy_intr();
/* Set station address.
@@ -2445,12 +2978,12 @@ fec_restart(struct net_device *dev, int duplex)
*/
fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
- fec_localhw_setup();
+ fec_localhw_setup(dev);
/* Set receive and transmit descriptor base.
*/
- fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base));
- fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base));
+ fecp->fec_r_des_start = __pa((uint)(fep->cbd_mem_base));
+ fecp->fec_x_des_start = __pa((uint)(fep->cbd_mem_base + RX_RING_SIZE*sizeof(cbd_t)));
fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
fep->cur_rx = fep->rx_bd_base;
@@ -2517,11 +3050,10 @@ fec_restart(struct net_device *dev, int duplex)
/* And last, enable the transmit and receive processing.
*/
fecp->fec_ecntrl = 2;
- fecp->fec_r_des_active = 0;
+ fecp->fec_r_des_active = 0x01000000;
- /* Enable interrupts we wish to service.
- */
- fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII);
+ if (fep->link)
+ netif_start_queue(dev);
}
static void
@@ -2530,6 +3062,8 @@ fec_stop(struct net_device *dev)
volatile fec_t *fecp;
struct fec_enet_private *fep;
+ netif_stop_queue(dev);
+
fep = netdev_priv(dev);
fecp = fep->hwp;
@@ -2565,6 +3099,7 @@ static int __init fec_enet_module_init(void)
DECLARE_MAC_BUF(mac);
printk("FEC ENET Version 0.2\n");
+ fec_arch_init();
for (i = 0; (i < FEC_MAX_PORTS); i++) {
dev = alloc_etherdev(sizeof(struct fec_enet_private));
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index 292719daceff..cb21ef1a8052 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -14,7 +14,7 @@
/****************************************************************************/
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
/*
* Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models
@@ -56,6 +56,10 @@ typedef struct fec {
unsigned long fec_r_des_start; /* Receive descriptor ring */
unsigned long fec_x_des_start; /* Transmit descriptor ring */
unsigned long fec_r_buff_size; /* Maximum receive buff size */
+ unsigned long fec_reserved12[93];
+ unsigned long fec_miigsk_cfgr; /* MIIGSK config register */
+ unsigned long fec_reserved13;
+ unsigned long fec_miigsk_enr; /* MIIGSK enable register */
} fec_t;
#else
@@ -103,12 +107,22 @@ typedef struct fec {
/*
* Define the buffer descriptor structure.
*/
+/* See "Receive Buffer Descriptor Field Definitions" in the reference manual.
+ * The descriptor layout is little-endian on this platform.
+ */
+#ifdef CONFIG_ARCH_MXC
+typedef struct bufdesc {
+ unsigned short cbd_datlen; /* Data length */
+ unsigned short cbd_sc; /* Control and status info */
+ unsigned long cbd_bufaddr; /* Buffer address */
+} cbd_t;
+#else
typedef struct bufdesc {
unsigned short cbd_sc; /* Control and status info */
unsigned short cbd_datlen; /* Data length */
unsigned long cbd_bufaddr; /* Buffer address */
} cbd_t;
-
+#endif
/*
* The following definitions courtesy of commproc.h, which where
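
A minimal illustration (an interpretation of the little-endian note above, not
spelled out in the patch) of why the MXC variant of struct bufdesc swaps the
two halfwords: the control/status field occupies the upper half of the first
32-bit descriptor word, and on a little-endian ARM core that upper half is the
higher-addressed, i.e. second, struct member.

#include <stdint.h>

/* First 32-bit word of a buffer descriptor as a little-endian CPU lays it
 * out in memory; the member names mirror the cbd_t fields above. */
union bd_word0_le {
	uint32_t word;                 /* the value seen as one 32-bit access  */
	struct {
		uint16_t cbd_datlen;   /* low halfword, lower memory address   */
		uint16_t cbd_sc;       /* high halfword, higher memory address */
	} fields;
};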
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index e6317557a531..e372f35ae038 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -342,5 +342,9 @@ config MCS_FIR
To compile it as a module, choose M here: the module will be called
mcs7780.
+config MXC_FIR
+ tristate "Freescale MXC FIR driver"
+ depends on ARCH_MXC && IRDA
+
endmenu
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index 5d20fde32a24..d32839cf9e73 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_VLSI_FIR) += vlsi_ir.o
obj-$(CONFIG_VIA_FIR) += via-ircc.o
obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o
obj-$(CONFIG_MCS_FIR) += mcs7780.o
+obj-$(CONFIG_MXC_FIR) += mxc_ir.o
obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
# SIR drivers
obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
diff --git a/drivers/net/irda/mxc_ir.c b/drivers/net/irda/mxc_ir.c
new file mode 100644
index 000000000000..7adcf5363078
--- /dev/null
+++ b/drivers/net/irda/mxc_ir.c
@@ -0,0 +1,1777 @@
+/*
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * Based on sa1100_ir.c - Copyright 2000-2001 Russell King
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*!
+ * @file mxc_ir.c
+ *
+ * @brief Driver for the Freescale Semiconductor MXC FIRI.
+ *
+ * This driver is based on drivers/net/irda/sa1100_ir.c, by Russell King.
+ *
+ * @ingroup FIRI
+ */
+
+/*
+ * Include Files
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+
+#include <net/irda/irda.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <mach/hardware.h>
+#include <mach/mxc_uart.h>
+#include "mxc_ir.h"
+
+#define IS_SIR(mi) ( (mi)->speed <= 115200 )
+#define IS_MIR(mi) ( (mi)->speed < 4000000 && (mi)->speed >= 576000 )
+#define IS_FIR(mi) ( (mi)->speed >= 4000000 )
+
+#define SDMA_START_DELAY() { \
+ volatile int j,k;\
+ int i;\
+ for(i=0;i<10000;i++)\
+ k=j;\
+ }
+
+#define IRDA_FRAME_SIZE_LIMIT 2047
+#define UART_BUFF_SIZE 14384
+
+#define UART4_UFCR_TXTL 16
+#define UART4_UFCR_RXTL 1
+
+#define FIRI_SDMA_TX
+#define FIRI_SDMA_RX
+
+/*!
+ * This structure is a way for the low-level driver to define its own
+ * \b mxc_irda state. It includes SK buffers and DMA buffers,
+ * as well as other elements that are specifically required by this driver.
+ */
+struct mxc_irda {
+ /*!
+	 * Tracks whether the device is running
+ */
+ unsigned char open;
+
+ /*!
+ * This holds current FIRI communication speed
+ */
+ int speed;
+
+ /*!
+ * This holds FIRI communication speed for next packet
+ */
+ int newspeed;
+
+ /*!
+ * SK buffer for transmitter
+ */
+ struct sk_buff *txskb;
+
+ /*!
+ * SK buffer for receiver
+ */
+ struct sk_buff *rxskb;
+
+#ifdef FIRI_SDMA_RX
+ /*!
+ * SK buffer for tasklet
+ */
+ struct sk_buff *tskb;
+#endif
+
+ /*!
+	 * DMA address of the receive buffer
+ */
+ dma_addr_t dma_rx_buff_phy;
+
+ /*!
+	 * DMA address of the transmit buffer
+ */
+ dma_addr_t dma_tx_buff_phy;
+
+ /*!
+ * DMA Transmit buffer length
+ */
+ unsigned int dma_tx_buff_len;
+
+ /*!
+ * DMA channel for transmitter
+ */
+ int txdma_ch;
+
+ /*!
+ * DMA channel for receiver
+ */
+ int rxdma_ch;
+
+ /*!
+ * IrDA network device statistics
+ */
+ struct net_device_stats stats;
+
+ /*!
+ * The device structure used to get FIRI information
+ */
+ struct device *dev;
+
+ /*!
+ * Resource structure for UART, which will maintain base addresses and IRQs.
+ */
+ struct resource *uart_res;
+
+ /*!
+ * Base address of UART, used in readl and writel.
+ */
+ void *uart_base;
+
+ /*!
+ * Resource structure for FIRI, which will maintain base addresses and IRQs.
+ */
+ struct resource *firi_res;
+
+ /*!
+ * Base address of FIRI, used in readl and writel.
+ */
+ void *firi_base;
+
+ /*!
+ * UART IRQ number.
+ */
+ int uart_irq;
+
+ /*!
+ * Second UART IRQ number in case the interrupt lines are not muxed.
+ */
+ int uart_irq1;
+
+ /*!
+ * UART clock needed for baud rate calculations
+ */
+ struct clk *uart_clk;
+
+ /*!
+	 * UART clock rate used for baud rate calculations
+ */
+ unsigned long uart_clk_rate;
+
+ /*!
+ * FIRI clock needed for baud rate calculations
+ */
+ struct clk *firi_clk;
+
+ /*!
+ * FIRI IRQ number.
+ */
+ int firi_irq;
+
+ /*!
+ * IrLAP layer instance
+ */
+ struct irlap_cb *irlap;
+
+ /*!
+ * Driver supported baudrate capabilities
+ */
+ struct qos_info qos;
+
+ /*!
+ * Temporary transmit buffer used by the driver
+ */
+ iobuff_t tx_buff;
+
+ /*!
+ * Temporary receive buffer used by the driver
+ */
+ iobuff_t rx_buff;
+
+ /*!
+ * Pointer to platform specific data structure.
+ */
+ struct mxc_ir_platform_data *mxc_ir_plat;
+
+ /*!
+ * This holds the power management status of this module.
+ */
+ int suspend;
+
+};
+
+extern void gpio_firi_active(void *, unsigned int);
+extern void gpio_firi_inactive(void);
+extern void gpio_firi_init(void);
+
+void mxc_irda_firi_init(struct mxc_irda *si);
+#ifdef FIRI_SDMA_RX
+static void mxc_irda_fir_dma_rx_irq(void *id, int error_status,
+ unsigned int count);
+#endif
+#ifdef FIRI_SDMA_TX
+static void mxc_irda_fir_dma_tx_irq(void *id, int error_status,
+ unsigned int count);
+#endif
+
+/*!
+ * This function allocates and maps the receive buffer,
+ * unless it is already allocated.
+ *
+ * @param si FIRI device specific structure.
+ * @return The function returns 0 on success and a non-zero value on
+ * failure.
+ */
+static int mxc_irda_rx_alloc(struct mxc_irda *si)
+{
+#ifdef FIRI_SDMA_RX
+ mxc_dma_requestbuf_t dma_request;
+#endif
+ if (si->rxskb) {
+ return 0;
+ }
+
+ si->rxskb = alloc_skb(IRDA_FRAME_SIZE_LIMIT + 1, GFP_ATOMIC);
+
+ if (!si->rxskb) {
+ dev_err(si->dev, "mxc_ir: out of memory for RX SKB\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Align any IP headers that may be contained
+ * within the frame.
+ */
+ skb_reserve(si->rxskb, 1);
+
+#ifdef FIRI_SDMA_RX
+ si->dma_rx_buff_phy =
+ dma_map_single(si->dev, si->rxskb->data, IRDA_FRAME_SIZE_LIMIT,
+ DMA_FROM_DEVICE);
+
+ dma_request.num_of_bytes = IRDA_FRAME_SIZE_LIMIT;
+ dma_request.dst_addr = si->dma_rx_buff_phy;
+ dma_request.src_addr = si->firi_res->start;
+
+ mxc_dma_config(si->rxdma_ch, &dma_request, 1, MXC_DMA_MODE_READ);
+#endif
+ return 0;
+}
+
+/*!
+ * This function is called to disable the FIRI DMA channels.
+ *
+ * @param si FIRI port specific structure.
+ */
+static void mxc_irda_disabledma(struct mxc_irda *si)
+{
+ /* Stop all DMA activity. */
+#ifdef FIRI_SDMA_TX
+ mxc_dma_disable(si->txdma_ch);
+#endif
+#ifdef FIRI_SDMA_RX
+ mxc_dma_disable(si->rxdma_ch);
+#endif
+}
+
+/*!
+ * This function is called to set the IrDA communications speed.
+ *
+ * @param si FIRI specific structure.
+ * @param speed new Speed to be configured for.
+ *
+ * @return The function returns 0 on success and a non-zero value on
+ * failure.
+ */
+static int mxc_irda_set_speed(struct mxc_irda *si, int speed)
+{
+ unsigned long flags;
+ int ret = 0;
+ unsigned int num, denom, baud;
+ unsigned int cr;
+
+ dev_dbg(si->dev, "speed:%d\n", speed);
+ switch (speed) {
+ case 9600:
+ case 19200:
+ case 38400:
+ case 57600:
+ case 115200:
+ dev_dbg(si->dev, "starting SIR\n");
+ baud = speed;
+ if (IS_FIR(si)) {
+#ifdef FIRI_SDMA_RX
+ mxc_dma_disable(si->rxdma_ch);
+#endif
+ cr = readl(si->firi_base + FIRITCR);
+ cr &= ~FIRITCR_TE;
+ writel(cr, si->firi_base + FIRITCR);
+
+ cr = readl(si->firi_base + FIRIRCR);
+ cr &= ~FIRIRCR_RE;
+ writel(cr, si->firi_base + FIRIRCR);
+
+ }
+ local_irq_save(flags);
+
+ /* Disable Tx and Rx */
+ cr = readl(si->uart_base + MXC_UARTUCR2);
+ cr &= ~(MXC_UARTUCR2_RXEN | MXC_UARTUCR2_TXEN);
+ writel(cr, si->uart_base + MXC_UARTUCR2);
+
+ gpio_firi_inactive();
+
+ num = baud / 100 - 1;
+ denom = si->uart_clk_rate / 1600 - 1;
+ if ((denom < 65536) && (si->uart_clk_rate > 1600)) {
+ writel(num, si->uart_base + MXC_UARTUBIR);
+ writel(denom, si->uart_base + MXC_UARTUBMR);
+ }
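+		/*
+		 * Divisor sketch: given the i.MX UART relation
+		 * baud = refclk * (UBIR + 1) / (16 * (UBMR + 1)), choosing
+		 * UBIR + 1 = baud / 100 and UBMR + 1 = refclk / 1600 cancels
+		 * refclk and yields the requested SIR baud rate, assuming the
+		 * divisions above are exact.
+		 */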
+
+ si->speed = speed;
+
+ writel(0xFFFF, si->uart_base + MXC_UARTUSR1);
+ writel(0xFFFF, si->uart_base + MXC_UARTUSR2);
+
+ /* Enable Receive Overrun and Data Ready interrupts. */
+ cr = readl(si->uart_base + MXC_UARTUCR4);
+ cr |= (MXC_UARTUCR4_OREN | MXC_UARTUCR4_DREN);
+ writel(cr, si->uart_base + MXC_UARTUCR4);
+
+ cr = readl(si->uart_base + MXC_UARTUCR2);
+ cr |= (MXC_UARTUCR2_RXEN | MXC_UARTUCR2_TXEN);
+ writel(cr, si->uart_base + MXC_UARTUCR2);
+
+ local_irq_restore(flags);
+ break;
+ case 4000000:
+ local_irq_save(flags);
+
+ /* Disable Receive Overrun and Data Ready interrupts. */
+ cr = readl(si->uart_base + MXC_UARTUCR4);
+ cr &= ~(MXC_UARTUCR4_OREN | MXC_UARTUCR4_DREN);
+ writel(cr, si->uart_base + MXC_UARTUCR4);
+
+ /* Disable Tx and Rx */
+ cr = readl(si->uart_base + MXC_UARTUCR2);
+ cr &= ~(MXC_UARTUCR2_RXEN | MXC_UARTUCR2_TXEN);
+ writel(cr, si->uart_base + MXC_UARTUCR2);
+
+ /*
+ * FIR configuration
+ */
+ mxc_irda_disabledma(si);
+
+ cr = readl(si->firi_base + FIRITCR);
+ cr &= ~FIRITCR_TE;
+ writel(cr, si->firi_base + FIRITCR);
+
+ gpio_firi_active(si->firi_base + FIRITCR, FIRITCR_TPP);
+
+ si->speed = speed;
+
+ cr = readl(si->firi_base + FIRIRCR);
+ cr |= FIRIRCR_RE;
+ writel(cr, si->firi_base + FIRIRCR);
+
+ dev_dbg(si->dev, "Going for fast IRDA ...\n");
+ ret = mxc_irda_rx_alloc(si);
+
+ /* clear RX status register */
+ writel(0xFFFF, si->firi_base + FIRIRSR);
+#ifdef FIRI_SDMA_RX
+ if (si->rxskb) {
+ mxc_dma_enable(si->rxdma_ch);
+ }
+#endif
+ local_irq_restore(flags);
+
+ break;
+ default:
+ dev_err(si->dev, "speed not supported by FIRI\n");
+ break;
+ }
+
+ return ret;
+}
+
+/*!
+ * This function checks for FIR receive errors and updates the error counters.
+ *
+ * @param si FIRI specific structure.
+ *
+ * @return The function returns 0 on success and a non-zero value on
+ * failure.
+ */
+static inline int mxc_irda_fir_error(struct mxc_irda *si)
+{
+ struct sk_buff *skb = si->rxskb;
+ unsigned int dd_error, crc_error, overrun_error;
+ unsigned int sr;
+
+ if (!skb) {
+ dev_err(si->dev, "no skb!\n");
+ return -1;
+ }
+
+ sr = readl(si->firi_base + FIRIRSR);
+ dd_error = sr & FIRIRSR_DDE;
+ crc_error = sr & FIRIRSR_CRCE;
+ overrun_error = sr & FIRIRSR_RFO;
+
+ if (!(dd_error | crc_error | overrun_error)) {
+ return 0;
+ }
+ dev_err(si->dev, "dde,crce,rfo=%d,%d,%d.\n", dd_error, crc_error,
+ overrun_error);
+ si->stats.rx_errors++;
+ if (crc_error) {
+ si->stats.rx_crc_errors++;
+ }
+ if (dd_error) {
+ si->stats.rx_frame_errors++;
+ }
+ if (overrun_error) {
+ si->stats.rx_frame_errors++;
+ }
+ writel(sr, si->firi_base + FIRIRSR);
+
+ return -1;
+}
+
+#ifndef FIRI_SDMA_RX
+/*!
+ * FIR interrupt service routine to handle receive.
+ *
+ * @param dev pointer to the net_device structure
+ */
+void mxc_irda_fir_irq_rx(struct net_device *dev)
+{
+ struct mxc_irda *si = dev->priv;
+ struct sk_buff *skb = si->rxskb;
+ unsigned int sr, len;
+ int i;
+ unsigned char *p = skb->data;
+
+ /*
+ * Deal with any receive errors.
+ */
+ if (mxc_irda_fir_error(si) != 0) {
+ return;
+ }
+
+ sr = readl(si->firi_base + FIRIRSR);
+
+ if (!(sr & FIRIRSR_RPE)) {
+ return;
+ }
+
+ /*
+	 * Reaching this point indicates that a FIR RX packet has been
+	 * received successfully and no error has occurred so far.
+ */
+ writel(sr | FIRIRSR_RPE, si->firi_base + FIRIRSR);
+
+ len = (sr & FIRIRSR_RFP) >> 8;
+
+ /* 4 bytes of CRC */
+ len -= 4;
+
+ skb_put(skb, len);
+
+ for (i = 0; i < len; i++) {
+ *p++ = readb(si->firi_base + FIRIRXFIFO);
+ }
+
+ /* Discard the four CRC bytes */
+ for (i = 0; i < 4; i++) {
+ readb(si->firi_base + FIRIRXFIFO);
+ }
+
+ /*
+ * Deal with the case of packet complete.
+ */
+ skb->dev = dev;
+ skb->mac.raw = skb->data;
+ skb->protocol = htons(ETH_P_IRDA);
+ si->stats.rx_packets++;
+ si->stats.rx_bytes += len;
+ netif_rx(skb);
+
+ si->rxskb = NULL;
+ mxc_irda_rx_alloc(si);
+
+ writel(0xFFFF, si->firi_base + FIRIRSR);
+
+}
+#endif
+
+/*!
+ * FIR interrupt service routine to handle transmit.
+ *
+ * @param dev pointer to the net_device structure
+ */
+void mxc_irda_fir_irq_tx(struct net_device *dev)
+{
+ struct mxc_irda *si = dev->priv;
+ struct sk_buff *skb = si->txskb;
+ unsigned int cr, sr;
+
+ sr = readl(si->firi_base + FIRITSR);
+ writel(sr, si->firi_base + FIRITSR);
+
+ if (sr & FIRITSR_TC) {
+
+#ifdef FIRI_SDMA_TX
+ mxc_dma_disable(si->txdma_ch);
+#endif
+ cr = readl(si->firi_base + FIRITCR);
+ cr &= ~(FIRITCR_TCIE | FIRITCR_TE);
+ writel(cr, si->firi_base + FIRITCR);
+
+ if (si->newspeed) {
+ mxc_irda_set_speed(si, si->newspeed);
+ si->newspeed = 0;
+ }
+ si->txskb = NULL;
+
+ cr = readl(si->firi_base + FIRIRCR);
+ cr |= FIRIRCR_RE;
+ writel(cr, si->firi_base + FIRIRCR);
+
+ writel(0xFFFF, si->firi_base + FIRIRSR);
+ /*
+ * Account and free the packet.
+ */
+ if (skb) {
+#ifdef FIRI_SDMA_TX
+ dma_unmap_single(si->dev, si->dma_tx_buff_phy, skb->len,
+ DMA_TO_DEVICE);
+#endif
+ si->stats.tx_packets++;
+ si->stats.tx_bytes += skb->len;
+ dev_kfree_skb_irq(skb);
+ }
+ /*
+ * Make sure that the TX queue is available for sending
+ * (for retries). TX has priority over RX at all times.
+ */
+ netif_wake_queue(dev);
+ }
+}
+
+/*!
+ * This is FIRI interrupt handler.
+ *
+ * @param dev pointer to the net_device structure
+ */
+void mxc_irda_fir_irq(struct net_device *dev)
+{
+ struct mxc_irda *si = dev->priv;
+ unsigned int sr1, sr2;
+
+ sr1 = readl(si->firi_base + FIRIRSR);
+ sr2 = readl(si->firi_base + FIRITSR);
+
+ if (sr2 & FIRITSR_TC)
+ mxc_irda_fir_irq_tx(dev);
+#ifndef FIRI_SDMA_RX
+ if (sr1 & (FIRIRSR_RPE | FIRIRSR_RFO))
+ mxc_irda_fir_irq_rx(dev);
+#endif
+
+}
+
+/*!
+ * This is the SIR transmit routine.
+ *
+ * @param si FIRI specific structure.
+ *
+ * @param dev pointer to the net_device structure
+ *
+ * @return The function returns 0 on success and a non-zero value on
+ * failure.
+ */
+static int mxc_irda_sir_txirq(struct mxc_irda *si, struct net_device *dev)
+{
+ unsigned int sr1, sr2, cr;
+ unsigned int status;
+
+ sr1 = readl(si->uart_base + MXC_UARTUSR1);
+ sr2 = readl(si->uart_base + MXC_UARTUSR2);
+ cr = readl(si->uart_base + MXC_UARTUCR2);
+
+ /*
+ * Echo cancellation for IRDA Transmit chars
+ * Disable the receiver and enable Transmit complete.
+ */
+ cr &= ~MXC_UARTUCR2_RXEN;
+ writel(cr, si->uart_base + MXC_UARTUCR2);
+ cr = readl(si->uart_base + MXC_UARTUCR4);
+ cr |= MXC_UARTUCR4_TCEN;
+ writel(cr, si->uart_base + MXC_UARTUCR4);
+
+ while ((sr1 & MXC_UARTUSR1_TRDY) && si->tx_buff.len) {
+
+ writel(*si->tx_buff.data++, si->uart_base + MXC_UARTUTXD);
+ si->tx_buff.len -= 1;
+ sr1 = readl(si->uart_base + MXC_UARTUSR1);
+ }
+
+ if (si->tx_buff.len == 0) {
+ si->stats.tx_packets++;
+ si->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head;
+
+		/* We are done; stop TX */
+ cr = readl(si->uart_base + MXC_UARTUCR1);
+ cr &= ~MXC_UARTUCR1_TRDYEN;
+ writel(cr, si->uart_base + MXC_UARTUCR1);
+
+ do {
+ status = readl(si->uart_base + MXC_UARTUSR2);
+ } while (!(status & MXC_UARTUSR2_TXDC));
+
+ if (si->newspeed) {
+ mxc_irda_set_speed(si, si->newspeed);
+ si->newspeed = 0;
+ }
+		/* Wake the queue so the stack can hand us the next frame */
+ netif_wake_queue(dev);
+
+		/* If the transmit is complete, re-enable the receiver. */
+ if (status & MXC_UARTUSR2_TXDC) {
+
+ cr = readl(si->uart_base + MXC_UARTUCR2);
+ cr |= MXC_UARTUCR2_RXEN;
+ writel(cr, si->uart_base + MXC_UARTUCR2);
+ /* Disable the Transmit complete interrupt bit */
+ cr = readl(si->uart_base + MXC_UARTUCR4);
+ cr &= ~MXC_UARTUCR4_TCEN;
+ writel(cr, si->uart_base + MXC_UARTUCR4);
+ }
+ }
+
+ return 0;
+}
+
+/*!
+ * This is the SIR receive routine.
+ *
+ * @param si FIRI specific structure.
+ *
+ * @param dev pointer to the net_device structure
+ *
+ * @return The function returns 0 on success and a non-zero value on
+ * failure.
+ */
+static int mxc_irda_sir_rxirq(struct mxc_irda *si, struct net_device *dev)
+{
+ unsigned int data, status;
+ volatile unsigned int sr2;
+
+ sr2 = readl(si->uart_base + MXC_UARTUSR2);
+ while ((sr2 & MXC_UARTUSR2_RDR) == 1) {
+ data = readl(si->uart_base + MXC_UARTURXD);
+ status = data & 0xf400;
+ if (status & MXC_UARTURXD_ERR) {
+			dev_err(si->dev, "Received incorrect data = 0x%x.\n",
+ data);
+ si->stats.rx_errors++;
+ if (status & MXC_UARTURXD_OVRRUN) {
+ si->stats.rx_fifo_errors++;
+ dev_err(si->dev, "Rx overrun.\n");
+ }
+ if (status & MXC_UARTURXD_FRMERR) {
+ si->stats.rx_frame_errors++;
+ dev_err(si->dev, "Rx frame error.\n");
+ }
+ if (status & MXC_UARTURXD_PRERR) {
+ dev_err(si->dev, "Rx parity error.\n");
+ }
+			/* Other: it is the break character.
+			 * Nothing else to do; throw out the data.
+ */
+ async_unwrap_char(dev, &si->stats, &si->rx_buff,
+ (data & 0xFF));
+ } else {
+ /* It is correct data. */
+ data &= 0xFF;
+ async_unwrap_char(dev, &si->stats, &si->rx_buff, data);
+
+ dev->last_rx = jiffies;
+ }
+ sr2 = readl(si->uart_base + MXC_UARTUSR2);
+
+ writel(0xFFFF, si->uart_base + MXC_UARTUSR1);
+ writel(0xFFFF, si->uart_base + MXC_UARTUSR2);
+ } /*while */
+ return 0;
+
+}
+
+static irqreturn_t mxc_irda_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct mxc_irda *si = dev->priv;
+
+ if (IS_FIR(si)) {
+ mxc_irda_fir_irq(dev);
+ return IRQ_HANDLED;
+ }
+
+ if (readl(si->uart_base + MXC_UARTUCR2) & MXC_UARTUCR2_RXEN) {
+ mxc_irda_sir_rxirq(si, dev);
+ }
+ if ((readl(si->uart_base + MXC_UARTUCR1) & MXC_UARTUCR1_TRDYEN) &&
+ (readl(si->uart_base + MXC_UARTUSR1) & MXC_UARTUSR1_TRDY)) {
+ mxc_irda_sir_txirq(si, dev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mxc_irda_tx_irq(int irq, void *dev_id)
+{
+
+ struct net_device *dev = dev_id;
+ struct mxc_irda *si = dev->priv;
+
+ mxc_irda_sir_txirq(si, dev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mxc_irda_rx_irq(int irq, void *dev_id)
+{
+
+ struct net_device *dev = dev_id;
+ struct mxc_irda *si = dev->priv;
+
+ /* Clear the aging timer bit */
+ writel(MXC_UARTUSR1_AGTIM, si->uart_base + MXC_UARTUSR1);
+
+ mxc_irda_sir_rxirq(si, dev);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef FIRI_SDMA_RX
+struct tasklet_struct dma_rx_tasklet;
+
+static void mxc_irda_rx_task(unsigned long tparam)
+{
+ struct mxc_irda *si = (struct mxc_irda *)tparam;
+ struct sk_buff *lskb = si->tskb;
+
+ si->tskb = NULL;
+ if (lskb) {
+ lskb->mac_header = lskb->data;
+ lskb->protocol = htons(ETH_P_IRDA);
+ netif_rx(lskb);
+ }
+}
+
+/*!
+ * Receiver DMA callback routine.
+ *
+ * @param id pointer to network device structure
+ * @param error_status used to pass error status to this callback function
+ * @param count number of bytes received
+ */
+static void mxc_irda_fir_dma_rx_irq(void *id, int error_status,
+ unsigned int count)
+{
+ struct net_device *dev = id;
+ struct mxc_irda *si = dev->priv;
+ struct sk_buff *skb = si->rxskb;
+ unsigned int cr;
+ unsigned int len;
+
+ cr = readl(si->firi_base + FIRIRCR);
+ cr &= ~FIRIRCR_RE;
+ writel(cr, si->firi_base + FIRIRCR);
+ cr = readl(si->firi_base + FIRIRCR);
+ cr |= FIRIRCR_RE;
+ writel(cr, si->firi_base + FIRIRCR);
+ len = count - 4; /* remove 4 bytes for CRC */
+ skb_put(skb, len);
+ skb->dev = dev;
+ si->tskb = skb;
+ tasklet_schedule(&dma_rx_tasklet);
+
+ if (si->dma_rx_buff_phy != 0)
+ dma_unmap_single(si->dev, si->dma_rx_buff_phy,
+ IRDA_FRAME_SIZE_LIMIT, DMA_FROM_DEVICE);
+
+ si->rxskb = NULL;
+ mxc_irda_rx_alloc(si);
+
+ SDMA_START_DELAY();
+ writel(0xFFFF, si->firi_base + FIRIRSR);
+
+ if (si->rxskb) {
+ mxc_dma_enable(si->rxdma_ch);
+ }
+}
+#endif
+
+#ifdef FIRI_SDMA_TX
+/*!
+ * This function is called by SDMA Interrupt Service Routine to indicate
+ * requested DMA transfer is completed.
+ *
+ * @param id pointer to network device structure
+ * @param error_status used to pass error status to this callback function
+ * @param count number of bytes sent
+ */
+static void mxc_irda_fir_dma_tx_irq(void *id, int error_status,
+ unsigned int count)
+{
+ struct net_device *dev = id;
+ struct mxc_irda *si = dev->priv;
+
+ mxc_dma_disable(si->txdma_ch);
+}
+#endif
+
+/*!
+ * This function is called by the Linux IrDA network subsystem to
+ * transmit an infrared data packet. The TX DMA channel is configured
+ * to transfer the SK buffer data to the FIRI TX FIFO, together with a
+ * DMA transfer completion callback.
+ *
+ * @param skb The packet that is queued to be sent
+ * @param dev net_device structure.
+ *
+ * @return The function returns 0 on success and a negative value on
+ * failure.
+ */
+static int mxc_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mxc_irda *si = dev->priv;
+ int speed = irda_get_next_speed(skb);
+ unsigned int cr;
+
+ /*
+ * Does this packet contain a request to change the interface
+ * speed? If so, remember it until we complete the transmission
+ * of this frame.
+ */
+ if (speed != si->speed && speed != -1) {
+ si->newspeed = speed;
+ }
+
+ /* If this is an empty frame, we can bypass a lot. */
+ if (skb->len == 0) {
+ if (si->newspeed) {
+ si->newspeed = 0;
+ mxc_irda_set_speed(si, speed);
+ }
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ /* We must not be transmitting... */
+ netif_stop_queue(dev);
+ if (IS_SIR(si)) {
+
+ si->tx_buff.data = si->tx_buff.head;
+ si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data,
+ si->tx_buff.truesize);
+ cr = readl(si->uart_base + MXC_UARTUCR1);
+ cr |= MXC_UARTUCR1_TRDYEN;
+ writel(cr, si->uart_base + MXC_UARTUCR1);
+ dev_kfree_skb(skb);
+ } else {
+ unsigned int mtt = irda_get_mtt(skb);
+ unsigned char *p = skb->data;
+ unsigned int skb_len = skb->len;
+#ifdef FIRI_SDMA_TX
+ mxc_dma_requestbuf_t dma_request;
+#else
+ unsigned int i, sr;
+#endif
+
+ skb_len = skb_len + ((4 - (skb_len % 4)) % 4);
+
+ if (si->txskb) {
+ BUG();
+ }
+ si->txskb = skb;
+
+ /*
+	 * If we have a mean turn-around time, impose the specified
+	 * delay. We could shorten this by timing from
+ * the point we received the packet.
+ */
+ if (mtt) {
+ udelay(mtt);
+ }
+
+ cr = readl(si->firi_base + FIRIRCR);
+ cr &= ~FIRIRCR_RE;
+ writel(cr, si->firi_base + FIRIRCR);
+
+ writel(skb->len - 1, si->firi_base + FIRITCTR);
+
+#ifdef FIRI_SDMA_TX
+ /*
+ * Configure DMA Tx Channel for source and destination addresses,
+ * Number of bytes in SK buffer to transfer and Transfer complete
+ * callback function.
+ */
+ si->dma_tx_buff_len = skb_len;
+ si->dma_tx_buff_phy =
+ dma_map_single(si->dev, p, skb_len, DMA_TO_DEVICE);
+
+ dma_request.num_of_bytes = skb_len;
+ dma_request.dst_addr = si->firi_res->start + FIRITXFIFO;
+ dma_request.src_addr = si->dma_tx_buff_phy;
+
+ mxc_dma_config(si->txdma_ch, &dma_request, 1,
+ MXC_DMA_MODE_WRITE);
+
+ mxc_dma_enable(si->txdma_ch);
+#endif
+ cr = readl(si->firi_base + FIRITCR);
+ cr |= FIRITCR_TCIE;
+ writel(cr, si->firi_base + FIRITCR);
+
+ cr |= FIRITCR_TE;
+ writel(cr, si->firi_base + FIRITCR);
+
+#ifndef FIRI_SDMA_TX
+ for (i = 0; i < skb->len;) {
+ sr = readl(si->firi_base + FIRITSR);
+ /* TFP = number of bytes in the TX FIFO for the
+			 * transmitter.
+			 */
+ if ((sr >> 8) < 128) {
+ writeb(*p, si->firi_base + FIRITXFIFO);
+ p++;
+ i++;
+ }
+ }
+#endif
+ }
+
+ dev->trans_start = jiffies;
+ return 0;
+}
+
+/*!
+ * This function handles network interface ioctls passed to this driver.
+ *
+ * @param dev net device structure
+ * @param ifreq user request data
+ * @param cmd command issued
+ *
+ * @return The function returns 0 on success and a non-zero value on
+ * failure.
+ */
+static int mxc_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
+{
+ struct if_irda_req *rq = (struct if_irda_req *)ifreq;
+ struct mxc_irda *si = dev->priv;
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd) {
+ /* This function will be used by IrLAP to change the speed */
+ case SIOCSBANDWIDTH:
+ dev_dbg(si->dev, "%s:with cmd SIOCSBANDWIDTH\n", __FUNCTION__);
+ if (capable(CAP_NET_ADMIN)) {
+ /*
+ * We are unable to set the speed if the
+ * device is not running.
+ */
+ if (si->open) {
+ ret = mxc_irda_set_speed(si, rq->ifr_baudrate);
+ } else {
+				dev_err(si->dev,
+					"mxc_ir_ioctl: SIOCSBANDWIDTH: !netif_running\n");
+ ret = 0;
+ }
+ }
+ break;
+ case SIOCSMEDIABUSY:
+ dev_dbg(si->dev, "%s:with cmd SIOCSMEDIABUSY\n", __FUNCTION__);
+ ret = -EPERM;
+ if (capable(CAP_NET_ADMIN)) {
+ irda_device_set_media_busy(dev, TRUE);
+ ret = 0;
+ }
+ break;
+ case SIOCGRECEIVING:
+ rq->ifr_receiving =
+ IS_SIR(si) ? si->rx_buff.state != OUTSIDE_FRAME : 0;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/*!
+ * Kernel interface routine to get current statistics of the device
+ * which include the number of bytes/packets transmitted/received,
+ * receive errors, CRC errors, framing errors, etc.
+ *
+ * @param dev the net_device structure
+ *
+ * @return This function returns IrDA network statistics
+ */
+static struct net_device_stats *mxc_irda_stats(struct net_device *dev)
+{
+	struct mxc_irda *si = netdev_priv(dev);
+ return &si->stats;
+}
+
+/*!
+ * FIRI init function
+ *
+ * @param si FIRI device specific structure.
+ */
+void mxc_irda_firi_init(struct mxc_irda *si)
+{
+ unsigned int firi_baud, osf = 6;
+ unsigned int tcr, rcr, cr;
+
+ si->firi_clk = clk_get(si->dev, "firi_clk");
+ firi_baud = clk_round_rate(si->firi_clk, 48004500);
+ if ((firi_baud < 47995500) ||
+ (clk_set_rate(si->firi_clk, firi_baud) < 0)) {
+ dev_err(si->dev, "Unable to set FIR clock to 48MHz.\n");
+ return;
+ }
+ clk_enable(si->firi_clk);
+
+ writel(0xFFFF, si->firi_base + FIRITSR);
+ writel(0xFFFF, si->firi_base + FIRIRSR);
+ writel(0x00, si->firi_base + FIRITCR);
+ writel(0x00, si->firi_base + FIRIRCR);
+
+	/* set burst length (_BL) and oversampling factor (_OSF) */
+ cr = (osf - 1) | (16 << 5);
+ writel(cr, si->firi_base + FIRICR);
+
+#ifdef FIRI_SDMA_TX
+ tcr =
+ FIRITCR_TDT_FIR | FIRITCR_TM_FIR | FIRITCR_TCIE |
+ FIRITCR_PCF | FIRITCR_PC;
+#else
+ tcr = FIRITCR_TM_FIR | FIRITCR_TCIE | FIRITCR_PCF | FIRITCR_PC;
+#endif
+
+#ifdef FIRI_SDMA_RX
+ rcr =
+ FIRIRCR_RPEDE | FIRIRCR_RM_FIR | FIRIRCR_RDT_FIR |
+ FIRIRCR_RPA | FIRIRCR_RPP;
+#else
+ rcr =
+ FIRIRCR_RPEDE | FIRIRCR_RM_FIR | FIRIRCR_RDT_FIR | FIRIRCR_RPEIE |
+ FIRIRCR_RPA | FIRIRCR_PAIE | FIRIRCR_RFOIE | FIRIRCR_RPP;
+#endif
+
+ writel(tcr, si->firi_base + FIRITCR);
+ writel(rcr, si->firi_base + FIRIRCR);
+ cr = 0;
+ writel(cr, si->firi_base + FIRITCTR);
+}
+
+/*!
+ * This function initialises the UART.
+ *
+ * @param si FIRI port specific structure.
+ *
+ * @return The function returns 0 on success.
+ */
+static int mxc_irda_uart_init(struct mxc_irda *si)
+{
+ unsigned int per_clk;
+ unsigned int num, denom, baud, ufcr = 0;
+ unsigned int cr;
+ int d = 1;
+ int uart_ir_mux = 0;
+
+ if (si->mxc_ir_plat)
+ uart_ir_mux = si->mxc_ir_plat->uart_ir_mux;
+ /*
+ * Clear Status Registers 1 and 2
+	 */
+ writel(0xFFFF, si->uart_base + MXC_UARTUSR1);
+ writel(0xFFFF, si->uart_base + MXC_UARTUSR2);
+
+	/* Configure the IOMUX pads for the IrDA (UART/FIRI) signals */
+ gpio_firi_init();
+
+ per_clk = clk_get_rate(si->uart_clk);
+ baud = per_clk / 16;
+ if (baud > 1500000) {
+ baud = 1500000;
+ d = per_clk / ((baud * 16) + 1000);
+ if (d > 6) {
+ d = 6;
+ }
+ }
+ clk_enable(si->uart_clk);
+
+ si->uart_clk_rate = per_clk / d;
+ writel(si->uart_clk_rate / 1000, si->uart_base + MXC_UARTONEMS);
+
+ writel(si->mxc_ir_plat->ir_rx_invert | MXC_UARTUCR4_IRSC,
+ si->uart_base + MXC_UARTUCR4);
+
+ if (uart_ir_mux) {
+ writel(MXC_UARTUCR3_RXDMUXSEL | si->mxc_ir_plat->ir_tx_invert |
+ MXC_UARTUCR3_DSR, si->uart_base + MXC_UARTUCR3);
+ } else {
+ writel(si->mxc_ir_plat->ir_tx_invert | MXC_UARTUCR3_DSR,
+ si->uart_base + MXC_UARTUCR3);
+ }
+
+ writel(MXC_UARTUCR2_IRTS | MXC_UARTUCR2_CTS | MXC_UARTUCR2_WS |
+ MXC_UARTUCR2_ATEN | MXC_UARTUCR2_TXEN | MXC_UARTUCR2_RXEN,
+ si->uart_base + MXC_UARTUCR2);
+ /* Wait till we are out of software reset */
+ do {
+ cr = readl(si->uart_base + MXC_UARTUCR2);
+ } while (!(cr & MXC_UARTUCR2_SRST));
+
+ ufcr |= (UART4_UFCR_TXTL << MXC_UARTUFCR_TXTL_OFFSET) |
+ ((6 - d) << MXC_UARTUFCR_RFDIV_OFFSET) | UART4_UFCR_RXTL;
+ writel(ufcr, si->uart_base + MXC_UARTUFCR);
+
+ writel(MXC_UARTUCR1_UARTEN | MXC_UARTUCR1_IREN,
+ si->uart_base + MXC_UARTUCR1);
+
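+	/*
+	 * Program the baud-rate registers (UBIR/UBMR) so that
+	 * (num + 1) / (denom + 1) = 16 * baud / uart_clk_rate, i.e. the
+	 * requested baud rate and the reference clock scaled down by 100
+	 * and 1600 respectively.
+	 */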
+ baud = 9600;
+ num = baud / 100 - 1;
+ denom = si->uart_clk_rate / 1600 - 1;
+
+ if ((denom < 65536) && (si->uart_clk_rate > 1600)) {
+ writel(num, si->uart_base + MXC_UARTUBIR);
+ writel(denom, si->uart_base + MXC_UARTUBMR);
+ }
+
+ writel(0x0000, si->uart_base + MXC_UARTUTS);
+ return 0;
+
+}
+
+/*!
+ * This function enables FIRI port.
+ *
+ * @param si FIRI port specific structure.
+ *
+ * @return The function returns 0 on success and a non-zero value on
+ * failure.
+ */
+static int mxc_irda_startup(struct mxc_irda *si)
+{
+ int ret = 0;
+
+ mxc_irda_uart_init(si);
+ mxc_irda_firi_init(si);
+
+ /* configure FIRI device for speed */
+ ret = mxc_irda_set_speed(si, si->speed = 9600);
+
+ return ret;
+}
+
+/*!
+ * When an ifconfig is issued which changes the device flag to include
+ * IFF_UP this function is called. It is only called when the change
+ * occurs, not when the interface remains up. The function requests the
+ * interrupt lines and registers the FIRI interrupt service routines, then
+ * requests and configures the DMA channels. It then initializes the IOMUX
+ * registers to configure the pins for the FIRI signals and finally initializes
+ * the various FIRI registers and enables the port for reception.
+ *
+ * @param dev net device structure that is being opened
+ *
+ * @return The function returns 0 for a successful open and non-zero value
+ * on failure.
+ */
+static int mxc_irda_start(struct net_device *dev)
+{
+	struct mxc_irda *si = netdev_priv(dev);
+ int err;
+ int ints_muxed = 0;
+ mxc_dma_device_t dev_id = 0;
+
+ if (si->uart_irq == si->uart_irq1)
+ ints_muxed = 1;
+
+ si->speed = 9600;
+
+ if (si->uart_irq == si->firi_irq) {
+ err =
+ request_irq(si->uart_irq, mxc_irda_irq, 0, dev->name, dev);
+ if (err) {
+ dev_err(si->dev, "%s:Failed to request the IRQ\n",
+ __FUNCTION__);
+ return err;
+ }
+ /*
+ * The interrupt must remain disabled for now.
+ */
+ disable_irq(si->uart_irq);
+ } else {
+ err =
+ request_irq(si->firi_irq, mxc_irda_irq, 0, dev->name, dev);
+ if (err) {
+ dev_err(si->dev, "%s:Failed to request FIRI IRQ\n",
+ __FUNCTION__);
+ return err;
+ }
+ /*
+ * The interrupt must remain disabled for now.
+ */
+ disable_irq(si->firi_irq);
+ if (ints_muxed) {
+
+ err = request_irq(si->uart_irq, mxc_irda_irq, 0,
+ dev->name, dev);
+ if (err) {
+ dev_err(si->dev,
+ "%s:Failed to request UART IRQ\n",
+ __FUNCTION__);
+ goto err_irq1;
+ }
+ /*
+ * The interrupt must remain disabled for now.
+ */
+ disable_irq(si->uart_irq);
+ } else {
+ err = request_irq(si->uart_irq, mxc_irda_tx_irq, 0,
+ dev->name, dev);
+ if (err) {
+ dev_err(si->dev,
+ "%s:Failed to request UART IRQ\n",
+ __FUNCTION__);
+ goto err_irq1;
+ }
+ err = request_irq(si->uart_irq1, mxc_irda_rx_irq, 0,
+ dev->name, dev);
+ if (err) {
+ dev_err(si->dev,
+ "%s:Failed to request UART1 IRQ\n",
+ __FUNCTION__);
+ goto err_irq2;
+ }
+ /*
+ * The interrupts must remain disabled for now.
+ */
+ disable_irq(si->uart_irq);
+ disable_irq(si->uart_irq1);
+ }
+ }
+#ifdef FIRI_SDMA_RX
+ dev_id = MXC_DMA_FIR_RX;
+ si->rxdma_ch = mxc_dma_request(dev_id, "MXC FIRI RX");
+ if (si->rxdma_ch < 0) {
+ dev_err(si->dev, "Cannot allocate FIR DMA channel\n");
+ goto err_rx_dma;
+ }
+ mxc_dma_callback_set(si->rxdma_ch, mxc_irda_fir_dma_rx_irq,
+ (void *)dev_get_drvdata(si->dev));
+#endif
+#ifdef FIRI_SDMA_TX
+
+ dev_id = MXC_DMA_FIR_TX;
+ si->txdma_ch = mxc_dma_request(dev_id, "MXC FIRI TX");
+ if (si->txdma_ch < 0) {
+ dev_err(si->dev, "Cannot allocate FIR DMA channel\n");
+ goto err_tx_dma;
+ }
+ mxc_dma_callback_set(si->txdma_ch, mxc_irda_fir_dma_tx_irq,
+ (void *)dev_get_drvdata(si->dev));
+#endif
+	/* Set up the serial port for the initial speed. */
+ err = mxc_irda_startup(si);
+ if (err) {
+ goto err_startup;
+ }
+
+ /* Open a new IrLAP layer instance. */
+ si->irlap = irlap_open(dev, &si->qos, "mxc");
+ err = -ENOMEM;
+ if (!si->irlap) {
+ goto err_irlap;
+ }
+
+ /* Now enable the interrupt and start the queue */
+ si->open = 1;
+ si->suspend = 0;
+
+ if (si->uart_irq == si->firi_irq) {
+ enable_irq(si->uart_irq);
+ } else {
+ enable_irq(si->firi_irq);
+ if (ints_muxed == 1) {
+ enable_irq(si->uart_irq);
+ } else {
+ enable_irq(si->uart_irq);
+ enable_irq(si->uart_irq1);
+ }
+ }
+
+ netif_start_queue(dev);
+ return 0;
+
+ err_irlap:
+ si->open = 0;
+ mxc_irda_disabledma(si);
+ err_startup:
+#ifdef FIRI_SDMA_TX
+ mxc_dma_free(si->txdma_ch);
+ err_tx_dma:
+#endif
+#ifdef FIRI_SDMA_RX
+ mxc_dma_free(si->rxdma_ch);
+ err_rx_dma:
+#endif
+ if (si->uart_irq1 && !ints_muxed)
+ free_irq(si->uart_irq1, dev);
+ err_irq2:
+ if (si->uart_irq != si->firi_irq)
+ free_irq(si->uart_irq, dev);
+ err_irq1:
+ if (si->firi_irq)
+ free_irq(si->firi_irq, dev);
+ return err;
+}
+
+/*!
+ * This function is called when the IFF_UP flag has been cleared by the user,
+ * for example via the "ifconfig irda0 down" command. It stops any further
+ * transmissions from being queued, then disables and frees the interrupts
+ * and DMA channels.
+ *
+ * @param dev the net_device structure
+ *
+ * @return The function always returns 0.
+ */
+static int mxc_irda_stop(struct net_device *dev)
+{
+ struct mxc_irda *si = netdev_priv(dev);
+ unsigned long flags;
+
+ /* Stop IrLAP */
+ if (si->irlap) {
+ irlap_close(si->irlap);
+ si->irlap = NULL;
+ }
+
+ netif_stop_queue(dev);
+
+	/* Save flags and disable the FIRI interrupts. */
+ if (si->open) {
+ local_irq_save(flags);
+ disable_irq(si->uart_irq);
+ free_irq(si->uart_irq, dev);
+ if (si->uart_irq != si->firi_irq) {
+ disable_irq(si->firi_irq);
+ free_irq(si->firi_irq, dev);
+ if (si->uart_irq1 != si->uart_irq) {
+ disable_irq(si->uart_irq1);
+ free_irq(si->uart_irq1, dev);
+ }
+ }
+ local_irq_restore(flags);
+ si->open = 0;
+ }
+#ifdef FIRI_SDMA_RX
+ if (si->rxdma_ch) {
+ mxc_dma_disable(si->rxdma_ch);
+ mxc_dma_free(si->rxdma_ch);
+ if (si->dma_rx_buff_phy) {
+ dma_unmap_single(si->dev, si->dma_rx_buff_phy,
+ IRDA_FRAME_SIZE_LIMIT,
+ DMA_FROM_DEVICE);
+ si->dma_rx_buff_phy = 0;
+ }
+ si->rxdma_ch = 0;
+ }
+ tasklet_kill(&dma_rx_tasklet);
+#endif
+#ifdef FIRI_SDMA_TX
+ if (si->txdma_ch) {
+ mxc_dma_disable(si->txdma_ch);
+ mxc_dma_free(si->txdma_ch);
+ if (si->dma_tx_buff_phy) {
+ dma_unmap_single(si->dev, si->dma_tx_buff_phy,
+ si->dma_tx_buff_len, DMA_TO_DEVICE);
+ si->dma_tx_buff_phy = 0;
+ }
+ si->txdma_ch = 0;
+ }
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/*!
+ * This function is called to put the FIRI in a low power state. Refer to the
+ * document driver-model/driver.txt in the kernel source tree for more
+ * information.
+ *
+ * @param pdev the device structure used to give information on which FIRI
+ * to suspend
+ * @param state the power state the device is entering
+ *
+ * @return The function always returns 0.
+ */
+static int mxc_irda_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct mxc_irda *si = netdev_priv(ndev);
+ unsigned int cr;
+ unsigned long flags;
+
+ if (!si) {
+ return 0;
+ }
+ if (si->suspend == 1) {
+ dev_err(si->dev,
+			"suspend: device is already suspended\n");
+ return 0;
+ }
+ if (si->open) {
+
+ netif_device_detach(ndev);
+ mxc_irda_disabledma(si);
+
+		/* Save flags and disable the FIRI interrupts. */
+ local_irq_save(flags);
+ disable_irq(si->uart_irq);
+ if (si->uart_irq != si->firi_irq) {
+ disable_irq(si->firi_irq);
+ if (si->uart_irq != si->uart_irq1) {
+ disable_irq(si->uart_irq1);
+ }
+ }
+ local_irq_restore(flags);
+
+ /* Disable Tx and Rx and then disable the UART clock */
+ cr = readl(si->uart_base + MXC_UARTUCR2);
+ cr &= ~(MXC_UARTUCR2_TXEN | MXC_UARTUCR2_RXEN);
+ writel(cr, si->uart_base + MXC_UARTUCR2);
+ cr = readl(si->uart_base + MXC_UARTUCR1);
+ cr &= ~MXC_UARTUCR1_UARTEN;
+ writel(cr, si->uart_base + MXC_UARTUCR1);
+ clk_disable(si->uart_clk);
+
+		/* Disable Tx and Rx for FIRI and then disable the FIRI clock. */
+ cr = readl(si->firi_base + FIRITCR);
+ cr &= ~FIRITCR_TE;
+ writel(cr, si->firi_base + FIRITCR);
+ cr = readl(si->firi_base + FIRIRCR);
+ cr &= ~FIRIRCR_RE;
+ writel(cr, si->firi_base + FIRIRCR);
+ clk_disable(si->firi_clk);
+
+ gpio_firi_inactive();
+
+ si->suspend = 1;
+ si->open = 0;
+ }
+ return 0;
+}
+
+/*!
+ * This function is called to bring the FIRI back from a low power state. Refer
+ * to the document driver-model/driver.txt in the kernel source tree for more
+ * information.
+ *
+ * @param pdev the device structure used to give information on which FIRI
+ * to resume
+ *
+ * @return The function always returns 0.
+ */
+static int mxc_irda_resume(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct mxc_irda *si = netdev_priv(ndev);
+ unsigned long flags;
+
+ if (!si) {
+ return 0;
+ }
+
+ if (si->suspend == 1 && !si->open) {
+
+		/* Initialise the UART first */
+		clk_enable(si->uart_clk);
+
+		/* Now init FIRI */
+ gpio_firi_active(si->firi_base + FIRITCR, FIRITCR_TPP);
+ mxc_irda_startup(si);
+
+		/* Enable the UART and FIRI interrupts. */
+ local_irq_save(flags);
+ enable_irq(si->uart_irq);
+ if (si->uart_irq != si->firi_irq) {
+ enable_irq(si->firi_irq);
+ if (si->uart_irq != si->uart_irq1) {
+ enable_irq(si->uart_irq1);
+ }
+ }
+ local_irq_restore(flags);
+
+		/* Let the kernel know that we are alive and kicking. */
+ netif_device_attach(ndev);
+
+ si->suspend = 0;
+ si->open = 1;
+ }
+ return 0;
+}
+#else
+#define mxc_irda_suspend NULL
+#define mxc_irda_resume NULL
+#endif
+
+static int mxc_irda_init_iobuf(iobuff_t * io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
+ if (io->head != NULL) {
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+ }
+ return io->head ? 0 : -ENOMEM;
+
+}
+
+/*!
+ * This function is called during the driver binding process.
+ * This function requests for memory, initializes net_device structure and
+ * registers with kernel.
+ *
+ * @param pdev the device structure used to store device specific
+ * information that is used by the suspend, resume and remove
+ * functions
+ *
+ * @return The function returns 0 on success and a non-zero value on failure
+ */
+static int mxc_irda_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct mxc_irda *si;
+ struct resource *uart_res, *firi_res;
+ int uart_irq, firi_irq, uart_irq1;
+ unsigned int baudrate_mask = 0;
+ int err;
+
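+	/*
+	 * Expected platform resources: [0] UART MMIO region, [1] UART IRQ,
+	 * [2] FIRI MMIO region, [3] FIRI IRQ, [4] second UART IRQ.
+	 */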
+ uart_res = &pdev->resource[0];
+ uart_irq = pdev->resource[1].start;
+
+ firi_res = &pdev->resource[2];
+ firi_irq = pdev->resource[3].start;
+
+ uart_irq1 = pdev->resource[4].start;
+
+ if (!uart_res || uart_irq == NO_IRQ || !firi_res || firi_irq == NO_IRQ) {
+ dev_err(&pdev->dev, "Unable to find resources\n");
+ return -ENXIO;
+ }
+
+ err =
+ request_mem_region(uart_res->start, SZ_16K,
+ "MXC_IRDA") ? 0 : -EBUSY;
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request UART memory region\n");
+		return err;
+ }
+
+ err =
+ request_mem_region(firi_res->start, SZ_16K,
+ "MXC_IRDA") ? 0 : -EBUSY;
+ if (err) {
+ dev_err(&pdev->dev, "Failed to request FIRI memory region\n");
+ goto err_mem_1;
+ }
+
+	dev = alloc_irdadev(sizeof(struct mxc_irda));
+	if (!dev) {
+		err = -ENOMEM;
+		goto err_mem_2;
+	}
+
+ si = netdev_priv(dev);
+ si->dev = &pdev->dev;
+
+ si->mxc_ir_plat = pdev->dev.platform_data;
+ si->uart_clk = si->mxc_ir_plat->uart_clk;
+
+ si->uart_res = uart_res;
+ si->firi_res = firi_res;
+ si->uart_irq = uart_irq;
+ si->firi_irq = firi_irq;
+ si->uart_irq1 = uart_irq1;
+
+ si->uart_base = ioremap(uart_res->start, SZ_16K);
+ si->firi_base = ioremap(firi_res->start, SZ_16K);
+
+	if (!si->uart_base || !si->firi_base) {
+ err = -ENOMEM;
+ goto err_mem_3;
+ }
+
+ /*
+ * Initialise the SIR buffers
+ */
+ err = mxc_irda_init_iobuf(&si->rx_buff, UART_BUFF_SIZE);
+ if (err) {
+ goto err_mem_4;
+ }
+
+ err = mxc_irda_init_iobuf(&si->tx_buff, UART_BUFF_SIZE);
+ if (err) {
+ goto err_mem_5;
+ }
+
+ dev->hard_start_xmit = mxc_irda_hard_xmit;
+ dev->open = mxc_irda_start;
+ dev->stop = mxc_irda_stop;
+ dev->do_ioctl = mxc_irda_ioctl;
+ dev->get_stats = mxc_irda_stats;
+
+ irda_init_max_qos_capabilies(&si->qos);
+
+ /*
+ * We support
+ * SIR(9600, 19200,38400, 57600 and 115200 bps)
+ * FIR(4 Mbps)
+ * Min Turn Time set to 1ms or greater.
+ */
+ baudrate_mask |= IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
+ baudrate_mask |= IR_4000000 << 8;
+
+ si->qos.baud_rate.bits &= baudrate_mask;
+ si->qos.min_turn_time.bits = 0x7;
+
+ irda_qos_bits_to_value(&si->qos);
+
+#ifdef FIRI_SDMA_RX
+ si->tskb = NULL;
+ tasklet_init(&dma_rx_tasklet, mxc_irda_rx_task, (unsigned long)si);
+#endif
+ err = register_netdev(dev);
+ if (err == 0) {
+ platform_set_drvdata(pdev, dev);
+ } else {
+ kfree(si->tx_buff.head);
+ err_mem_5:
+ kfree(si->rx_buff.head);
+ err_mem_4:
+ iounmap(si->uart_base);
+ iounmap(si->firi_base);
+ err_mem_3:
+ free_netdev(dev);
+ err_mem_2:
+ release_mem_region(firi_res->start, SZ_16K);
+ err_mem_1:
+ release_mem_region(uart_res->start, SZ_16K);
+ }
+ return err;
+}
+
+/*!
+ * Dissociates the driver from the FIRI device. Removes the appropriate FIRI
+ * port structure from the kernel.
+ *
+ * @param pdev the device structure used to give information on which FIRI
+ * to remove
+ *
+ * @return The function always returns 0.
+ */
+static int mxc_irda_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct mxc_irda *si = netdev_priv(dev);
+
+	/* Unregister the network device before tearing down its resources */
+	unregister_netdev(dev);
+
+	if (si->uart_base)
+		iounmap(si->uart_base);
+	if (si->firi_base)
+		iounmap(si->firi_base);
+	if (si->firi_res->start)
+		release_mem_region(si->firi_res->start, SZ_16K);
+	if (si->uart_res->start)
+		release_mem_region(si->uart_res->start, SZ_16K);
+	kfree(si->tx_buff.head);
+	kfree(si->rx_buff.head);
+
+	platform_set_drvdata(pdev, NULL);
+	free_netdev(dev);
+
+ return 0;
+}
+
+/*!
+ * This structure contains the platform driver callbacks (probe, remove and
+ * power management).
+ */
+static struct platform_driver mxcir_driver = {
+ .driver = {
+ .name = "mxcir",
+ },
+ .probe = mxc_irda_probe,
+ .remove = mxc_irda_remove,
+ .suspend = mxc_irda_suspend,
+ .resume = mxc_irda_resume,
+};
+
+/*!
+ * This function initializes the FIRI driver module. It registers the
+ * platform driver, including its probe, remove and power management
+ * callbacks, with the kernel.
+ *
+ * @return The function returns 0 on success and a non-zero value on failure.
+ */
+static int __init mxc_irda_init(void)
+{
+ return platform_driver_register(&mxcir_driver);
+}
+
+/*!
+ * This function is used to clean up all resources before the driver exits.
+ */
+static void __exit mxc_irda_exit(void)
+{
+ platform_driver_unregister(&mxcir_driver);
+}
+
+module_init(mxc_irda_init);
+module_exit(mxc_irda_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor");
+MODULE_DESCRIPTION("MXC IrDA(SIR/FIR) driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/mxc_ir.h b/drivers/net/irda/mxc_ir.h
new file mode 100644
index 000000000000..6b22ca129f27
--- /dev/null
+++ b/drivers/net/irda/mxc_ir.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef __MXC_FIRI_REG_H__
+#define __MXC_FIRI_REG_H__
+
+#include <mach/hardware.h>
+
+/*!
+ * @defgroup FIRI Fast IR Driver
+ */
+
+/*!
+ * @file mxc_ir.h
+ *
+ * @brief MXC FIRI header file
+ *
+ * This file defines the register offsets and bit definitions of the FIRI module
+ *
+ * @ingroup FIRI
+ */
+
+/*!
+ * FIRI maximum packet length
+ */
+#define FIR_MAX_RXLEN 2047
+
+/*
+ * FIRI Transmitter Control Register
+ */
+#define FIRITCR 0x00
+/*
+ * FIRI Transmitter Count Register
+ */
+#define FIRITCTR 0x04
+/*
+ * FIRI Receiver Control Register
+ */
+#define FIRIRCR 0x08
+/*
+ * FIRI Transmitter Status Register
+ */
+#define FIRITSR 0x0C
+/*
+ * FIRI Receiver Status Register
+ */
+#define FIRIRSR 0x10
+/*
+ * FIRI Transmitter FIFO
+ */
+#define FIRITXFIFO 0x14
+/*
+ * FIRI Receiver FIFO
+ */
+#define FIRIRXFIFO 0x18
+/*
+ * FIRI Control Register
+ */
+#define FIRICR 0x1C
+
+/*
+ * Bit definitions of Transmitter Controller Register
+ */
+#define FIRITCR_HAG (1<<24) /* H/W address generator */
+#define FIRITCR_SRF_FIR (0<<13) /* Start field repeat factor */
+#define FIRITCR_SRF_MIR (1<<13) /* Start field Repeat Factor */
+#define FIRITCR_TDT_MIR	(2<<10)	/* TX trigger for MIR is set to 32 bytes */
+#define FIRITCR_TDT_FIR	(1<<10)	/* TX trigger for FIR is set to 16 bytes */
+#define FIRITCR_TCIE (1<<9) /* TX Complete Interrupt Enable */
+#define FIRITCR_TPEIE (1<<8) /* TX Packet End Interrupt Enable */
+#define FIRITCR_TFUIE (1<<7) /* TX FIFO Under-run Interrupt Enable */
+#define FIRITCR_PCF (1<<6) /* Packet Complete by FIFO */
+#define FIRITCR_PC (1<<5) /* Packet Complete */
+#define FIRITCR_SIP (1<<4) /* TX Enable of SIP */
+#define FIRITCR_TPP (1<<3) /* TX Pulse Polarity bit */
+#define FIRITCR_TM_FIR (0<<1) /* TX Mode 4 Mbps */
+#define FIRITCR_TM_MIR1 (1<<1) /* TX Mode 0.576 Mbps */
+#define FIRITCR_TM_MIR2 (1<<2) /* TX Mode 1.152 Mbps */
+#define FIRITCR_TE (1<<0) /* TX Enable */
+
+/*
+ * Bit definitions of Transmitter Count Register
+ */
+#define FIRITCTR_TPL 511 /* TX Packet Length set to 512 bytes */
+
+/*
+ * Bit definitions of Receiver Control Register
+ */
+#define FIRIRCR_RAM (1<<24) /* RX Address Match */
+#define FIRIRCR_RPEDE (1<<11) /* Packet End DMA request Enable */
+#define FIRIRCR_RDT_MIR	(2<<8)	/* DMA trigger level (64 bytes in RX FIFO) */
+#define FIRIRCR_RDT_FIR	(1<<8)	/* DMA trigger level (16 bytes in RX FIFO) */
+#define FIRIRCR_RPA (1<<7) /* RX Packet Abort */
+#define FIRIRCR_RPEIE (1<<6) /* RX Packet End Interrupt Enable */
+#define FIRIRCR_PAIE (1<<5) /* Packet Abort Interrupt Enable */
+#define FIRIRCR_RFOIE (1<<4) /* RX FIFO Overrun Interrupt Enable */
+#define FIRIRCR_RPP (1<<3) /* RX Pulse Polarity bit */
+#define FIRIRCR_RM_FIR (0<<1) /* 4 Mbps */
+#define FIRIRCR_RM_MIR1 (1<<1) /* 0.576 Mbps */
+#define FIRIRCR_RM_MIR2 (1<<2) /* 1.152 Mbps */
+#define FIRIRCR_RE (1<<0) /* RX Enable */
+
+/* Transmitter Status Register */
+#define FIRITSR_TFP 0xFF00 /* Mask for available bytes in TX FIFO */
+#define FIRITSR_TC (1<<3) /* Transmit Complete bit */
+#define FIRITSR_SIPE (1<<2) /* SIP End bit */
+#define FIRITSR_TPE (1<<1) /* Transmit Packet End */
+#define FIRITSR_TFU (1<<0) /* TX FIFO Under-run */
+
+/* Receiver Status Register */
+#define FIRIRSR_RFP	0xFF00	/* mask for available bytes in RX FIFO */
+#define FIRIRSR_PAS (1<<5) /* preamble search */
+#define FIRIRSR_RPE (1<<4) /* RX Packet End */
+#define FIRIRSR_RFO (1<<3) /* RX FIFO Overrun */
+#define FIRIRSR_BAM (1<<2) /* Broadcast Address Match */
+#define FIRIRSR_CRCE (1<<1) /* CRC error */
+#define FIRIRSR_DDE (1<<0) /* Address, control or data field error */
+
+/* FIRI Control Register */
+#define FIRICR_BL (32<<5) /* Burst Length is set to 32 */
+#define FIRICR_OSF (0<<1) /* Over Sampling Factor */
+
+#endif /* __MXC_FIRI_REG_H__ */
diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h
index cc7d85bdfb3e..2ea8f4aa1d91 100644
--- a/drivers/net/smc911x.h
+++ b/drivers/net/smc911x.h
@@ -52,6 +52,14 @@
#ifdef SMC_USE_PXA_DMA
#define SMC_USE_DMA
+ #define SMC_USE_32BIT 1
+ #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
+#endif
+
+#ifdef CONFIG_ARCH_MXC
+ #define SMC_USE_16BIT 0
+ #define SMC_USE_32BIT 1
+ #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
#endif
/* store this information for the driver.. */
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
new file mode 100644
index 000000000000..e416066d9be9
--- /dev/null
+++ b/drivers/net/smsc911x.c
@@ -0,0 +1,2198 @@
+/***************************************************************************
+ *
+ * Copyright (C) 2004-2007 SMSC
+ * Copyright (C) 2005 ARM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ***************************************************************************
+ * Rewritten, heavily based on smsc911x simple driver by SMSC.
+ * Partly uses io macros from smc91x.c by Nicolas Pitre
+ *
+ * Supported devices:
+ * LAN9115, LAN9116, LAN9117, LAN9118
+ * LAN9215, LAN9216, LAN9217, LAN9218
+ *
+ * History:
+ * 05/05/2005 bahadir.balban@arm.com
+ * - Transition to linux coding style
+ * - Platform driver and module interface
+ *
+ * 17/07/2006 steve.glendinning@smsc.com
+ * - Added support for LAN921x family
+ * - Added workaround for multicast filters
+ *
+ * 31/07/2006 steve.glendinning@smsc.com
+ * - Removed tasklet, using NAPI poll instead
+ * - Multiple device support
+ * - Large tidy-up following feedback from netdev list
+ *
+ * 03/08/2006 steve.glendinning@smsc.com
+ * - Added ethtool support
+ * - Convert to use generic MII interface
+ *
+ * 04/08/2006 bahadir.balban@arm.com
+ * - Added ethtool eeprom r/w support
+ *
+ * 17/06/2007 steve.glendinning@smsc.com
+ * - Incorporate changes from Bill Gatliff and Russell King
+ *
+ * 04/07/2007 steve.glendinning@smsc.com
+ * - move irq configuration to platform_device
+ * - fix link poller after interface is stopped and restarted
+ *
+ * 13/07/2007 bahadir.balban@arm.com
+ * - set irq polarity before requesting irq
+ */
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/version.h>
+#include <linux/bug.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <asm/io.h>
+#include "smsc911x.h"
+
+#define SMSC_CHIPNAME "smsc911x"
+#define SMSC_DRV_VERSION "2007-07-13"
+
+MODULE_LICENSE("GPL");
+
+struct smsc911x_data {
+ void __iomem *ioaddr;
+
+ unsigned int idrev;
+ unsigned int generation; /* used to decide which workarounds apply */
+
+ /* device configuration */
+ unsigned int irq_polarity;
+ unsigned int irq_type;
+
+	/* This needs to be acquired before calling any of the following:
+ * smsc911x_mac_read(), smsc911x_mac_write()
+ * smsc911x_phy_read(), smsc911x_phy_write()
+ */
+ spinlock_t phy_lock;
+
+ struct net_device_stats stats;
+ struct mii_if_info mii;
+ unsigned int using_extphy;
+ u32 msg_enable;
+#ifdef USE_LED1_WORK_AROUND
+ unsigned int gpio_setting;
+ unsigned int gpio_orig_setting;
+#endif
+ struct net_device *netdev;
+ struct napi_struct napi;
+ struct timer_list link_poll_timer;
+ unsigned int stop_link_poll;
+
+ unsigned int software_irq_signal;
+
+#ifdef USE_PHY_WORK_AROUND
+#define MIN_PACKET_SIZE (64)
+ char loopback_tx_pkt[MIN_PACKET_SIZE];
+ char loopback_rx_pkt[MIN_PACKET_SIZE];
+ unsigned int resetcount;
+#endif
+
+ /* Members for Multicast filter workaround */
+ unsigned int multicast_update_pending;
+ unsigned int set_bits_mask;
+ unsigned int clear_bits_mask;
+ unsigned int hashhi;
+ unsigned int hashlo;
+};
+
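+/*
+ * Register accessors: with a 32-bit bus each register is a single
+ * readl/writel; the SPI variant goes through a CPLD; otherwise every
+ * 32-bit register access is emulated with two consecutive 16-bit
+ * accesses that must not be split by our own ISR.
+ */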
+#if SMSC_CAN_USE_32BIT
+
+static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
+{
+ return readl(pdata->ioaddr + reg);
+}
+
+static inline void smsc911x_reg_write(u32 val, struct smsc911x_data *pdata,
+ u32 reg)
+{
+ writel(val, pdata->ioaddr + reg);
+}
+
+#elif SMSC_CAN_USE_SPI
+
+static u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
+{
+ u32 reg_val;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ reg_val = spi_cpld_read(reg);
+ local_irq_restore(flags);
+
+ return reg_val;
+}
+
+static void smsc911x_reg_write(u32 val, struct smsc911x_data *pdata,
+ u32 reg)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ spi_cpld_write(reg, val);
+ local_irq_restore(flags);
+}
+
+#else /* SMSC_CAN_USE_32BIT */
+
+static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
+{
+ u32 reg_val;
+ unsigned long flags;
+
+ /* these two 16-bit reads must be performed consecutively, so must
+ * not be interrupted by our own ISR (which would start another
+ * read operation) */
+ local_irq_save(flags);
+ reg_val = ((readw(pdata->ioaddr + reg) & 0xFFFF) |
+ ((readw(pdata->ioaddr + reg + 2) & 0xFFFF) << 16));
+ local_irq_restore(flags);
+
+ return reg_val;
+}
+
+static inline void smsc911x_reg_write(u32 val, struct smsc911x_data *pdata,
+ u32 reg)
+{
+ unsigned long flags;
+
+ /* these two 16-bit writes must be performed consecutively, so must
+ * not be interrupted by our own ISR (which would start another
+ * read operation) */
+ local_irq_save(flags);
+ writew(val & 0xFFFF, pdata->ioaddr + reg);
+ writew((val >> 16) & 0xFFFF, pdata->ioaddr + reg + 2);
+ local_irq_restore(flags);
+}
+
+#endif /* SMSC_CAN_USE_32BIT */
+
+/* Writes a packet to the TX_DATA_FIFO */
+static inline void
+smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf,
+ unsigned int wordcount)
+{
+ while (wordcount--)
+ smsc911x_reg_write(*buf++, pdata, TX_DATA_FIFO);
+}
+
+/* Reads a packet out of the RX_DATA_FIFO */
+static inline void
+smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf,
+ unsigned int wordcount)
+{
+ while (wordcount--)
+ *buf++ = smsc911x_reg_read(pdata, RX_DATA_FIFO);
+}
+
+/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read
+ * and smsc911x_mac_write, so assumes phy_lock is held */
+static int smsc911x_mac_notbusy(struct smsc911x_data *pdata)
+{
+ int i;
+ u32 val;
+
+ for (i = 0; i < 40; i++) {
+ val = smsc911x_reg_read(pdata, MAC_CSR_CMD);
+ if (!(val & MAC_CSR_CMD_CSR_BUSY_))
+ return 1;
+ }
+ SMSC_WARNING("Timed out waiting for MAC not BUSY. "
+ "MAC_CSR_CMD: 0x%08X", val);
+ return 0;
+}
+
+/* Fetches a MAC register value. Assumes phy_lock is acquired */
+static u32 smsc911x_mac_read(struct smsc911x_data *pdata, unsigned int offset)
+{
+ unsigned int temp;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ if (!spin_is_locked(&pdata->phy_lock))
+ SMSC_WARNING("phy_lock not held");
+#endif /* CONFIG_DEBUG_SPINLOCK */
+
+ temp = smsc911x_reg_read(pdata, MAC_CSR_CMD);
+ if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) {
+ SMSC_WARNING("smsc911x_mac_read failed, MAC busy at entry");
+ return 0xFFFFFFFF;
+ }
+
+ /* Send the MAC cmd */
+ smsc911x_reg_write(((offset & 0xFF) | MAC_CSR_CMD_CSR_BUSY_
+ | MAC_CSR_CMD_R_NOT_W_), pdata, MAC_CSR_CMD);
+
+ /* Workaround for hardware read-after-write restriction */
+ temp = smsc911x_reg_read(pdata, BYTE_TEST);
+
+ /* Wait for the read to happen */
+ if (likely(smsc911x_mac_notbusy(pdata)))
+ return smsc911x_reg_read(pdata, MAC_CSR_DATA);
+
+ SMSC_WARNING("smsc911x_mac_read failed, MAC busy after read");
+ return 0xFFFFFFFF;
+}
+
+/* Set a mac register, phy_lock must be acquired before calling */
+static void smsc911x_mac_write(struct smsc911x_data *pdata,
+ unsigned int offset, u32 val)
+{
+ unsigned int temp;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ if (!spin_is_locked(&pdata->phy_lock))
+ SMSC_WARNING("phy_lock not held");
+#endif /* CONFIG_DEBUG_SPINLOCK */
+
+ temp = smsc911x_reg_read(pdata, MAC_CSR_CMD);
+ if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) {
+ SMSC_WARNING("smsc911x_mac_write failed, MAC busy at entry");
+ return;
+ }
+
+ /* Send data to write */
+ smsc911x_reg_write(val, pdata, MAC_CSR_DATA);
+
+ /* Write the actual data */
+ smsc911x_reg_write(((offset & 0xFF) | MAC_CSR_CMD_CSR_BUSY_), pdata,
+ MAC_CSR_CMD);
+
+ /* Workaround for hardware read-after-write restriction */
+ temp = smsc911x_reg_read(pdata, BYTE_TEST);
+
+ /* Wait for the write to complete */
+ if (likely(smsc911x_mac_notbusy(pdata)))
+ return;
+
+ SMSC_WARNING("smsc911x_mac_write failed, MAC busy after write");
+}
+
+/* Gets a phy register, phy_lock must be acquired before calling */
+static u16 smsc911x_phy_read(struct smsc911x_data *pdata, unsigned int index)
+{
+ unsigned int addr;
+ int i;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ if (!spin_is_locked(&pdata->phy_lock))
+ SMSC_WARNING("phy_lock not held");
+#endif /* CONFIG_DEBUG_SPINLOCK */
+
+ /* Confirm MII not busy */
+ if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) {
+ SMSC_WARNING("MII is busy in smsc911x_phy_read???");
+ return 0;
+ }
+
+ /* Set the address, index & direction (read from PHY) */
+ addr = (((pdata->mii.phy_id) & 0x1F) << 11)
+ | ((index & 0x1F) << 6);
+ smsc911x_mac_write(pdata, MII_ACC, addr);
+
+ /* Wait for read to complete w/ timeout */
+ for (i = 0; i < 100; i++) {
+ /* See if MII is finished yet */
+ if (!(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) {
+ return smsc911x_mac_read(pdata, MII_DATA);
+ }
+ }
+	SMSC_WARNING("Timed out waiting for MII read to finish");
+ return 0xFFFF;
+}
+
+/* Sets a phy register, phy_lock must be acquired before calling */
+static void smsc911x_phy_write(struct smsc911x_data *pdata,
+ unsigned int index, u16 val)
+{
+ unsigned int addr;
+ int i;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ if (!spin_is_locked(&pdata->phy_lock))
+ SMSC_WARNING("phy_lock not held");
+#endif /* CONFIG_DEBUG_SPINLOCK */
+
+ /* Confirm MII not busy */
+ if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) {
+		SMSC_WARNING("MII is busy in smsc911x_phy_write???");
+ return;
+ }
+
+ /* Put the data to write in the MAC */
+ smsc911x_mac_write(pdata, MII_DATA, val);
+
+ /* Set the address, index & direction (write to PHY) */
+ addr = (((pdata->mii.phy_id) & 0x1F) << 11) |
+ ((index & 0x1F) << 6) | MII_ACC_MII_WRITE_;
+ smsc911x_mac_write(pdata, MII_ACC, addr);
+
+ /* Wait for write to complete w/ timeout */
+ for (i = 0; i < 100; i++) {
+ /* See if MII is finished yet */
+ if (!(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_))
+ return;
+ }
+ SMSC_WARNING("Timed out waiting for MII write to finish");
+}
+
+static int smsc911x_mdio_read(struct net_device *dev, int phy_id, int location)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned long flags;
+ int reg;
+
+ spin_lock_irqsave(&pdata->phy_lock, flags);
+ reg = smsc911x_phy_read(pdata, location);
+ spin_unlock_irqrestore(&pdata->phy_lock, flags);
+
+ return reg;
+}
+
+static void smsc911x_mdio_write(struct net_device *dev, int phy_id,
+ int location, int val)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->phy_lock, flags);
+ smsc911x_phy_write(pdata, location, val);
+ spin_unlock_irqrestore(&pdata->phy_lock, flags);
+}
+
+/* Autodetects and initialises external phy for SMSC9115 and SMSC9117 flavors.
+ * If something goes wrong, returns -ENODEV to revert back to internal phy.
+ * Performed at initialisation only, so interrupts are enabled */
+static int smsc911x_phy_initialise_external(struct smsc911x_data *pdata)
+{
+ unsigned int address;
+ unsigned int hwcfg;
+ unsigned int phyid1;
+ unsigned int phyid2;
+
+ hwcfg = smsc911x_reg_read(pdata, HW_CFG);
+
+ /* External phy is requested, supported, and detected */
+ if (hwcfg & HW_CFG_EXT_PHY_DET_) {
+
+ /* Attempt to switch to external phy for auto-detecting
+ * its address. Assuming tx and rx are stopped because
+ * smsc911x_phy_initialise is called before
+ * smsc911x_rx_initialise and tx_initialise.
+ */
+
+ /* Disable phy clocks to the MAC */
+ hwcfg &= (~HW_CFG_PHY_CLK_SEL_);
+ hwcfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_;
+ smsc911x_reg_write(hwcfg, pdata, HW_CFG);
+ udelay(10); /* Enough time for clocks to stop */
+
+ /* Switch to external phy */
+ hwcfg |= HW_CFG_EXT_PHY_EN_;
+ smsc911x_reg_write(hwcfg, pdata, HW_CFG);
+
+ /* Enable phy clocks to the MAC */
+ hwcfg &= (~HW_CFG_PHY_CLK_SEL_);
+ hwcfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_;
+ smsc911x_reg_write(hwcfg, pdata, HW_CFG);
+ udelay(10); /* Enough time for clocks to restart */
+
+ hwcfg |= HW_CFG_SMI_SEL_;
+ smsc911x_reg_write(hwcfg, pdata, HW_CFG);
+
+ /* Auto-detect PHY */
+ spin_lock_irq(&pdata->phy_lock);
+ for (address = 0; address <= 31; address++) {
+ pdata->mii.phy_id = address;
+ phyid1 = smsc911x_phy_read(pdata, MII_PHYSID1);
+ phyid2 = smsc911x_phy_read(pdata, MII_PHYSID2);
+ if ((phyid1 != 0xFFFFU) || (phyid2 != 0xFFFFU)) {
+ SMSC_TRACE("Detected PHY at address = "
+ "0x%02X = %d", address, address);
+ break;
+ }
+ }
+ spin_unlock_irq(&pdata->phy_lock);
+
+ if ((phyid1 == 0xFFFFU) && (phyid2 == 0xFFFFU)) {
+			SMSC_WARNING("External PHY is not accessible, "
+ "using internal PHY instead");
+ /* Revert back to internal phy settings. */
+
+ /* Disable phy clocks to the MAC */
+ hwcfg &= (~HW_CFG_PHY_CLK_SEL_);
+ hwcfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_;
+ smsc911x_reg_write(hwcfg, pdata, HW_CFG);
+ udelay(10); /* Enough time for clocks to stop */
+
+ /* Switch to internal phy */
+ hwcfg &= (~HW_CFG_EXT_PHY_EN_);
+ smsc911x_reg_write(hwcfg, pdata, HW_CFG);
+
+ /* Enable phy clocks to the MAC */
+ hwcfg &= (~HW_CFG_PHY_CLK_SEL_);
+ hwcfg |= HW_CFG_PHY_CLK_SEL_INT_PHY_;
+ smsc911x_reg_write(hwcfg, pdata, HW_CFG);
+ udelay(10); /* Enough time for clocks to restart */
+
+ hwcfg &= (~HW_CFG_SMI_SEL_);
+ smsc911x_reg_write(hwcfg, pdata, HW_CFG);
+ /* Use internal phy */
+ return -ENODEV;
+ } else {
+ SMSC_TRACE("Successfully switched to external PHY");
+ pdata->using_extphy = 1;
+ }
+ } else {
+ SMSC_WARNING("No external PHY detected.");
+ SMSC_WARNING("Using internal PHY instead.");
+ /* Use internal phy */
+ return -ENODEV;
+ }
+ return 0;
+}
+
+/* called by phy_initialise and loopback test */
+static int smsc911x_phy_reset(struct smsc911x_data *pdata)
+{
+ unsigned int temp;
+ unsigned int i = 100000;
+ unsigned long flags;
+
+ SMSC_TRACE("Performing PHY BCR Reset");
+ spin_lock_irqsave(&pdata->phy_lock, flags);
+ smsc911x_phy_write(pdata, MII_BMCR, BMCR_RESET);
+ do {
+ udelay(10);
+ temp = smsc911x_phy_read(pdata, MII_BMCR);
+ } while ((i--) && (temp & BMCR_RESET));
+ spin_unlock_irqrestore(&pdata->phy_lock, flags);
+
+ if (temp & BMCR_RESET) {
+ SMSC_WARNING("PHY reset failed to complete.");
+ return 0;
+ }
+	/* Extra delay required because the phy may not have completed its
+	 * reset when BMCR_RESET reads back as cleared. The specs say 256 us
+	 * is enough, but use 1 ms here to be safe.
+ */
+ msleep(1);
+
+ return 1;
+}
+
+/* Fetches a tx status out of the status fifo */
+static unsigned int smsc911x_tx_get_txstatus(struct smsc911x_data *pdata)
+{
+ unsigned int result =
+ smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TSUSED_;
+
+ if (result != 0)
+ result = smsc911x_reg_read(pdata, TX_STATUS_FIFO);
+
+ return result;
+}
+
+/* Fetches the next rx status */
+static unsigned int smsc911x_rx_get_rxstatus(struct smsc911x_data *pdata)
+{
+ unsigned int result =
+ smsc911x_reg_read(pdata, RX_FIFO_INF) & RX_FIFO_INF_RXSUSED_;
+
+ if (result != 0)
+ result = smsc911x_reg_read(pdata, RX_STATUS_FIFO);
+
+ return result;
+}
+
+#ifdef USE_PHY_WORK_AROUND
+static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
+{
+ unsigned int tries;
+ u32 wrsz;
+ u32 rdsz;
+ u32 bufp;
+
+ for (tries = 0; tries < 10; tries++) {
+ unsigned int txcmd_a;
+ unsigned int txcmd_b;
+ unsigned int status;
+ unsigned int pktlength;
+ unsigned int i;
+
+ /* Zero-out rx packet memory */
+ memset(pdata->loopback_rx_pkt, 0, MIN_PACKET_SIZE);
+
+ /* Write tx packet to 118 */
+ txcmd_a = (((unsigned int)pdata->loopback_tx_pkt)
+ & 0x03) << 16;
+ txcmd_a |= TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_;
+ txcmd_a |= MIN_PACKET_SIZE;
+
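+		/* TX command 'B': packet tag in the upper 16 bits (used as
+		 * the packet length by this driver) and packet length in the
+		 * lower bits */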
+ txcmd_b = MIN_PACKET_SIZE << 16 | MIN_PACKET_SIZE;
+
+ smsc911x_reg_write(txcmd_a, pdata, TX_DATA_FIFO);
+ smsc911x_reg_write(txcmd_b, pdata, TX_DATA_FIFO);
+
+ bufp = ((u32) pdata->loopback_tx_pkt) & 0xFFFFFFFC;
+ wrsz = MIN_PACKET_SIZE + 3;
+ wrsz += (((u32) pdata->loopback_tx_pkt) & 0x3);
+ wrsz >>= 2;
+
+ smsc911x_tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
+
+ /* Wait till transmit is done */
+ i = 60;
+ do {
+ udelay(5);
+ status = smsc911x_tx_get_txstatus(pdata);
+ } while ((i--) && (!status));
+
+ if (!status) {
+ SMSC_WARNING("Failed to transmit during loopback test");
+ continue;
+ }
+ if (status & TX_STS_ES_) {
+ SMSC_WARNING("Transmit encountered errors during "
+ "loopback test");
+ continue;
+ }
+
+ /* Wait till receive is done */
+ i = 60;
+ do {
+ udelay(5);
+ status = smsc911x_rx_get_rxstatus(pdata);
+ } while ((i--) && (!status));
+
+ if (!status) {
+ SMSC_WARNING("Failed to receive during loopback test");
+ continue;
+ }
+ if (status & RX_STS_ES_) {
+ SMSC_WARNING("Receive encountered errors during "
+ "loopback test");
+ continue;
+ }
+
+ pktlength = ((status & 0x3FFF0000UL) >> 16);
+ bufp = (u32)pdata->loopback_rx_pkt;
+ rdsz = pktlength + 3;
+ rdsz += ((u32)pdata->loopback_rx_pkt) & 0x3;
+ rdsz >>= 2;
+
+ smsc911x_rx_readfifo(pdata, (unsigned int *)bufp, rdsz);
+
+ if (pktlength != (MIN_PACKET_SIZE + 4)) {
+ SMSC_WARNING("Unexpected packet size during "
+				     "loopback test, size=%d, "
+ "will retry", pktlength);
+ } else {
+ unsigned int j;
+ int mismatch = 0;
+ for (j = 0; j < MIN_PACKET_SIZE; j++) {
+ if (pdata->loopback_tx_pkt[j]
+ != pdata->loopback_rx_pkt[j]) {
+ mismatch = 1;
+ break;
+ }
+ }
+ if (!mismatch) {
+ SMSC_TRACE("Successfully verified "
+ "loopback packet");
+ return 1;
+ } else {
+				SMSC_WARNING("Data mismatch during "
+					     "loopback test, will retry.");
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int smsc911x_phy_loopbacktest(struct smsc911x_data *pdata)
+{
+ int result = 0;
+ unsigned int i;
+ unsigned int val;
+ unsigned long flags;
+
+ /* Initialise tx packet */
+ for (i = 0; i < 6; i++) {
+ /* Use broadcast destination address */
+ pdata->loopback_tx_pkt[i] = (char)0xFF;
+ }
+
+ for (i = 6; i < 12; i++) {
+ /* Use incrementing source address */
+ pdata->loopback_tx_pkt[i] = (char)i;
+ }
+
+ /* Set length type field */
+ pdata->loopback_tx_pkt[12] = 0x00;
+ pdata->loopback_tx_pkt[13] = 0x00;
+ for (i = 14; i < MIN_PACKET_SIZE; i++) {
+ pdata->loopback_tx_pkt[i] = (char)i;
+ }
+
+ val = smsc911x_reg_read(pdata, HW_CFG);
+ val &= HW_CFG_TX_FIF_SZ_;
+ val |= HW_CFG_SF_;
+ smsc911x_reg_write(val, pdata, HW_CFG);
+
+ smsc911x_reg_write(TX_CFG_TX_ON_, pdata, TX_CFG);
+ smsc911x_reg_write((((unsigned int)pdata->loopback_rx_pkt)
+ & 0x03) << 8, pdata, RX_CFG);
+
+ for (i = 0; i < 10; i++) {
+ /* Set PHY to 10/FD, no ANEG, and loopback mode */
+ spin_lock_irqsave(&pdata->phy_lock, flags);
+ smsc911x_phy_write(pdata, MII_BMCR, 0x4100);
+
+ /* Enable MAC tx/rx, FD */
+ smsc911x_mac_write(pdata, MAC_CR, MAC_CR_FDPX_
+ | MAC_CR_TXEN_ | MAC_CR_RXEN_);
+ spin_unlock_irqrestore(&pdata->phy_lock, flags);
+
+ if (smsc911x_phy_check_loopbackpkt(pdata)) {
+ result = 1;
+ break;
+ }
+ pdata->resetcount++;
+
+ /* Disable MAC rx */
+ spin_lock_irqsave(&pdata->phy_lock, flags);
+ smsc911x_mac_write(pdata, MAC_CR, 0);
+ spin_unlock_irqrestore(&pdata->phy_lock, flags);
+
+ smsc911x_phy_reset(pdata);
+ }
+
+ /* Disable MAC */
+ spin_lock_irqsave(&pdata->phy_lock, flags);
+ smsc911x_mac_write(pdata, MAC_CR, 0);
+
+ /* Cancel PHY loopback mode */
+ smsc911x_phy_write(pdata, MII_BMCR, 0);
+ spin_unlock_irqrestore(&pdata->phy_lock, flags);
+
+ smsc911x_reg_write(0, pdata, TX_CFG);
+ smsc911x_reg_write(0, pdata, RX_CFG);
+
+ return result;
+}
+#endif /* USE_PHY_WORK_AROUND */
+
+/* assumes phy_lock is held */
+static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata)
+{
+ unsigned int temp;
+
+ if (pdata->mii.full_duplex) {
+ unsigned int phy_adv;
+ unsigned int phy_lpa;
+ phy_adv = smsc911x_phy_read(pdata, MII_ADVERTISE);
+ phy_lpa = smsc911x_phy_read(pdata, MII_LPA);
+ if (phy_adv & phy_lpa & LPA_PAUSE_CAP) {
+ /* Both ends support symmetric pause, enable
+ * PAUSE receive and transmit */
+ smsc911x_mac_write(pdata, FLOW, 0xFFFF0002);
+ temp = smsc911x_reg_read(pdata, AFC_CFG);
+ temp |= 0xF;
+ smsc911x_reg_write(temp, pdata, AFC_CFG);
+ } else if (((phy_adv & ADVERTISE_PAUSE_ALL) ==
+ ADVERTISE_PAUSE_ALL) &&
+ ((phy_lpa & LPA_PAUSE_ALL) == LPA_PAUSE_ASYM)) {
+ /* We support symmetric and asym pause, the
+ * other end only supports asym, Enable PAUSE
+ * receive, disable PAUSE transmit */
+ smsc911x_mac_write(pdata, FLOW, 0xFFFF0002);
+ temp = smsc911x_reg_read(pdata, AFC_CFG);
+ temp &= ~0xF;
+ smsc911x_reg_write(temp, pdata, AFC_CFG);
+ } else {
+ /* Disable PAUSE receive and transmit */
+ smsc911x_mac_write(pdata, FLOW, 0);
+ temp = smsc911x_reg_read(pdata, AFC_CFG);
+ temp &= ~0xF;
+ smsc911x_reg_write(temp, pdata, AFC_CFG);
+ }
+ } else {
+ smsc911x_mac_write(pdata, FLOW, 0);
+ temp = smsc911x_reg_read(pdata, AFC_CFG);
+ temp |= 0xF;
+ smsc911x_reg_write(temp, pdata, AFC_CFG);
+ }
+}
+
+/* Update link mode if anything has changed */
+static void smsc911x_phy_update_linkmode(struct net_device *dev, int init)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned long flags;
+
+ if (mii_check_media(&pdata->mii, netif_msg_link(pdata), init)) {
+ /* duplex state has changed */
+ unsigned int mac_cr;
+
+ spin_lock_irqsave(&pdata->phy_lock, flags);
+ mac_cr = smsc911x_mac_read(pdata, MAC_CR);
+ if (pdata->mii.full_duplex) {
+ SMSC_TRACE("configuring for full duplex mode");
+ mac_cr |= MAC_CR_FDPX_;
+ } else {
+ SMSC_TRACE("configuring for half duplex mode");
+ mac_cr &= ~MAC_CR_FDPX_;
+ }
+ smsc911x_mac_write(pdata, MAC_CR, mac_cr);
+
+ smsc911x_phy_update_flowcontrol(pdata);
+
+ spin_unlock_irqrestore(&pdata->phy_lock, flags);
+ }
+#ifdef USE_LED1_WORK_AROUND
+ if (netif_carrier_ok(dev)) {
+ if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) &&
+ (!pdata->using_extphy)) {
+			/* Restore original GPIO configuration */
+ pdata->gpio_setting = pdata->gpio_orig_setting;
+ smsc911x_reg_write(pdata->gpio_setting, pdata,
+ GPIO_CFG);
+ }
+ } else {
+ /* Check global setting that LED1
+ * usage is 10/100 indicator */
+ pdata->gpio_setting = smsc911x_reg_read(pdata, GPIO_CFG);
+ if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_)
+ && (!pdata->using_extphy)) {
+ /* Force 10/100 LED off, after saving
+			 * original GPIO configuration */
+ pdata->gpio_orig_setting = pdata->gpio_setting;
+
+ pdata->gpio_setting &= ~GPIO_CFG_LED1_EN_;
+ pdata->gpio_setting |= (GPIO_CFG_GPIOBUF0_
+ | GPIO_CFG_GPIODIR0_
+ | GPIO_CFG_GPIOD0_);
+ smsc911x_reg_write(pdata->gpio_setting, pdata,
+ GPIO_CFG);
+ }
+ }
+#endif /* USE_LED1_WORK_AROUND */
+}
+
+/* Entry point for the link poller */
+static void smsc911x_phy_checklink(unsigned long ptr)
+{
+ struct net_device *dev = (struct net_device *)ptr;
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ smsc911x_phy_update_linkmode(dev, 0);
+
+ if (!(pdata->stop_link_poll)) {
+ pdata->link_poll_timer.expires = jiffies + 2 * HZ;
+ add_timer(&pdata->link_poll_timer);
+ } else {
+ pdata->stop_link_poll = 0;
+ }
+}
+
+/* Initialises the PHY layer. Called at initialisation by open() so
+ * interrupts are enabled */
+static int smsc911x_phy_initialise(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned int phyid1 = 0;
+ unsigned int phyid2 = 0;
+ unsigned int temp;
+
+ pdata->using_extphy = 0;
+
+ switch (pdata->idrev & 0xFFFF0000) {
+ case 0x01170000:
+ case 0x01150000:
+ /* External PHY supported, try to autodetect */
+ if (smsc911x_phy_initialise_external(pdata) < 0) {
+ SMSC_TRACE("External PHY is not detected, using "
+ "internal PHY instead");
+ pdata->mii.phy_id = 1;
+ }
+ break;
+ default:
+ SMSC_TRACE("External PHY is not supported, using internal PHY "
+ "instead");
+ pdata->mii.phy_id = 1;
+ break;
+ }
+
+ spin_lock_irq(&pdata->phy_lock);
+ phyid1 = smsc911x_phy_read(pdata, MII_PHYSID1);
+ phyid2 = smsc911x_phy_read(pdata, MII_PHYSID2);
+ spin_unlock_irq(&pdata->phy_lock);
+
+ if ((phyid1 == 0xFFFF) && (phyid2 == 0xFFFF)) {
+ SMSC_WARNING("Internal PHY not detected!");
+ return 0;
+ }
+
+ /* Reset the phy */
+ if (!smsc911x_phy_reset(pdata)) {
+ SMSC_WARNING("PHY reset failed to complete.");
+ return 0;
+ }
+#ifdef USE_PHY_WORK_AROUND
+ if (!smsc911x_phy_loopbacktest(pdata)) {
+ SMSC_WARNING("Failed Loop Back Test");
+ return 0;
+ } else {
+ SMSC_TRACE("Passed Loop Back Test");
+ }
+#endif /* USE_PHY_WORK_AROUND */
+
+ /* Advertise all speeds and pause capabilities */
+ spin_lock_irq(&pdata->phy_lock);
+ temp = smsc911x_phy_read(pdata, MII_ADVERTISE);
+ temp |= (ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+ smsc911x_phy_write(pdata, MII_ADVERTISE, temp);
+ pdata->mii.advertising = temp;
+
+ if (!pdata->using_extphy) {
+ /* using internal phy, enable PHY interrupts */
+ smsc911x_phy_read(pdata, MII_INTSTS);
+ smsc911x_phy_write(pdata, MII_INTMSK, PHY_INTMSK_DEFAULT_);
+ }
+
+ /* begin to establish link */
+ smsc911x_phy_write(pdata, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
+ spin_unlock_irq(&pdata->phy_lock);
+
+ smsc911x_phy_update_linkmode(dev, 1);
+
+ setup_timer(&pdata->link_poll_timer, smsc911x_phy_checklink,
+ (unsigned long)dev);
+ pdata->link_poll_timer.expires = jiffies + 2 * HZ;
+ add_timer(&pdata->link_poll_timer);
+
+	SMSC_TRACE("phy initialised successfully");
+ return 1;
+}
+
+/* Gets the number of tx statuses in the fifo */
+static unsigned int smsc911x_tx_get_txstatcount(struct smsc911x_data *pdata)
+{
+ unsigned int result = (smsc911x_reg_read(pdata, TX_FIFO_INF)
+ & TX_FIFO_INF_TSUSED_) >> 16;
+ return result;
+}
+
+/* Reads tx statuses and increments counters where necessary */
+static void smsc911x_tx_update_txcounters(struct smsc911x_data *pdata)
+{
+ unsigned int tx_stat;
+
+ while ((tx_stat = smsc911x_tx_get_txstatus(pdata)) != 0) {
+ if (unlikely(tx_stat & 0x80000000)) {
+ /* In this driver the packet tag is used as the packet
+ * length. Since a packet length can never reach the
+ * size of 0x8000, this bit is reserved. It is worth
+			 * noting that the "reserved bit" in the warning below
+ * does not reference a hardware defined reserved bit
+ * but rather a driver defined one.
+ */
+ SMSC_WARNING("Packet tag reserved bit is high");
+ } else {
+ if (unlikely(tx_stat & 0x00008000)) {
+ pdata->stats.tx_errors++;
+ } else {
+ pdata->stats.tx_packets++;
+ pdata->stats.tx_bytes += (tx_stat >> 16);
+ }
+ if (unlikely(tx_stat & 0x00000100)) {
+ pdata->stats.collisions += 16;
+ pdata->stats.tx_aborted_errors += 1;
+ } else {
+ pdata->stats.collisions +=
+ ((tx_stat >> 3) & 0xF);
+ }
+ if (unlikely(tx_stat & 0x00000800)) {
+ pdata->stats.tx_carrier_errors += 1;
+ }
+ if (unlikely(tx_stat & 0x00000200)) {
+ pdata->stats.collisions++;
+ pdata->stats.tx_aborted_errors++;
+ }
+ }
+ }
+}
+
+/* Increments the Rx error counters */
+static void
+smsc911x_rx_counterrors(struct smsc911x_data *pdata, unsigned int rxstat)
+{
+ int crc_err = 0;
+
+ if (unlikely(rxstat & 0x00008000)) {
+ pdata->stats.rx_errors++;
+ if (unlikely(rxstat & 0x00000002)) {
+ pdata->stats.rx_crc_errors++;
+ crc_err = 1;
+ }
+ }
+ if (likely(!crc_err)) {
+ if (unlikely((rxstat & 0x00001020) == 0x00001020)) {
+ /* Frame type indicates length,
+ * and length error is set */
+ pdata->stats.rx_length_errors++;
+ }
+ if (rxstat & RX_STS_MCAST_)
+ pdata->stats.multicast++;
+ }
+}
+
+/* Quickly dumps bad packets */
+static void
+smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes)
+{
+ unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2;
+
+ if (likely(pktwords >= 4)) {
+ unsigned int timeout = 500;
+ unsigned int val;
+ smsc911x_reg_write(RX_DP_CTRL_RX_FFWD_, pdata, RX_DP_CTRL);
+ do {
+ udelay(1);
+ val = smsc911x_reg_read(pdata, RX_DP_CTRL);
+ } while (timeout-- && (val & RX_DP_CTRL_RX_FFWD_));
+
+ if (unlikely(timeout == 0))
+ SMSC_WARNING("Timed out waiting for RX FFWD "
+ "to finish, RX_DP_CTRL: 0x%08X", val);
+ } else {
+ unsigned int temp;
+ while (pktwords--)
+ temp = smsc911x_reg_read(pdata, RX_DATA_FIFO);
+ }
+}
+
+/* NAPI poll function */
+static int smsc911x_poll(struct napi_struct *napi, int budget)
+{
+ struct smsc911x_data *pdata = container_of(napi, struct smsc911x_data, napi);
+ struct net_device *dev = pdata->netdev;
+ int npackets = 0;
+
+ while (npackets < budget) {
+ unsigned int pktlength;
+ unsigned int pktwords;
+ unsigned int rxstat = smsc911x_rx_get_rxstatus(pdata);
+
+ /* break out of while loop if there are no more packets waiting */
+ if (!rxstat)
+ break;
+
+ pktlength = ((rxstat & 0x3FFF0000) >> 16);
+ pktwords = (pktlength + NET_IP_ALIGN + 3) >> 2;
+ smsc911x_rx_counterrors(pdata, rxstat);
+
+ if (likely((rxstat & RX_STS_ES_) == 0)) {
+ struct sk_buff *skb;
+ skb = dev_alloc_skb(pktlength + NET_IP_ALIGN);
+ if (likely(skb)) {
+ skb->data = skb->head;
+ skb->tail = skb->head;
+ /* Align IP on 16B boundary */
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb_put(skb, pktlength - 4);
+ smsc911x_rx_readfifo(pdata,
+ (unsigned int *)skb->head,
+ pktwords);
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_NONE;
+ netif_receive_skb(skb);
+
+ /* Update counters */
+ pdata->stats.rx_packets++;
+ pdata->stats.rx_bytes += (pktlength - 4);
+ dev->last_rx = jiffies;
+ npackets++;
+ continue;
+ } else {
+ SMSC_WARNING("Unable to allocate sk_buff "
+ "for rx packet, in PIO path");
+ pdata->stats.rx_dropped++;
+ }
+ }
+ /* At this point, the packet is to be read out
+ * of the fifo and discarded */
+ smsc911x_rx_fastforward(pdata, pktlength);
+ }
+
+ pdata->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP);
+ smsc911x_reg_write(INT_STS_RSFL_, pdata, INT_STS);
+
+ if (npackets < budget) {
+ unsigned int temp;
+ /* We processed all packets available. Tell NAPI it can
+ * stop polling then re-enable rx interrupts */
+ netif_rx_complete(dev, napi);
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp |= INT_EN_RSFL_EN_;
+ smsc911x_reg_write(temp, pdata, INT_EN);
+ }
+
+	/* If the budget was exhausted, packets may still be waiting */
+ return npackets;
+}
+
+/* Returns hash bit number for given MAC address
+ * Example:
+ * 01 00 5E 00 00 01 -> returns bit number 31 */
+static unsigned int smsc911x_hash(char addr[ETH_ALEN])
+{
+ unsigned int crc;
+ unsigned int result;
+
+ crc = ether_crc(ETH_ALEN, addr);
+ result = (crc >> 26) & 0x3f;
+
+ return result;
+}
+
+static void smsc911x_rx_multicast_update(struct smsc911x_data *pdata)
+{
+ /* Performs the multicast & mac_cr update. This is called when
+ * safe on the current hardware, and with the phy_lock held */
+ unsigned int mac_cr = smsc911x_mac_read(pdata, MAC_CR);
+ mac_cr |= pdata->set_bits_mask;
+ mac_cr &= ~(pdata->clear_bits_mask);
+ smsc911x_mac_write(pdata, MAC_CR, mac_cr);
+ smsc911x_mac_write(pdata, HASHH, pdata->hashhi);
+ smsc911x_mac_write(pdata, HASHL, pdata->hashlo);
+ SMSC_TRACE("maccr 0x%08X, HASHH 0x%08X, HASHL 0x%08X", mac_cr,
+ pdata->hashhi, pdata->hashlo);
+}
+
+static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
+{
+ unsigned int mac_cr;
+
+ /* This function is only called for older LAN911x devices
+ * (revA or revB), where MAC_CR, HASHH and HASHL should not
+ * be modified during Rx - newer devices immediately update the
+ * registers.
+ *
+ * This is called from interrupt context */
+
+ spin_lock(&pdata->phy_lock);
+
+ /* Check Rx has stopped */
+ if (smsc911x_mac_read(pdata, MAC_CR) & MAC_CR_RXEN_)
+ SMSC_WARNING("Rx not stopped\n");
+
+ /* Perform the update - safe to do now Rx has stopped */
+ smsc911x_rx_multicast_update(pdata);
+
+ /* Re-enable Rx */
+ mac_cr = smsc911x_mac_read(pdata, MAC_CR);
+ mac_cr |= MAC_CR_RXEN_;
+ smsc911x_mac_write(pdata, MAC_CR, mac_cr);
+
+ pdata->multicast_update_pending = 0;
+
+ spin_unlock(&pdata->phy_lock);
+}
+
+/* Sets the device MAC address to dev_addr, called with phy_lock held */
+static void
+smsc911x_set_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6])
+{
+ u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4];
+ u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
+ (dev_addr[1] << 8) | dev_addr[0];
+
+ smsc911x_mac_write(pdata, ADDRH, mac_high16);
+ smsc911x_mac_write(pdata, ADDRL, mac_low32);
+}
+
+static int smsc911x_open(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned int timeout;
+ unsigned int temp;
+ unsigned int intcfg;
+
+ spin_lock_init(&pdata->phy_lock);
+
+ /* Reset the LAN911x */
+ smsc911x_reg_write(HW_CFG_SRST_, pdata, HW_CFG);
+ timeout = 10;
+ do {
+ udelay(10);
+ temp = smsc911x_reg_read(pdata, HW_CFG);
+ } while ((--timeout) && (temp & HW_CFG_SRST_));
+
+ if (unlikely(temp & HW_CFG_SRST_)) {
+ SMSC_WARNING("Failed to complete reset");
+ return -ENODEV;
+ }
+
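+	/* TX FIFO size (TX_FIF_SZ field) set to 5; AFC_CFG programs the
+	 * automatic flow control high/low thresholds and back-pressure
+	 * duration */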
+ smsc911x_reg_write(0x00050000, pdata, HW_CFG);
+ smsc911x_reg_write(0x006E3740, pdata, AFC_CFG);
+
+ /* Make sure EEPROM has finished loading before setting GPIO_CFG */
+ timeout = 50;
+	while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) &&
+	       (--timeout)) {
+ udelay(10);
+ }
+
+ if (unlikely(timeout == 0)) {
+ SMSC_WARNING("Timed out waiting for EEPROM "
+ "busy bit to clear");
+ }
+#if USE_DEBUG >= 1
+ smsc911x_reg_write(0x00670700, pdata, GPIO_CFG);
+#else
+ smsc911x_reg_write(0x70070000, pdata, GPIO_CFG);
+#endif
+
+ /* Initialise irqs, but leave all sources disabled */
+ smsc911x_reg_write(0, pdata, INT_EN);
+ smsc911x_reg_write(0xFFFFFFFF, pdata, INT_STS);
+
+	/* Set interrupt deassertion to 100uS (INT_DEAS is in 10uS units) */
+ intcfg = ((10 << 24) | INT_CFG_IRQ_EN_);
+
+ if (pdata->irq_polarity) {
+ SMSC_TRACE("irq polarity: active high");
+ intcfg |= INT_CFG_IRQ_POL_;
+ } else {
+ SMSC_TRACE("irq polarity: active low");
+ }
+
+ if (pdata->irq_type) {
+ SMSC_TRACE("irq type: push-pull");
+ intcfg |= INT_CFG_IRQ_TYPE_;
+ } else {
+ SMSC_TRACE("irq type: open drain");
+ }
+
+ smsc911x_reg_write(intcfg, pdata, INT_CFG);
+
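+	/* Verify the interrupt line is wired up by raising the chip's
+	 * software interrupt and waiting for the ISR to acknowledge it */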
+ SMSC_TRACE("Testing irq handler using IRQ %d", dev->irq);
+ pdata->software_irq_signal = 0;
+ smp_wmb();
+
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp |= INT_EN_SW_INT_EN_;
+ smsc911x_reg_write(temp, pdata, INT_EN);
+
+ timeout = 1000;
+ while (timeout--) {
+ smp_rmb();
+ if (pdata->software_irq_signal)
+ break;
+ msleep(1);
+ }
+
+ if (!pdata->software_irq_signal) {
+ printk(KERN_WARNING "%s: ISR failed signaling test (IRQ %d)\n",
+ dev->name, dev->irq);
+ return -ENODEV;
+ }
+ SMSC_TRACE("IRQ handler passed test using IRQ %d", dev->irq);
+
+ printk(KERN_INFO "%s: SMSC911x/921x identified at %#08lx, IRQ: %d\n",
+ dev->name, (unsigned long)pdata->ioaddr, dev->irq);
+
+ netif_carrier_off(dev);
+
+ if (!smsc911x_phy_initialise(dev)) {
+ SMSC_WARNING("Failed to initialize PHY");
+ return -ENODEV;
+ }
+
+ smsc911x_set_mac_address(pdata, dev->dev_addr);
+
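+	/* Keep only the TX FIFO size field and enable store-and-forward */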
+ temp = smsc911x_reg_read(pdata, HW_CFG);
+ temp &= HW_CFG_TX_FIF_SZ_;
+ temp |= HW_CFG_SF_;
+ smsc911x_reg_write(temp, pdata, HW_CFG);
+
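+	/* FIFO interrupt levels: TX data available level to its maximum,
+	 * RX status level to zero */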
+ temp = smsc911x_reg_read(pdata, FIFO_INT);
+ temp |= FIFO_INT_TX_AVAIL_LEVEL_;
+ temp &= ~(FIFO_INT_RX_STS_LEVEL_);
+ smsc911x_reg_write(temp, pdata, FIFO_INT);
+
+ /* set RX Data offset to 2 bytes for alignment */
+ smsc911x_reg_write((2 << 8), pdata, RX_CFG);
+
+ napi_enable(&pdata->napi);
+
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp |= (INT_EN_TDFA_EN_ | INT_EN_RSFL_EN_ | INT_EN_PHY_INT_EN_);
+ smsc911x_reg_write(temp, pdata, INT_EN);
+
+ spin_lock_irq(&pdata->phy_lock);
+ temp = smsc911x_mac_read(pdata, MAC_CR);
+ temp |= (MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_);
+ smsc911x_mac_write(pdata, MAC_CR, temp);
+ spin_unlock_irq(&pdata->phy_lock);
+
+ smsc911x_reg_write(TX_CFG_TX_ON_, pdata, TX_CFG);
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+/* Entry point for stopping the interface */
+static int smsc911x_stop(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ napi_disable(&pdata->napi);
+
+ pdata->stop_link_poll = 1;
+ del_timer_sync(&pdata->link_poll_timer);
+
+ smsc911x_reg_write((smsc911x_reg_read(pdata, INT_CFG) &
+ (~INT_CFG_IRQ_EN_)), pdata, INT_CFG);
+ netif_stop_queue(dev);
+
+ /* At this point all Rx and Tx activity is stopped */
+ pdata->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP);
+ smsc911x_tx_update_txcounters(pdata);
+
+ SMSC_TRACE("Interface stopped");
+ return 0;
+}
+
+/* Entry point for transmitting a packet */
+static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned int freespace;
+ unsigned int tx_cmd_a;
+ unsigned int tx_cmd_b;
+ unsigned int temp;
+ u32 wrsz;
+ u32 bufp;
+
+ freespace = smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TDFREE_;
+
+ if (unlikely(freespace < TX_FIFO_LOW_THRESHOLD))
+ SMSC_WARNING("Tx data fifo low, space available: %d",
+ freespace);
+
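+	/* TX command word A carries the dword offset of the payload, the
+	 * first/last segment flags and the buffer size; command word B
+	 * carries the packet tag (the length is reused as the tag) and
+	 * the packet length */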
+ /* Word alignment adjustment */
+ tx_cmd_a = ((((unsigned int)(skb->data)) & 0x03) << 16);
+ tx_cmd_a |= TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_;
+ tx_cmd_a |= (unsigned int)skb->len;
+
+ tx_cmd_b = ((unsigned int)skb->len) << 16;
+ tx_cmd_b |= (unsigned int)skb->len;
+
+ smsc911x_reg_write(tx_cmd_a, pdata, TX_DATA_FIFO);
+ smsc911x_reg_write(tx_cmd_b, pdata, TX_DATA_FIFO);
+
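+	/* Push the payload as whole 32-bit words: align the pointer down
+	 * to a dword boundary and round the length (plus the leading
+	 * offset) up to a whole number of dwords */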
+ bufp = ((u32)skb->data) & 0xFFFFFFFC;
+ wrsz = (u32)skb->len + 3;
+ wrsz += ((u32)skb->data) & 0x3;
+ wrsz >>= 2;
+
+ smsc911x_tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
+ freespace -= (skb->len + 32);
+ dev_kfree_skb(skb);
+ dev->trans_start = jiffies;
+
+ if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30))
+ smsc911x_tx_update_txcounters(pdata);
+
+ if (freespace < TX_FIFO_LOW_THRESHOLD) {
+ netif_stop_queue(dev);
+ temp = smsc911x_reg_read(pdata, FIFO_INT);
+ temp &= 0x00FFFFFF;
+ temp |= 0x32000000;
+ smsc911x_reg_write(temp, pdata, FIFO_INT);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/* Entry point for getting status counters */
+static struct net_device_stats *smsc911x_get_stats(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ smsc911x_tx_update_txcounters(pdata);
+ pdata->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP);
+ return &pdata->stats;
+}
+
+/* Entry point for setting addressing modes */
+static void smsc911x_set_multicast_list(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned long flags;
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Enabling promiscuous mode */
+ pdata->set_bits_mask = MAC_CR_PRMS_;
+ pdata->clear_bits_mask = (MAC_CR_MCPAS_ | MAC_CR_HPFILT_);
+ pdata->hashhi = 0;
+ pdata->hashlo = 0;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ /* Enabling all multicast mode */
+ pdata->set_bits_mask = MAC_CR_MCPAS_;
+ pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_HPFILT_);
+ pdata->hashhi = 0;
+ pdata->hashlo = 0;
+ } else if (dev->mc_count > 0) {
+ /* Enabling specific multicast addresses */
+ unsigned int hash_high = 0;
+ unsigned int hash_low = 0;
+ unsigned int count = 0;
+ struct dev_mc_list *mc_list = dev->mc_list;
+
+ pdata->set_bits_mask = MAC_CR_HPFILT_;
+ pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_);
+
+ while (mc_list) {
+ count++;
+ if ((mc_list->dmi_addrlen) == ETH_ALEN) {
+ unsigned int bitnum =
+ smsc911x_hash(mc_list->dmi_addr);
+ unsigned int mask = 0x01 << (bitnum & 0x1F);
+ if (bitnum & 0x20)
+ hash_high |= mask;
+ else
+ hash_low |= mask;
+ } else {
+ SMSC_WARNING("dmi_addrlen != 6");
+ }
+ mc_list = mc_list->next;
+ }
+ if (count != (unsigned int)dev->mc_count)
+ SMSC_WARNING("mc_count != dev->mc_count");
+
+ pdata->hashhi = hash_high;
+ pdata->hashlo = hash_low;
+ } else {
+ /* Enabling local MAC address only */
+ pdata->set_bits_mask = 0;
+ pdata->clear_bits_mask =
+ (MAC_CR_PRMS_ | MAC_CR_MCPAS_ | MAC_CR_HPFILT_);
+ pdata->hashhi = 0;
+ pdata->hashlo = 0;
+ }
+
+ spin_lock_irqsave(&pdata->phy_lock, flags);
+
+ if (pdata->generation <= 1) {
+ /* Older hardware revision - cannot change these flags while
+ * receiving data */
+ if (!pdata->multicast_update_pending) {
+ unsigned int temp;
+ SMSC_TRACE("scheduling mcast update");
+ pdata->multicast_update_pending = 1;
+
+ /* Request the hardware to stop, then perform the
+ * update when we get an RX_STOP interrupt */
+ smsc911x_reg_write(INT_STS_RXSTOP_INT_, pdata, INT_STS);
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp |= INT_EN_RXSTOP_INT_EN_;
+ smsc911x_reg_write(temp, pdata, INT_EN);
+
+ temp = smsc911x_mac_read(pdata, MAC_CR);
+ temp &= ~(MAC_CR_RXEN_);
+ smsc911x_mac_write(pdata, MAC_CR, temp);
+ } else {
+ /* There is another update pending, this should now
+ * use the newer values */
+ }
+ } else {
+ /* Newer hardware revision - can write immediately */
+ smsc911x_rx_multicast_update(pdata);
+ }
+
+ spin_unlock_irqrestore(&pdata->phy_lock, flags);
+}
+
+static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned int intsts;
+ unsigned int inten;
+ unsigned int temp;
+ int serviced = IRQ_NONE;
+
+ intsts = smsc911x_reg_read(pdata, INT_STS);
+ inten = smsc911x_reg_read(pdata, INT_EN);
+
+ if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp &= (~INT_EN_SW_INT_EN_);
+ smsc911x_reg_write(temp, pdata, INT_EN);
+ smsc911x_reg_write(INT_STS_SW_INT_, pdata, INT_STS);
+ pdata->software_irq_signal = 1;
+ smp_wmb();
+ serviced = IRQ_HANDLED;
+ }
+
+ if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
+ /* Called when there is a multicast update scheduled and
+ * it is now safe to complete the update */
+ SMSC_TRACE("RX Stop interrupt");
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp &= (~INT_EN_RXSTOP_INT_EN_);
+ smsc911x_reg_write(temp, pdata, INT_EN);
+ smsc911x_reg_write(INT_STS_RXSTOP_INT_, pdata, INT_STS);
+ smsc911x_rx_multicast_update_workaround(pdata);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (intsts & inten & INT_STS_TDFA_) {
+ temp = smsc911x_reg_read(pdata, FIFO_INT);
+ temp |= FIFO_INT_TX_AVAIL_LEVEL_;
+ smsc911x_reg_write(temp, pdata, FIFO_INT);
+ smsc911x_reg_write(INT_STS_TDFA_, pdata, INT_STS);
+ netif_wake_queue(dev);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (unlikely(intsts & inten & INT_STS_RXE_)) {
+ smsc911x_reg_write(INT_STS_RXE_, pdata, INT_STS);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (likely(intsts & inten & INT_STS_RSFL_)) {
+ /* Disable Rx interrupts and schedule NAPI poll */
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp &= (~INT_EN_RSFL_EN_);
+ smsc911x_reg_write(temp, pdata, INT_EN);
+ netif_rx_schedule(dev, &pdata->napi);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (unlikely(intsts & inten & INT_STS_PHY_INT_)) {
+ smsc911x_reg_write(INT_STS_PHY_INT_, pdata, INT_STS);
+ spin_lock(&pdata->phy_lock);
+ temp = smsc911x_phy_read(pdata, MII_INTSTS);
+ spin_unlock(&pdata->phy_lock);
+ SMSC_TRACE("PHY interrupt, sts 0x%04X", (u16)temp);
+ smsc911x_phy_update_linkmode(dev, 0);
+ serviced = IRQ_HANDLED;
+ }
+ return serviced;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void smsc911x_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ smsc911x_irqhandler(0, dev);
+ enable_irq(dev->irq);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+/* Standard ioctls for mii-tool */
+static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+ unsigned long flags;
+
+ SMSC_TRACE("ioctl cmd 0x%x", cmd);
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = pdata->mii.phy_id;
+ return 0;
+ case SIOCGMIIREG:
+ spin_lock_irqsave(&pdata->phy_lock, flags);
+ data->val_out = smsc911x_phy_read(pdata, data->reg_num);
+ spin_unlock_irqrestore(&pdata->phy_lock, flags);
+ return 0;
+ case SIOCSMIIREG:
+ spin_lock_irqsave(&pdata->phy_lock, flags);
+ smsc911x_phy_write(pdata, data->reg_num, data->val_in);
+ spin_unlock_irqrestore(&pdata->phy_lock, flags);
+ return 0;
+ }
+
+ SMSC_TRACE("unsupported ioctl cmd");
+	return -EOPNOTSUPP;
+}
+
+static int
+smsc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ cmd->maxtxpkt = 1;
+ cmd->maxrxpkt = 1;
+ return mii_ethtool_gset(&pdata->mii, cmd);
+}
+
+static int
+smsc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ return mii_ethtool_sset(&pdata->mii, cmd);
+}
+
+static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver));
+ strncpy(info->version, SMSC_DRV_VERSION, sizeof(info->version));
+ strncpy(info->bus_info, dev->dev.bus_id, sizeof(info->bus_info));
+}
+
+static int smsc911x_ethtool_nwayreset(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ return mii_nway_restart(&pdata->mii);
+}
+
+static u32 smsc911x_ethtool_getmsglevel(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ return pdata->msg_enable;
+}
+
+static void smsc911x_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ pdata->msg_enable = level;
+}
+
+static int smsc911x_ethtool_getregslen(struct net_device *dev)
+{
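+	/* System CSRs (ID_REV..E2P_CMD), MAC registers (MAC_CR..WUCSR)
+	 * and 32 PHY registers, each dumped as a u32 */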
+ return (((E2P_CMD - ID_REV) / 4 + 1) + (WUCSR - MAC_CR) + 1 + 32) *
+ sizeof(u32);
+}
+
+static void
+smsc911x_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
+ void *buf)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ unsigned long flags;
+ unsigned int i;
+ unsigned int j = 0;
+ u32 *data = buf;
+
+ regs->version = pdata->idrev;
+ for (i = ID_REV; i <= E2P_CMD; i += (sizeof(u32)))
+ data[j++] = smsc911x_reg_read(pdata, i);
+
+ spin_lock_irqsave(&pdata->phy_lock, flags);
+ for (i = MAC_CR; i <= WUCSR; i++)
+ data[j++] = smsc911x_mac_read(pdata, i);
+ for (i = 0; i <= 31; i++)
+ data[j++] = smsc911x_phy_read(pdata, i);
+ spin_unlock_irqrestore(&pdata->phy_lock, flags);
+}
+
+static void smsc911x_eeprom_enable_access(struct smsc911x_data *pdata)
+{
+ unsigned int temp = smsc911x_reg_read(pdata, GPIO_CFG);
+ temp &= ~GPIO_CFG_EEPR_EN_;
+ smsc911x_reg_write(temp, pdata, GPIO_CFG);
+ msleep(1);
+}
+
+static int smsc911x_eeprom_send_cmd(struct smsc911x_data *pdata, u32 op)
+{
+ int timeout = 100;
+ u32 e2cmd;
+
+ SMSC_TRACE("op 0x%08x", op);
+ if (smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
+ SMSC_WARNING("Busy at start");
+ return -EBUSY;
+ }
+
+ e2cmd = op | E2P_CMD_EPC_BUSY_;
+ smsc911x_reg_write(e2cmd, pdata, E2P_CMD);
+
+ do {
+ msleep(1);
+ e2cmd = smsc911x_reg_read(pdata, E2P_CMD);
+	} while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout));
+
+ if (!timeout) {
+ SMSC_TRACE("TIMED OUT");
+ return -EAGAIN;
+ }
+
+ if (e2cmd & E2P_CMD_EPC_TIMEOUT_) {
+ SMSC_TRACE("Error occured during eeprom operation");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smsc911x_eeprom_read_location(struct smsc911x_data *pdata,
+ u8 address, u8 *data)
+{
+ u32 op = E2P_CMD_EPC_CMD_READ_ | address;
+ int ret;
+
+ SMSC_TRACE("address 0x%x", address);
+ ret = smsc911x_eeprom_send_cmd(pdata, op);
+
+ if (!ret)
+ data[address] = smsc911x_reg_read(pdata, E2P_DATA);
+
+ return ret;
+}
+
+static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
+ u8 address, u8 data)
+{
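+	/* The location is erased first, then written with the new value */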
+ u32 op = E2P_CMD_EPC_CMD_ERASE_ | address;
+ int ret;
+
+ SMSC_TRACE("address 0x%x, data 0x%x", address, data);
+ ret = smsc911x_eeprom_send_cmd(pdata, op);
+
+ if (!ret) {
+ op = E2P_CMD_EPC_CMD_WRITE_ | address;
+ smsc911x_reg_write((u32)data, pdata, E2P_DATA);
+ ret = smsc911x_eeprom_send_cmd(pdata, op);
+ }
+
+ return ret;
+}
+
+static int smsc911x_ethtool_get_eeprom_len(struct net_device *dev)
+{
+ return SMSC911X_EEPROM_SIZE;
+}
+
+static int smsc911x_ethtool_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ u8 eeprom_data[SMSC911X_EEPROM_SIZE];
+ int len;
+ int i;
+
+ smsc911x_eeprom_enable_access(pdata);
+
+ len = min(eeprom->len, SMSC911X_EEPROM_SIZE);
+ for (i = 0; i < len; i++) {
+ int ret = smsc911x_eeprom_read_location(pdata, i, eeprom_data);
+ if (ret < 0) {
+ eeprom->len = 0;
+ return ret;
+ }
+ }
+
+ memcpy(data, &eeprom_data[eeprom->offset], len);
+ eeprom->len = len;
+ return 0;
+}
+
+static int smsc911x_ethtool_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ int ret;
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ smsc911x_eeprom_enable_access(pdata);
+ smsc911x_eeprom_send_cmd(pdata, E2P_CMD_EPC_CMD_EWEN_);
+ ret = smsc911x_eeprom_write_location(pdata, eeprom->offset, *data);
+ smsc911x_eeprom_send_cmd(pdata, E2P_CMD_EPC_CMD_EWDS_);
+
+ /* Single byte write, according to man page */
+ eeprom->len = 1;
+
+ return ret;
+}
+
+static const struct ethtool_ops smsc911x_ethtool_ops = {
+ .get_settings = smsc911x_ethtool_getsettings,
+ .set_settings = smsc911x_ethtool_setsettings,
+ .get_link = ethtool_op_get_link,
+ .get_drvinfo = smsc911x_ethtool_getdrvinfo,
+ .nway_reset = smsc911x_ethtool_nwayreset,
+ .get_msglevel = smsc911x_ethtool_getmsglevel,
+ .set_msglevel = smsc911x_ethtool_setmsglevel,
+ .get_regs_len = smsc911x_ethtool_getregslen,
+ .get_regs = smsc911x_ethtool_getregs,
+ .get_eeprom_len = smsc911x_ethtool_get_eeprom_len,
+ .get_eeprom = smsc911x_ethtool_get_eeprom,
+ .set_eeprom = smsc911x_ethtool_set_eeprom,
+};
+
+/* Initializing private device structures */
+static int smsc911x_init(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ SMSC_TRACE("Driver Parameters:");
+ SMSC_TRACE("LAN base: 0x%08lX", (unsigned long)pdata->ioaddr);
+ SMSC_TRACE("IRQ: %d", dev->irq);
+ SMSC_TRACE("PHY will be autodetected.");
+
+ if (pdata->ioaddr == 0) {
+ SMSC_WARNING("pdata->ioaddr: 0x00000000");
+ return -ENODEV;
+ }
+
+ /* Default generation to zero (all workarounds apply) */
+ pdata->generation = 0;
+
+ pdata->idrev = smsc911x_reg_read(pdata, ID_REV);
+ if (((pdata->idrev >> 16) & 0xFFFF) == (pdata->idrev & 0xFFFF)) {
+ SMSC_WARNING("idrev top 16 bits equal to bottom 16 bits, "
+ "idrev: 0x%08X", pdata->idrev);
+ SMSC_TRACE("This may mean the chip is set for 32 bit while "
+ "the bus is reading as 16 bit");
+ return -ENODEV;
+ }
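+	/* The upper 16 bits of ID_REV identify the chip family; the lower
+	 * 16 bits give the silicon revision, which determines the
+	 * workaround generation used by the rest of the driver */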
+ switch (pdata->idrev & 0xFFFF0000) {
+ case 0x01180000:
+ switch (pdata->idrev & 0x0000FFFFUL) {
+ case 0UL:
+ SMSC_TRACE("LAN9118 Beacon identified, idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 0;
+ break;
+ case 1UL:
+ SMSC_TRACE
+ ("LAN9118 Concord A0 identified, idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 1;
+ break;
+ case 2UL:
+ SMSC_TRACE
+ ("LAN9118 Concord A1 identified, idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 2;
+ break;
+ default:
+ SMSC_TRACE
+ ("LAN9118 Concord A1 identified (NEW), idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 2;
+ break;
+ }
+ break;
+
+ case 0x01170000:
+ switch (pdata->idrev & 0x0000FFFFUL) {
+ case 0UL:
+ SMSC_TRACE("LAN9117 Beacon identified, idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 0;
+ break;
+ case 1UL:
+ SMSC_TRACE
+ ("LAN9117 Concord A0 identified, idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 1;
+ break;
+ case 2UL:
+ SMSC_TRACE
+ ("LAN9117 Concord A1 identified, idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 2;
+ break;
+ default:
+ SMSC_TRACE
+ ("LAN9117 Concord A1 identified (NEW), idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 2;
+ break;
+ }
+ break;
+
+ case 0x01160000:
+ switch (pdata->idrev & 0x0000FFFFUL) {
+ case 0UL:
+ SMSC_WARNING("LAN911x not identified, idrev: 0x%08X",
+ pdata->idrev);
+ return -ENODEV;
+ case 1UL:
+ SMSC_TRACE
+ ("LAN9116 Concord A0 identified, idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 1;
+ break;
+ case 2UL:
+ SMSC_TRACE
+ ("LAN9116 Concord A1 identified, idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 2;
+ break;
+ default:
+ SMSC_TRACE
+ ("LAN9116 Concord A1 identified (NEW), idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 2;
+ break;
+ }
+ break;
+
+ case 0x01150000:
+ switch (pdata->idrev & 0x0000FFFFUL) {
+ case 0UL:
+ SMSC_WARNING("LAN911x not identified, idrev: 0x%08X",
+ pdata->idrev);
+ return -ENODEV;
+ case 1UL:
+ SMSC_TRACE
+ ("LAN9115 Concord A0 identified, idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 1;
+ break;
+ case 2UL:
+ SMSC_TRACE
+ ("LAN9115 Concord A1 identified, idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 2;
+ break;
+ default:
+ SMSC_TRACE
+ ("LAN9115 Concord A1 identified (NEW), idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 2;
+ break;
+ }
+ break;
+
+ case 0x118A0000UL:
+ switch (pdata->idrev & 0x0000FFFFUL) {
+ case 0UL:
+ SMSC_TRACE
+ ("LAN9218 Boylston identified, idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 3;
+ break;
+ default:
+ SMSC_TRACE
+ ("LAN9218 Boylston identified (NEW), idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 3;
+ break;
+ }
+ break;
+
+ case 0x117A0000UL:
+ switch (pdata->idrev & 0x0000FFFFUL) {
+ case 0UL:
+ SMSC_TRACE
+ ("LAN9217 Boylston identified, idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 3;
+ break;
+ default:
+ SMSC_TRACE
+ ("LAN9217 Boylston identified (NEW), idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 3;
+ break;
+ }
+ break;
+
+ case 0x116A0000UL:
+ switch (pdata->idrev & 0x0000FFFFUL) {
+ case 0UL:
+ SMSC_TRACE
+ ("LAN9216 Boylston identified, idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 3;
+ break;
+ default:
+ SMSC_TRACE
+ ("LAN9216 Boylston identified (NEW), idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 3;
+ break;
+ }
+ break;
+
+ case 0x115A0000UL:
+ switch (pdata->idrev & 0x0000FFFFUL) {
+ case 0UL:
+ SMSC_TRACE
+ ("LAN9215 Boylston identified, idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 3;
+ break;
+ default:
+ SMSC_TRACE
+ ("LAN9215 Boylston identified (NEW), idrev: 0x%08X",
+ pdata->idrev);
+ pdata->generation = 3;
+ break;
+ }
+ break;
+
+ default:
+ SMSC_WARNING("LAN911x not identified, idrev: 0x%08X",
+ pdata->idrev);
+ return -ENODEV;
+ }
+
+ if (pdata->generation == 0)
+ SMSC_WARNING("This driver is not intended "
+ "for this chip revision");
+
+ ether_setup(dev);
+ dev->open = smsc911x_open;
+ dev->stop = smsc911x_stop;
+ dev->hard_start_xmit = smsc911x_hard_start_xmit;
+ dev->get_stats = smsc911x_get_stats;
+ dev->set_multicast_list = smsc911x_set_multicast_list;
+ dev->flags |= IFF_MULTICAST;
+ dev->do_ioctl = smsc911x_do_ioctl;
+ netif_napi_add(dev, &pdata->napi, smsc911x_poll, 64);
+ dev->ethtool_ops = &smsc911x_ethtool_ops;
+
+ /* Check if mac address has been specified when bringing interface up */
+ if (is_valid_ether_addr(dev->dev_addr)) {
+ smsc911x_set_mac_address(pdata, dev->dev_addr);
+ SMSC_TRACE("MAC Address is specified by configuration");
+ } else {
+ /* Try reading mac address from device. if EEPROM is present
+ * it will already have been set */
+ u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH);
+ u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL);
+ dev->dev_addr[0] = (u8)(mac_low32);
+ dev->dev_addr[1] = (u8)(mac_low32 >> 8);
+ dev->dev_addr[2] = (u8)(mac_low32 >> 16);
+ dev->dev_addr[3] = (u8)(mac_low32 >> 24);
+ dev->dev_addr[4] = (u8)(mac_high16);
+ dev->dev_addr[5] = (u8)(mac_high16 >> 8);
+
+ if (is_valid_ether_addr(dev->dev_addr)) {
+ /* eeprom values are valid so use them */
+ SMSC_TRACE("Mac Address is read from LAN911x EEPROM");
+ } else {
+ /* eeprom values are invalid, generate random MAC */
+ random_ether_addr(dev->dev_addr);
+ smsc911x_set_mac_address(pdata, dev->dev_addr);
+ SMSC_TRACE("MAC Address is set to random_ether_addr");
+ }
+ }
+
+ printk(KERN_INFO
+ "%s: SMSC911x MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dev->name, dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+ dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = smsc911x_poll_controller;
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+ pdata->mii.phy_id_mask = 0x1f;
+ pdata->mii.reg_num_mask = 0x1f;
+ pdata->mii.force_media = 0;
+ pdata->mii.full_duplex = 0;
+ pdata->mii.dev = dev;
+ pdata->mii.mdio_read = smsc911x_mdio_read;
+ pdata->mii.mdio_write = smsc911x_mdio_write;
+
+ pdata->msg_enable = NETIF_MSG_LINK;
+
+ return 0;
+}
+
+static int smsc911x_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct smsc911x_data *pdata;
+ struct resource *res;
+
+ dev = platform_get_drvdata(pdev);
+ BUG_ON(!dev);
+ pdata = netdev_priv(dev);
+ BUG_ON(!pdata);
+ BUG_ON(!pdata->ioaddr);
+
+ SMSC_TRACE("Stopping driver.");
+ platform_set_drvdata(pdev, NULL);
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "smsc911x-memory");
+ if (!res)
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ release_mem_region(res->start, res->end - res->start);
+
+ iounmap(pdata->ioaddr);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static int smsc911x_drv_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct smsc911x_data *pdata;
+ struct resource *res;
+ unsigned int intcfg = 0;
+ int res_size;
+ int retval;
+
+ printk(KERN_INFO "%s: Driver version %s.\n", SMSC_CHIPNAME,
+ SMSC_DRV_VERSION);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "smsc911x-memory");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ printk(KERN_WARNING "%s: Could not allocate resource.\n",
+ SMSC_CHIPNAME);
+ retval = -ENODEV;
+ goto out_0;
+ }
+ res_size = res->end - res->start;
+
+ if (!request_mem_region(res->start, res_size, SMSC_CHIPNAME)) {
+ retval = -EBUSY;
+ goto out_0;
+ }
+
+ dev = alloc_etherdev(sizeof(struct smsc911x_data));
+ if (!dev) {
+ printk(KERN_WARNING "%s: Could not allocate device.\n",
+ SMSC_CHIPNAME);
+ retval = -ENOMEM;
+ goto out_release_io_1;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ pdata = netdev_priv(dev);
+ pdata->netdev = dev;
+
+ dev->irq = platform_get_irq(pdev, 0);
+ pdata->ioaddr = ioremap_nocache(res->start, res_size);
+
+ /* copy config parameters across if present, otherwise pdata
+ * defaults to zeros */
+ if (pdev->dev.platform_data) {
+ struct smsc911x_platform_config *config = pdev->dev.platform_data;
+ pdata->irq_polarity = config->irq_polarity;
+ pdata->irq_type = config->irq_type;
+ }
+
+ if (pdata->ioaddr == NULL) {
+ SMSC_WARNING("Error smsc911x base address invalid");
+ retval = -ENOMEM;
+ goto out_free_netdev_2;
+ }
+
+ if ((retval = smsc911x_init(dev)) < 0)
+ goto out_unmap_io_3;
+
+ /* configure irq polarity and type before connecting isr */
+ if (pdata->irq_polarity)
+ intcfg |= INT_CFG_IRQ_POL_;
+
+ if (pdata->irq_type)
+ intcfg |= INT_CFG_IRQ_TYPE_;
+
+ smsc911x_reg_write(intcfg, pdata, INT_CFG);
+
+ retval = request_irq(dev->irq, smsc911x_irqhandler, IRQF_DISABLED,
+ SMSC_CHIPNAME, dev);
+ if (retval) {
+ SMSC_WARNING("Unable to claim requested irq: %d", dev->irq);
+ goto out_unmap_io_3;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ retval = register_netdev(dev);
+ if (retval) {
+ SMSC_WARNING("Error %i registering device", retval);
+ goto out_unset_drvdata_4;
+ } else {
+ SMSC_TRACE("Network interface: \"%s\"", dev->name);
+ }
+
+ return 0;
+
+out_unset_drvdata_4:
+ platform_set_drvdata(pdev, NULL);
+ free_irq(dev->irq, dev);
+out_unmap_io_3:
+ iounmap(pdata->ioaddr);
+out_free_netdev_2:
+ free_netdev(dev);
+out_release_io_1:
+ release_mem_region(res->start, res->end - res->start);
+out_0:
+ return retval;
+}
+
+static struct platform_driver smsc911x_driver = {
+ .probe = smsc911x_drv_probe,
+ .remove = smsc911x_drv_remove,
+ .driver = {
+ .name = SMSC_CHIPNAME,
+ },
+};
+
+/* Entry point for loading the module */
+static int __init smsc911x_init_module(void)
+{
+ return platform_driver_register(&smsc911x_driver);
+}
+
+/* entry point for unloading the module */
+static void __exit smsc911x_cleanup_module(void)
+{
+ platform_driver_unregister(&smsc911x_driver);
+}
+
+module_init(smsc911x_init_module);
+module_exit(smsc911x_cleanup_module);
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
new file mode 100644
index 000000000000..d67c9971b136
--- /dev/null
+++ b/drivers/net/smsc911x.h
@@ -0,0 +1,395 @@
+/***************************************************************************
+ *
+ * Copyright (C) 2004-2007 SMSC
+ * Copyright (C) 2005 ARM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ***************************************************************************/
+#ifndef __SMSC911X_H__
+#define __SMSC911X_H__
+
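+/* Bus access configuration per board: some i.MX boards attach the LAN
+ * chip over SPI or a 16-bit bus rather than a 32-bit bus */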
+#if defined(CONFIG_MACH_MX37_3DS) || defined(CONFIG_MACH_MX25_3DS)
+#define SMSC_CAN_USE_SPI 1
+#define SMSC_CAN_USE_32BIT 0
+#elif defined(CONFIG_MACH_MX31_3DS) || defined(CONFIG_MACH_MX35_3DS)
+#define SMSC_CAN_USE_32BIT 0
+#define SMSC_CAN_USE_SPI 0
+#else
+#define SMSC_CAN_USE_32BIT 1
+#define SMSC_CAN_USE_SPI 0
+#endif
+
+#define TX_FIFO_LOW_THRESHOLD (u32)1600
+#define SMSC911X_EEPROM_SIZE (u32)7
+#define USE_DEBUG 0
+
+/* implements a PHY loopback test at initialisation time, to ensure a packet
+ * can be successfully looped back */
+#define USE_PHY_WORK_AROUND
+
+/* 10/100 LED link-state inversion when media is disconnected */
+#define USE_LED1_WORK_AROUND
+
+/* platform_device configuration data, should be assigned to
+ * the platform_device's dev.platform_data */
+struct smsc911x_platform_config {
+ unsigned int irq_polarity;
+ unsigned int irq_type;
+};
+
+#if USE_DEBUG >= 1
+#define SMSC_WARNING(fmt, args...) \
+ printk(KERN_EMERG "SMSC_WARNING: %s: " fmt "\n", \
+ __FUNCTION__ , ## args)
+#else
+#define SMSC_WARNING(msg, args...)
+#endif /* USE_DEBUG >= 1 */
+
+#if USE_DEBUG >= 2
+#define SMSC_TRACE(fmt,args...) \
+ printk(KERN_EMERG "SMSC_TRACE: %s: " fmt "\n", \
+ __FUNCTION__ , ## args)
+#else
+#define SMSC_TRACE(msg,args...)
+#endif /* USE_DEBUG >= 2 */
+
+/* SMSC911x registers and bitfields */
+#define RX_DATA_FIFO 0x00
+
+#define TX_DATA_FIFO 0x20
+#define TX_CMD_A_ON_COMP_ 0x80000000
+#define TX_CMD_A_BUF_END_ALGN_ 0x03000000
+#define TX_CMD_A_4_BYTE_ALGN_ 0x00000000
+#define TX_CMD_A_16_BYTE_ALGN_ 0x01000000
+#define TX_CMD_A_32_BYTE_ALGN_ 0x02000000
+#define TX_CMD_A_DATA_OFFSET_ 0x001F0000
+#define TX_CMD_A_FIRST_SEG_ 0x00002000
+#define TX_CMD_A_LAST_SEG_ 0x00001000
+#define TX_CMD_A_BUF_SIZE_ 0x000007FF
+#define TX_CMD_B_PKT_TAG_ 0xFFFF0000
+#define TX_CMD_B_ADD_CRC_DISABLE_ 0x00002000
+#define TX_CMD_B_DISABLE_PADDING_ 0x00001000
+#define TX_CMD_B_PKT_BYTE_LENGTH_ 0x000007FF
+
+#define RX_STATUS_FIFO 0x40
+#define RX_STS_ES_ 0x00008000
+#define RX_STS_MCAST_ 0x00000400
+
+#define RX_STATUS_FIFO_PEEK 0x44
+
+#define TX_STATUS_FIFO 0x48
+#define TX_STS_ES_ 0x00008000
+
+#define TX_STATUS_FIFO_PEEK 0x4C
+
+#define ID_REV 0x50
+#define ID_REV_CHIP_ID_ 0xFFFF0000
+#define ID_REV_REV_ID_ 0x0000FFFF
+
+#define INT_CFG 0x54
+#define INT_CFG_INT_DEAS_ 0xFF000000
+#define INT_CFG_INT_DEAS_CLR_ 0x00004000
+#define INT_CFG_INT_DEAS_STS_ 0x00002000
+#define INT_CFG_IRQ_INT_ 0x00001000
+#define INT_CFG_IRQ_EN_ 0x00000100
+#define INT_CFG_IRQ_POL_ 0x00000010
+#define INT_CFG_IRQ_TYPE_ 0x00000001
+
+#define INT_STS 0x58
+#define INT_STS_SW_INT_ 0x80000000
+#define INT_STS_TXSTOP_INT_ 0x02000000
+#define INT_STS_RXSTOP_INT_ 0x01000000
+#define INT_STS_RXDFH_INT_ 0x00800000
+#define INT_STS_RXDF_INT_ 0x00400000
+#define INT_STS_TX_IOC_ 0x00200000
+#define INT_STS_RXD_INT_ 0x00100000
+#define INT_STS_GPT_INT_ 0x00080000
+#define INT_STS_PHY_INT_ 0x00040000
+#define INT_STS_PME_INT_ 0x00020000
+#define INT_STS_TXSO_ 0x00010000
+#define INT_STS_RWT_ 0x00008000
+#define INT_STS_RXE_ 0x00004000
+#define INT_STS_TXE_ 0x00002000
+#define INT_STS_TDFU_ 0x00000800
+#define INT_STS_TDFO_ 0x00000400
+#define INT_STS_TDFA_ 0x00000200
+#define INT_STS_TSFF_ 0x00000100
+#define INT_STS_TSFL_ 0x00000080
+#define INT_STS_RXDF_ 0x00000040
+#define INT_STS_RDFL_ 0x00000020
+#define INT_STS_RSFF_ 0x00000010
+#define INT_STS_RSFL_ 0x00000008
+#define INT_STS_GPIO2_INT_ 0x00000004
+#define INT_STS_GPIO1_INT_ 0x00000002
+#define INT_STS_GPIO0_INT_ 0x00000001
+
+#define INT_EN 0x5C
+#define INT_EN_SW_INT_EN_ 0x80000000
+#define INT_EN_TXSTOP_INT_EN_ 0x02000000
+#define INT_EN_RXSTOP_INT_EN_ 0x01000000
+#define INT_EN_RXDFH_INT_EN_ 0x00800000
+#define INT_EN_TIOC_INT_EN_ 0x00200000
+#define INT_EN_RXD_INT_EN_ 0x00100000
+#define INT_EN_GPT_INT_EN_ 0x00080000
+#define INT_EN_PHY_INT_EN_ 0x00040000
+#define INT_EN_PME_INT_EN_ 0x00020000
+#define INT_EN_TXSO_EN_ 0x00010000
+#define INT_EN_RWT_EN_ 0x00008000
+#define INT_EN_RXE_EN_ 0x00004000
+#define INT_EN_TXE_EN_ 0x00002000
+#define INT_EN_TDFU_EN_ 0x00000800
+#define INT_EN_TDFO_EN_ 0x00000400
+#define INT_EN_TDFA_EN_ 0x00000200
+#define INT_EN_TSFF_EN_ 0x00000100
+#define INT_EN_TSFL_EN_ 0x00000080
+#define INT_EN_RXDF_EN_ 0x00000040
+#define INT_EN_RDFL_EN_ 0x00000020
+#define INT_EN_RSFF_EN_ 0x00000010
+#define INT_EN_RSFL_EN_ 0x00000008
+#define INT_EN_GPIO2_INT_ 0x00000004
+#define INT_EN_GPIO1_INT_ 0x00000002
+#define INT_EN_GPIO0_INT_ 0x00000001
+
+#define BYTE_TEST 0x64
+
+#define FIFO_INT 0x68
+#define FIFO_INT_TX_AVAIL_LEVEL_ 0xFF000000
+#define FIFO_INT_TX_STS_LEVEL_ 0x00FF0000
+#define FIFO_INT_RX_AVAIL_LEVEL_ 0x0000FF00
+#define FIFO_INT_RX_STS_LEVEL_ 0x000000FF
+
+#define RX_CFG 0x6C
+#define RX_CFG_RX_END_ALGN_ 0xC0000000
+#define RX_CFG_RX_END_ALGN4_ 0x00000000
+#define RX_CFG_RX_END_ALGN16_ 0x40000000
+#define RX_CFG_RX_END_ALGN32_ 0x80000000
+#define RX_CFG_RX_DMA_CNT_ 0x0FFF0000
+#define RX_CFG_RX_DUMP_ 0x00008000
+#define RX_CFG_RXDOFF_ 0x00001F00
+
+#define TX_CFG 0x70
+#define TX_CFG_TXS_DUMP_ 0x00008000
+#define TX_CFG_TXD_DUMP_ 0x00004000
+#define TX_CFG_TXSAO_ 0x00000004
+#define TX_CFG_TX_ON_ 0x00000002
+#define TX_CFG_STOP_TX_ 0x00000001
+
+#define HW_CFG 0x74
+#define HW_CFG_TTM_ 0x00200000
+#define HW_CFG_SF_ 0x00100000
+#define HW_CFG_TX_FIF_SZ_ 0x000F0000
+#define HW_CFG_TR_ 0x00003000
+#define HW_CFG_SRST_ 0x00000001
+
+/* only available on 115/117 */
+#define HW_CFG_PHY_CLK_SEL_ 0x00000060
+#define HW_CFG_PHY_CLK_SEL_INT_PHY_ 0x00000000
+#define HW_CFG_PHY_CLK_SEL_EXT_PHY_ 0x00000020
+#define HW_CFG_PHY_CLK_SEL_CLK_DIS_ 0x00000040
+#define HW_CFG_SMI_SEL_ 0x00000010
+#define HW_CFG_EXT_PHY_DET_ 0x00000008
+#define HW_CFG_EXT_PHY_EN_ 0x00000004
+#define HW_CFG_SRST_TO_ 0x00000002
+
+/* only available on 116/118 */
+#define HW_CFG_32_16_BIT_MODE_ 0x00000004
+
+#define RX_DP_CTRL 0x78
+#define RX_DP_CTRL_RX_FFWD_ 0x80000000
+
+#define RX_FIFO_INF 0x7C
+#define RX_FIFO_INF_RXSUSED_ 0x00FF0000
+#define RX_FIFO_INF_RXDUSED_ 0x0000FFFF
+
+#define TX_FIFO_INF 0x80
+#define TX_FIFO_INF_TSUSED_ 0x00FF0000
+#define TX_FIFO_INF_TDFREE_ 0x0000FFFF
+
+#define PMT_CTRL 0x84
+#define PMT_CTRL_PM_MODE_ 0x00003000
+#define PMT_CTRL_PM_MODE_D0_ 0x00000000
+#define PMT_CTRL_PM_MODE_D1_ 0x00001000
+#define PMT_CTRL_PM_MODE_D2_ 0x00002000
+#define PMT_CTRL_PM_MODE_D3_ 0x00003000
+#define PMT_CTRL_PHY_RST_ 0x00000400
+#define PMT_CTRL_WOL_EN_ 0x00000200
+#define PMT_CTRL_ED_EN_ 0x00000100
+#define PMT_CTRL_PME_TYPE_ 0x00000040
+#define PMT_CTRL_WUPS_ 0x00000030
+#define PMT_CTRL_WUPS_NOWAKE_ 0x00000000
+#define PMT_CTRL_WUPS_ED_ 0x00000010
+#define PMT_CTRL_WUPS_WOL_ 0x00000020
+#define PMT_CTRL_WUPS_MULTI_ 0x00000030
+#define PMT_CTRL_PME_IND_ 0x00000008
+#define PMT_CTRL_PME_POL_ 0x00000004
+#define PMT_CTRL_PME_EN_ 0x00000002
+#define PMT_CTRL_READY_ 0x00000001
+
+#define GPIO_CFG 0x88
+#define GPIO_CFG_LED3_EN_ 0x40000000
+#define GPIO_CFG_LED2_EN_ 0x20000000
+#define GPIO_CFG_LED1_EN_ 0x10000000
+#define GPIO_CFG_GPIO2_INT_POL_ 0x04000000
+#define GPIO_CFG_GPIO1_INT_POL_ 0x02000000
+#define GPIO_CFG_GPIO0_INT_POL_ 0x01000000
+#define GPIO_CFG_EEPR_EN_ 0x00700000
+#define GPIO_CFG_GPIOBUF2_ 0x00040000
+#define GPIO_CFG_GPIOBUF1_ 0x00020000
+#define GPIO_CFG_GPIOBUF0_ 0x00010000
+#define GPIO_CFG_GPIODIR2_ 0x00000400
+#define GPIO_CFG_GPIODIR1_ 0x00000200
+#define GPIO_CFG_GPIODIR0_ 0x00000100
+#define GPIO_CFG_GPIOD4_ 0x00000020
+#define GPIO_CFG_GPIOD3_ 0x00000010
+#define GPIO_CFG_GPIOD2_ 0x00000004
+#define GPIO_CFG_GPIOD1_ 0x00000002
+#define GPIO_CFG_GPIOD0_ 0x00000001
+
+#define GPT_CFG 0x8C
+#define GPT_CFG_TIMER_EN_ 0x20000000
+#define GPT_CFG_GPT_LOAD_ 0x0000FFFF
+
+#define GPT_CNT 0x90
+#define GPT_CNT_GPT_CNT_ 0x0000FFFF
+
+#define ENDIAN 0x98
+
+#define FREE_RUN 0x9C
+
+#define RX_DROP 0xA0
+
+#define MAC_CSR_CMD 0xA4
+#define MAC_CSR_CMD_CSR_BUSY_ 0x80000000
+#define MAC_CSR_CMD_R_NOT_W_ 0x40000000
+#define MAC_CSR_CMD_CSR_ADDR_ 0x000000FF
+
+#define MAC_CSR_DATA 0xA8
+
+#define AFC_CFG 0xAC
+#define AFC_CFG_AFC_HI_ 0x00FF0000
+#define AFC_CFG_AFC_LO_ 0x0000FF00
+#define AFC_CFG_BACK_DUR_ 0x000000F0
+#define AFC_CFG_FCMULT_ 0x00000008
+#define AFC_CFG_FCBRD_ 0x00000004
+#define AFC_CFG_FCADD_ 0x00000002
+#define AFC_CFG_FCANY_ 0x00000001
+
+#define E2P_CMD 0xB0
+#define E2P_CMD_EPC_BUSY_ 0x80000000
+#define E2P_CMD_EPC_CMD_ 0x70000000
+#define E2P_CMD_EPC_CMD_READ_ 0x00000000
+#define E2P_CMD_EPC_CMD_EWDS_ 0x10000000
+#define E2P_CMD_EPC_CMD_EWEN_ 0x20000000
+#define E2P_CMD_EPC_CMD_WRITE_ 0x30000000
+#define E2P_CMD_EPC_CMD_WRAL_ 0x40000000
+#define E2P_CMD_EPC_CMD_ERASE_ 0x50000000
+#define E2P_CMD_EPC_CMD_ERAL_ 0x60000000
+#define E2P_CMD_EPC_CMD_RELOAD_ 0x70000000
+#define E2P_CMD_EPC_TIMEOUT_ 0x00000200
+#define E2P_CMD_MAC_ADDR_LOADED_ 0x00000100
+#define E2P_CMD_EPC_ADDR_ 0x000000FF
+
+#define E2P_DATA 0xB4
+#define E2P_DATA_EEPROM_DATA_ 0x000000FF
+#define LAN_REGISTER_EXTENT 0x00000100
+
+/*
+ * MAC Control and Status Register (Indirect Address)
+ * Offset (through the MAC_CSR CMD and DATA port)
+ */
+#define MAC_CR 0x01
+#define MAC_CR_RXALL_ 0x80000000
+#define MAC_CR_HBDIS_ 0x10000000
+#define MAC_CR_RCVOWN_ 0x00800000
+#define MAC_CR_LOOPBK_ 0x00200000
+#define MAC_CR_FDPX_ 0x00100000
+#define MAC_CR_MCPAS_ 0x00080000
+#define MAC_CR_PRMS_ 0x00040000
+#define MAC_CR_INVFILT_ 0x00020000
+#define MAC_CR_PASSBAD_ 0x00010000
+#define MAC_CR_HFILT_ 0x00008000
+#define MAC_CR_HPFILT_ 0x00002000
+#define MAC_CR_LCOLL_ 0x00001000
+#define MAC_CR_BCAST_ 0x00000800
+#define MAC_CR_DISRTY_ 0x00000400
+#define MAC_CR_PADSTR_ 0x00000100
+#define MAC_CR_BOLMT_MASK_ 0x000000C0
+#define MAC_CR_DFCHK_ 0x00000020
+#define MAC_CR_TXEN_ 0x00000008
+#define MAC_CR_RXEN_ 0x00000004
+
+#define ADDRH 0x02
+
+#define ADDRL 0x03
+
+#define HASHH 0x04
+
+#define HASHL 0x05
+
+#define MII_ACC 0x06
+#define MII_ACC_PHY_ADDR_ 0x0000F800
+#define MII_ACC_MIIRINDA_ 0x000007C0
+#define MII_ACC_MII_WRITE_ 0x00000002
+#define MII_ACC_MII_BUSY_ 0x00000001
+
+#define MII_DATA 0x07
+
+#define FLOW 0x08
+#define FLOW_FCPT_ 0xFFFF0000
+#define FLOW_FCPASS_ 0x00000004
+#define FLOW_FCEN_ 0x00000002
+#define FLOW_FCBSY_ 0x00000001
+
+#define VLAN1 0x09
+
+#define VLAN2 0x0A
+
+#define WUFF 0x0B
+
+#define WUCSR 0x0C
+#define WUCSR_GUE_ 0x00000200
+#define WUCSR_WUFR_ 0x00000040
+#define WUCSR_MPR_ 0x00000020
+#define WUCSR_WAKE_EN_ 0x00000004
+#define WUCSR_MPEN_ 0x00000002
+
+/*
+ * Phy definitions (vendor-specific)
+ */
+#define LAN9118_PHY_ID 0x00C0001C
+
+#define MII_INTSTS 0x1D
+
+#define MII_INTMSK 0x1E
+#define PHY_INTMSK_AN_RCV_ (1 << 1)
+#define PHY_INTMSK_PDFAULT_ (1 << 2)
+#define PHY_INTMSK_AN_ACK_ (1 << 3)
+#define PHY_INTMSK_LNKDOWN_ (1 << 4)
+#define PHY_INTMSK_RFAULT_ (1 << 5)
+#define PHY_INTMSK_AN_COMP_ (1 << 6)
+#define PHY_INTMSK_ENERGYON_ (1 << 7)
+#define PHY_INTMSK_DEFAULT_ (PHY_INTMSK_ENERGYON_ | \
+ PHY_INTMSK_AN_COMP_ | \
+ PHY_INTMSK_RFAULT_ | \
+ PHY_INTMSK_LNKDOWN_)
+
+#define ADVERTISE_PAUSE_ALL (ADVERTISE_PAUSE_CAP | \
+ ADVERTISE_PAUSE_ASYM)
+
+#define LPA_PAUSE_ALL (LPA_PAUSE_CAP | \
+ LPA_PAUSE_ASYM)
+
+#endif /* __SMSC911X_H__ */