Diffstat (limited to 'drivers/net/fec.c')
 drivers/net/fec.c | 744 +++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 658 insertions(+), 86 deletions(-)
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index ecd5c71a7a8a..94d55a17d11c 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -18,6 +18,9 @@
* Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
* Copyright (c) 2004-2006 Macq Electronique SA.
*/
+/*
+ * Copyright 2006-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -36,16 +39,29 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
+#include <asm/mach-types.h>
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M532x)
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include "fec.h"
+#define FEC_ALIGNMENT (0x03) /* FEC needs 4-byte alignment */
+#elif defined(CONFIG_ARCH_MXC)
+#include <mach/hardware.h>
+#include <mach/iim.h>
+#include "fec.h"
+#define FEC_ALIGNMENT (0x0F) /* FEC needs 128-bit (16-byte) alignment */
+#endif
+
+#define FEC_ADDR_ALIGNMENT(x) ((unsigned char *)(((unsigned long)(x) + (FEC_ALIGNMENT)) & (~FEC_ALIGNMENT)))
#if defined(CONFIG_FEC2)
#define FEC_MAX_PORTS 2
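The FEC_ADDR_ALIGNMENT() macro above rounds a buffer pointer up to the next alignment boundary by adding the mask and clearing the low bits; an already-aligned address maps to itself. A minimal user-space sketch of the arithmetic (the address value is made up for illustration):

    #include <stdio.h>

    #define FEC_ALIGNMENT (0x0F)    /* i.MX mask: 16-byte alignment */
    #define FEC_ADDR_ALIGNMENT(x) \
            ((unsigned char *)(((unsigned long)(x) + (FEC_ALIGNMENT)) & (~FEC_ALIGNMENT)))

    int main(void)
    {
            /* 0x1003 + 0x0f = 0x1012; masking the low four bits gives
             * 0x1010, the next 16-byte boundary; 0x1010 itself is kept. */
            unsigned long raw = 0x1003;
            printf("0x%lx -> %p\n", raw, (void *)FEC_ADDR_ALIGNMENT(raw));
            return 0;
    }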
@@ -53,7 +69,7 @@
#define FEC_MAX_PORTS 1
#endif
-#if defined(CONFIG_M5272)
+#if defined(CONFIG_M5272) || defined(CONFIG_ARCH_MXC)
#define HAVE_mii_link_interrupt
#endif
@@ -72,6 +88,8 @@ static unsigned int fec_hw[] = {
(MCF_MBAR+0x30000),
#elif defined(CONFIG_M532x)
(MCF_MBAR+0xfc030000),
+#elif defined(CONFIG_ARCH_MXC)
+ (IO_ADDRESS(FEC_BASE_ADDR)),
#else
&(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec),
#endif
@@ -149,6 +167,12 @@ typedef struct {
#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
+#ifndef CONFIG_ARCH_MXC
+#define FEC_ENET_MASK ((uint)0xffc00000)
+#else
+#define FEC_ENET_MASK ((uint)0xfff80000)
+#endif
+
/* The FEC stores dest/src/type, data, and checksum for receive packets.
*/
#define PKT_MAXBUF_SIZE 1518
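The wider i.MX mask takes in three extra error events in the low bits of the event register. A sketch of the layout, assuming the customary FEC EIR bit assignments (worth checking against the part's reference manual):

    /* Event bits covered by the two masks (sketch):
     *   0xffc00000: HBERR(31) BABR(30) BABT(29) GRA(28) TXF(27)
     *               TXB(26) RXF(25) RXB(24) MII(23) EBERR(22)
     *   0xfff80000 adds: LC(21) RL(20) UN(19) on i.MX parts
     */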
@@ -162,7 +186,7 @@ typedef struct {
* account when setting it.
*/
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE 0
@@ -185,11 +209,13 @@ struct fec_enet_private {
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
unsigned char *tx_bounce[TX_RING_SIZE];
struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
ushort skb_cur;
ushort skb_dirty;
/* CPM dual port RAM relative addresses.
*/
+ void *cbd_mem_base; /* virtual base address of the rx & tx buffer descriptors */
cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
cbd_t *tx_bd_base;
cbd_t *cur_rx, *cur_tx; /* The next free ring entry */
@@ -206,6 +232,7 @@ struct fec_enet_private {
uint phy_speed;
phy_info_t const *phy;
struct work_struct phy_task;
+ struct net_device *net;
uint sequence_done;
uint mii_phy_task_queued;
@@ -217,6 +244,8 @@ struct fec_enet_private {
int link;
int old_link;
int full_duplex;
+
+ struct clk *clk;
};
static int fec_enet_open(struct net_device *dev);
@@ -231,6 +260,17 @@ static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);
static void fec_set_mac_address(struct net_device *dev);
+static void __inline__ fec_dcache_inv_range(void * start, void * end);
+static void __inline__ fec_dcache_flush_range(void * start, void * end);
+
+/*
+ * fec_copy_threshold controls copying when receiving an ethernet frame.
+ * If the ethernet header is 4-byte aligned, the IP and upper-layer headers
+ * will not be, because the ethernet header is 14 bytes long.
+ * The maximum combined TCP and IP header size is 128 bytes (normally it is
+ * 40 bytes), so a sensible threshold lies between 128 and 256.
+ */
+static int fec_copy_threshold = -1;
/* MII processing. We keep this as simple as possible. Requests are
* placed on the list (if there is room). When the request is finished
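In outline, the receive path this threshold drives looks like the condensed sketch below (taken from the rx hunks later in this patch, with error handling trimmed). Small frames are copied into a fresh skb with a 2-byte reserve so the IP header lands on a 4-byte boundary; large frames are handed upstream whole and the descriptor is re-armed with a newly allocated skb:

    /* Condensed sketch of the copybreak decision in fec_enet_rx(). */
    if ((pkt_len - 4) < fec_copy_threshold) {
            /* copy path: the 2-byte reserve re-aligns the IP header */
            skb = dev_alloc_skb(pkt_len);
            skb_reserve(skb, 2);
            skb_put(skb, pkt_len - 4);
            skb_copy_to_linear_data(skb, data, pkt_len - 4);
    } else {
            /* swap path: pass the filled skb up, give the ring a new one */
            struct sk_buff *pskb = fep->rx_skbuff[rx_index];

            skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
            fec_dcache_inv_range(skb->data, skb->data + FEC_ENET_RX_FRSIZE);
            fep->rx_skbuff[rx_index] = skb;
            skb->data = FEC_ADDR_ALIGNMENT(skb->data);
            bdp->cbd_bufaddr = __pa(skb->data);
            skb_put(pskb, pkt_len - 4);
            skb = pskb;
    }
    skb->protocol = eth_type_trans(skb, dev);
    netif_rx(skb);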
@@ -342,10 +382,10 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
* 4-byte boundaries. Use bounce buffers to copy data
* and get it aligned. Ugh.
*/
- if (bdp->cbd_bufaddr & 0x3) {
+ if ((bdp->cbd_bufaddr) & FEC_ALIGNMENT) {
unsigned int index;
index = bdp - fep->tx_bd_base;
- memcpy(fep->tx_bounce[index], (void *) bdp->cbd_bufaddr, bdp->cbd_datlen);
+ memcpy(fep->tx_bounce[index], (void *) skb->data, skb->len);
bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);
}
@@ -359,8 +399,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Push the data cache so the CPM does not get stale memory
* data.
*/
- flush_dcache_range((unsigned long)skb->data,
- (unsigned long)skb->data + skb->len);
+ fec_dcache_flush_range(__va(bdp->cbd_bufaddr), __va(bdp->cbd_bufaddr) +
+ bdp->cbd_datlen);
/* Send it on its way. Tell FEC it's ready, interrupt when done,
* it's the last BD of the frame, and to put the CRC on the end.
@@ -373,7 +413,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
/* Trigger transmission start */
- fecp->fec_x_des_active = 0;
+ fecp->fec_x_des_active = 0x01000000;
/* If this was the last BD in the ring, start at the beginning again.
*/
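Writing 0x01000000 instead of 0 sets bit 24, which appears to be the descriptor-active flag in the i.MX TDAR/RDAR registers (on ColdFire, a write of any value triggers the ring scan). A named constant would make the intent clearer; a sketch, with the name being my own:

    #define FEC_DES_ACTIVE  0x01000000      /* bit 24: ring "active" flag */

    fecp->fec_x_des_active = FEC_DES_ACTIVE;        /* kick the transmitter */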
@@ -460,7 +500,7 @@ fec_enet_interrupt(int irq, void * dev_id)
/* Handle receive event in its own function.
*/
- if (int_events & FEC_ENET_RXF) {
+ if (int_events & (FEC_ENET_RXF | FEC_ENET_RXB)) {
ret = IRQ_HANDLED;
fec_enet_rx(dev);
}
@@ -469,7 +509,7 @@ fec_enet_interrupt(int irq, void * dev_id)
descriptors. FEC handles all errors, we just discover
them as part of the transmit process.
*/
- if (int_events & FEC_ENET_TXF) {
+ if (int_events & (FEC_ENET_TXF | FEC_ENET_TXB)) {
ret = IRQ_HANDLED;
fec_enet_tx(dev);
}
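With the per-buffer (RXB/TXB) events added, the dispatch in fec_enet_interrupt() reduces to the following shape (a condensed sketch of the surrounding function, not the patch verbatim):

    irqreturn_t ret = IRQ_NONE;
    uint int_events;

    while ((int_events = fecp->fec_ievent) != 0) {
            fecp->fec_ievent = int_events;  /* ack by writing the bits back */

            if (int_events & (FEC_ENET_RXF | FEC_ENET_RXB)) {
                    ret = IRQ_HANDLED;
                    fec_enet_rx(dev);       /* frame done or buffer done */
            }
            if (int_events & (FEC_ENET_TXF | FEC_ENET_TXB)) {
                    ret = IRQ_HANDLED;
                    fec_enet_tx(dev);
            }
            if (int_events & FEC_ENET_MII) {
                    ret = IRQ_HANDLED;
                    fec_enet_mii(dev);
            }
    }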
@@ -572,6 +612,7 @@ fec_enet_rx(struct net_device *dev)
struct sk_buff *skb;
ushort pkt_len;
__u8 *data;
+ int rx_index;
#ifdef CONFIG_M532x
flush_cache_all();
@@ -588,7 +629,7 @@ fec_enet_rx(struct net_device *dev)
bdp = fep->cur_rx;
while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
-
+ rx_index = bdp - fep->rx_bd_base;
#ifndef final_version
/* Since we have allocated space to hold a complete frame,
* the last indicator should be set.
@@ -638,14 +679,31 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
* include that when passing upstream as it messes up
* bridging applications.
*/
- skb = dev_alloc_skb(pkt_len-4);
+ if ((pkt_len - 4) < fec_copy_threshold) {
+ skb = dev_alloc_skb(pkt_len);
+ } else {
+ skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
+ }
if (skb == NULL) {
printk("%s: Memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
} else {
- skb_put(skb,pkt_len-4); /* Make room */
- skb_copy_to_linear_data(skb, data, pkt_len-4);
+ if ((pkt_len - 4) < fec_copy_threshold) {
+ skb_reserve(skb, 2); /* skip 2 bytes so the IP header is 4-byte aligned */
+ skb_put(skb,pkt_len-4); /* Make room */
+ skb_copy_to_linear_data(skb, data, pkt_len-4);
+ } else {
+ struct sk_buff * pskb = fep->rx_skbuff[rx_index];
+
+ fec_dcache_inv_range(skb->data, skb->data +
+ FEC_ENET_RX_FRSIZE);
+ fep->rx_skbuff[rx_index] = skb;
+ skb->data = FEC_ADDR_ALIGNMENT(skb->data);
+ bdp->cbd_bufaddr = __pa(skb->data);
+ skb_put(pskb,pkt_len-4); /* Make room */
+ skb = pskb;
+ }
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
}
@@ -672,7 +730,7 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
* incoming frames. On a heavily loaded network, we should be
* able to keep up at the expense of system resources.
*/
- fecp->fec_r_des_active = 0;
+ fecp->fec_r_des_active = 0x01000000;
#endif
} /* while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) */
fep->cur_rx = (cbd_t *)bdp;
@@ -1207,6 +1265,48 @@ static phy_info_t phy_info_dp83848= {
},
};
+static phy_info_t phy_info_lan8700 = {
+ 0x0007C0C,
+ "LAN8700",
+ (const phy_cmd_t []) { /* config */
+ { mk_mii_read(MII_REG_CR), mii_parse_cr },
+ { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* startup */
+ { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+ { mk_mii_read(MII_REG_SR), mii_parse_sr },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* act_int */
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* shutdown */
+ { mk_mii_end, }
+ },
+};
+
+static phy_info_t phy_info_lan8710 = {
+ 0x0007C0F,
+ "LAN8710",
+ (const phy_cmd_t []) { /* config */
+ { mk_mii_read(MII_REG_CR), mii_parse_cr },
+ { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* startup */
+ { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+ { mk_mii_read(MII_REG_SR), mii_parse_sr },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* act_int */
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* shutdown */
+ { mk_mii_end, }
+ },
+};
+
/* ------------------------------------------------------------------------- */
static phy_info_t const * const phy_info[] = {
@@ -1216,6 +1316,8 @@ static phy_info_t const * const phy_info[] = {
&phy_info_am79c874,
&phy_info_ks8721bl,
&phy_info_dp83848,
+ &phy_info_lan8700,
+ &phy_info_lan8710,
NULL
};
@@ -1227,6 +1329,21 @@ mii_link_interrupt(int irq, void * dev_id);
#if defined(CONFIG_M5272)
/*
+ * do some architecture-specific initialization for this chip
+ */
+static void __inline__ fec_arch_init(void)
+{
+ return;
+}
+/*
+ * do some architecture-specific cleanup for this chip
+ */
+static void __inline__ fec_arch_exit(void)
+{
+ return;
+}
+
+/*
* Code specific to Coldfire 5272 setup.
*/
static void __inline__ fec_request_intrs(struct net_device *dev)
@@ -1327,15 +1444,40 @@ static void __inline__ fec_phy_ack_intr(void)
*icrp = 0x0d000000;
}
-static void __inline__ fec_localhw_setup(void)
+static void __inline__ fec_localhw_setup(struct net_device *dev)
+{
+}
+
+/*
+ * invalidate the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_inv_range(void * start, void * end)
+{
+ return ;
+}
+
+/*
+ * flush the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_flush_range(void * start, void * end)
+{
+ return ;
+}
+
+/*
+ * map the memory range (addr, addr+size) as uncacheable.
+ */
+static unsigned long __inline__ fec_map_uncache(unsigned long addr, int size)
{
+ return addr;
}
/*
- * Do not need to make region uncached on 5272.
+ * unmap the uncacheable memory area starting at addr.
*/
-static void __inline__ fec_uncache(unsigned long addr)
+static void __inline__ fec_unmap_uncache(void * addr)
{
+ return ;
}
/* ------------------------------------------------------------------------- */
@@ -1343,6 +1485,22 @@ static void __inline__ fec_uncache(unsigned long addr)
#elif defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
/*
+ * do some architecture-specific initialization for this chip
+ */
+static void __inline__ fec_arch_init(void)
+{
+ return;
+}
+
+/*
+ * do some architecture-specific cleanup for this chip
+ */
+static void __inline__ fec_arch_exit(void)
+{
+ return;
+}
+
+/*
* Code specific to Coldfire 5230/5231/5232/5234/5235,
* the 5270/5271/5274/5275 and 5280/5282 setups.
*/
@@ -1489,20 +1647,58 @@ static void __inline__ fec_phy_ack_intr(void)
{
}
-static void __inline__ fec_localhw_setup(void)
+static void __inline__ fec_localhw_setup(struct net_device *dev)
{
}
+/*
+ * invalidate the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_inv_range(void * start, void * end)
+{
+ return ;
+}
/*
- * Do not need to make region uncached on 5272.
+ * flush the dcache over the virtual memory range (start, end)
*/
-static void __inline__ fec_uncache(unsigned long addr)
+static void __inline__ fec_dcache_flush_range(void * start, void * end)
{
+ return ;
+}
+
+/*
+ * map the memory range (addr, addr+size) as uncacheable.
+ */
+static unsigned long __inline__ fec_map_uncache(unsigned long addr, int size)
+{
+ return addr;
+}
+
+/*
+ * unmap the uncacheable memory area starting at addr.
+ */
+static void __inline__ fec_unmap_uncache(void * addr)
+{
+ return ;
}
/* ------------------------------------------------------------------------- */
#elif defined(CONFIG_M520x)
+/*
+ * do some architecture-specific initialization for this chip
+ */
+static void __inline__ fec_arch_init(void)
+{
+ return;
+}
+/*
+ * do some architecture-specific cleanup for this chip
+ */
+static void __inline__ fec_arch_exit(void)
+{
+ return;
+}
/*
* Code specific to Coldfire 520x
@@ -1610,17 +1806,63 @@ static void __inline__ fec_phy_ack_intr(void)
{
}
-static void __inline__ fec_localhw_setup(void)
+static void __inline__ fec_localhw_setup(struct net_device *dev)
+{
+}
+
+/*
+ * invalidate the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_inv_range(void * start, void * end)
+{
+ return ;
+}
+
+/*
+ * flush the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_flush_range(void * start, void * end)
{
+ return ;
}
-static void __inline__ fec_uncache(unsigned long addr)
+/*
+ * map the memory range (addr, addr+size) as uncacheable.
+ */
+static unsigned long __inline__ fec_map_uncache(unsigned long addr, int size)
{
+ return addr;
}
+/*
+ * unmap the uncacheable memory area starting at addr.
+ */
+static void __inline__ fec_unmap_uncache(void * addr)
+{
+ return ;
+}
+
+
/* ------------------------------------------------------------------------- */
#elif defined(CONFIG_M532x)
+
+/*
+ * do some architecture-specific initialization for this chip
+ */
+static void __inline__ fec_arch_init(void)
+{
+ return;
+}
+
+/*
+ * do some architecture-specific cleanup for this chip
+ */
+static void __inline__ fec_arch_exit(void)
+{
+ return;
+}
+
/*
* Code specific for M532x
*/
@@ -1749,21 +1991,297 @@ static void __inline__ fec_phy_ack_intr(void)
{
}
-static void __inline__ fec_localhw_setup(void)
+static void __inline__ fec_localhw_setup(struct net_device *dev)
{
}
/*
- * Do not need to make region uncached on 532x.
+ * invalidate the dcache over the virtual memory range (start, end)
*/
-static void __inline__ fec_uncache(unsigned long addr)
+static void __inline__ fec_dcache_inv_range(void * start, void * end)
{
+ return ;
+}
+
+/*
+ * flush the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_flush_range(void * start, void * end)
+{
+ return ;
+}
+
+/*
+ * map the memory range (addr, addr+size) as uncacheable.
+ */
+static unsigned long __inline__ fec_map_uncache(unsigned long addr, int size)
+{
+ return addr;
+}
+
+/*
+ * unmap the uncacheable memory area starting at addr.
+ */
+static void __inline__ fec_unmap_uncache(void * addr)
+{
+ return ;
}
/* ------------------------------------------------------------------------- */
+#elif defined(CONFIG_ARCH_MXC)
+
+extern void gpio_fec_active(void);
+extern void gpio_fec_inactive(void);
+extern unsigned int expio_intr_fec;
+
+/*
+ * do some architecture-specific initialization for this chip
+ */
+static void __inline__ fec_arch_init(void)
+{
+ struct clk *clk;
+ gpio_fec_active();
+ clk = clk_get(NULL, "fec_clk");
+ clk_enable(clk);
+ clk_put(clk);
+ return;
+}
+/*
+ * do some architecture-specific cleanup for this chip
+ */
+static void __inline__ fec_arch_exit(void)
+{
+ struct clk *clk;
+ clk = clk_get(NULL, "fec_clk");
+ clk_disable(clk);
+ clk_put(clk);
+ gpio_fec_inactive();
+ return;
+}
+
+/*
+ * Code specific to Freescale i.MXC
+ */
+static void __inline__ fec_request_intrs(struct net_device *dev)
+{
+ /* Setup interrupt handlers. */
+ if (request_irq(MXC_INT_FEC, fec_enet_interrupt, 0, "fec", dev) != 0)
+ panic("FEC: Could not allocate FEC IRQ(%d)!\n", MXC_INT_FEC);
+ /* TODO: disable now due to CPLD issue */
+ if ((expio_intr_fec > 0) &&
+ (request_irq(expio_intr_fec, mii_link_interrupt, 0, "fec(MII)", dev) != 0))
+ panic("FEC: Could not allocate FEC(MII) IRQ(%d)!\n", expio_intr_fec);
+ disable_irq(expio_intr_fec);
+}
+
+static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
+{
+ u32 rate;
+ struct clk *clk;
+ volatile fec_t *fecp;
+ fecp = fep->hwp;
+ fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
+ fecp->fec_x_cntrl = 0x00;
+
+ /*
+ * Set MII speed to 2.5 MHz
+ */
+ clk = clk_get(NULL, "fec_clk");
+ rate = clk_get_rate(clk);
+ clk_put(clk);
+
+ fep->phy_speed =
+ ((((rate / 2 + 4999999) / 2500000) / 2) & 0x3F) << 1;
+ fecp->fec_mii_speed = fep->phy_speed;
+ fec_restart(dev, 0);
+}
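The divisor arithmetic targets an MII management clock of at most 2.5 MHz, derived from half the fec_clk rate. A worked example, assuming a hypothetical 66 MHz fec_clk:

    u32 rate = 66000000;    /* hypothetical fec_clk rate */
    /* (33000000 + 4999999) / 2500000 = 15;  15 / 2 = 7 */
    u32 div = ((rate / 2 + 4999999) / 2500000) / 2;
    u32 phy_speed = (div & 0x3F) << 1;      /* = 14, written to MII_SPEED */
    /* resulting MDC = (rate / 2) / (2 * div) = 33 MHz / 14, about 2.36 MHz */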
+
+#define FEC_IIM_BASE IO_ADDRESS(IIM_BASE_ADDR)
+static void __inline__ fec_get_mac(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ volatile fec_t *fecp;
+ unsigned char *iap, tmpaddr[ETH_ALEN];
+ int i;
+ unsigned long fec_mac_base = FEC_IIM_BASE + MXC_IIMKEY0;
+ fecp = fep->hwp;
+
+ if (fecp->fec_addr_low || fecp->fec_addr_high) {
+ *((unsigned long *) &tmpaddr[0]) =
+ be32_to_cpu(fecp->fec_addr_low);
+ *((unsigned short *) &tmpaddr[4]) =
+ be32_to_cpu(fecp->fec_addr_high);
+ iap = &tmpaddr[0];
+ } else {
+ if (cpu_is_mx27_rev(CHIP_REV_2_0) > 0)
+ fec_mac_base = FEC_IIM_BASE + MXC_IIMMAC;
+
+ memset(tmpaddr, 0, ETH_ALEN);
+ if (!(machine_is_mx35_3ds() || cpu_is_mx51())) {
+ /*
+ * Get MAC address from IIM.
+ * If it is all 1's or 0's, use the default.
+ */
+ for (i = 0; i < ETH_ALEN; i++)
+ tmpaddr[ETH_ALEN-1-i] =
+ __raw_readb(fec_mac_base + i * 4);
+ }
+ iap = &tmpaddr[0];
+
+ if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
+ (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
+ iap = fec_mac_default;
+ if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
+ (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
+ iap = fec_mac_default;
+ }
+
+ memcpy(dev->dev_addr, iap, ETH_ALEN);
+
+ /* Adjust MAC if using default MAC address */
+ if (iap == fec_mac_default)
+ dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
+}
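So the address is taken from the FEC address registers if the bootloader programmed them, else from the IIM fuse bank, falling back to fec_mac_default when the fuses read all zeros or all ones. As an aside, the two six-byte comparisons could be folded into the kernel's is_valid_ether_addr() helper, which rejects both the all-zeros and the broadcast address; a sketch:

    #include <linux/etherdevice.h>

    if (!is_valid_ether_addr(tmpaddr))
            iap = fec_mac_default;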
+
+#ifndef MODULE
+static int fec_mac_setup(char *new_mac)
+{
+ char *ptr, *p = new_mac;
+ int i = 0;
+
+ while (p && (*p) && i < 6) {
+ ptr = strchr(p, ':');
+ if (ptr)
+ *ptr++ = '\0';
+
+ if (strlen(p)) {
+ unsigned long tmp = simple_strtoul(p, NULL, 16);
+ if (tmp > 0xff)
+ break;
+ fec_mac_default[i++] = tmp;
+ }
+ p = ptr;
+ }
+
+ return 0;
+}
+
+__setup("fec_mac=", fec_mac_setup);
+#endif
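Usage: the parser takes a colon-separated address from the kernel command line, one hex byte per field, for example:

    fec_mac=00:04:9f:00:12:34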
+
+static void __inline__ fec_enable_phy_intr(void)
+{
+ if (expio_intr_fec > 0)
+ enable_irq(expio_intr_fec);
+}
+
+static void __inline__ fec_disable_phy_intr(void)
+{
+ if (expio_intr_fec > 0)
+ disable_irq(expio_intr_fec);
+}
+
+static void __inline__ fec_phy_ack_intr(void)
+{
+ if (expio_intr_fec > 0)
+ disable_irq(expio_intr_fec);
+}
+
+#ifdef CONFIG_ARCH_MX25
+/*
+ * i.MX25 allows RMII mode to be configured via a gasket
+ */
+#define FEC_MIIGSK_CFGR_FRCONT (1 << 6)
+#define FEC_MIIGSK_CFGR_LBMODE (1 << 4)
+#define FEC_MIIGSK_CFGR_EMODE (1 << 3)
+#define FEC_MIIGSK_CFGR_IF_MODE_MASK (3 << 0)
+#define FEC_MIIGSK_CFGR_IF_MODE_MII (0 << 0)
+#define FEC_MIIGSK_CFGR_IF_MODE_RMII (1 << 0)
+
+#define FEC_MIIGSK_ENR_READY (1 << 2)
+#define FEC_MIIGSK_ENR_EN (1 << 1)
+
+static void __inline__ fec_localhw_setup(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ volatile fec_t *fecp = fep->hwp;
+ /*
+ * Set up the MII gasket for RMII mode
+ */
+ printk("%s: enable RMII gasket\n", dev->name);
+ /* disable the gasket and wait */
+ fecp->fec_miigsk_enr = 0;
+ while (fecp->fec_miigsk_enr & FEC_MIIGSK_ENR_READY)
+ udelay(1);
+
+ /* configure the gasket for RMII, 50 MHz, no loopback, no echo */
+ fecp->fec_miigsk_cfgr = FEC_MIIGSK_CFGR_IF_MODE_RMII;
+
+ /* re-enable the gasket */
+ fecp->fec_miigsk_enr = FEC_MIIGSK_ENR_EN;
+}
#else
+static void __inline__ fec_localhw_setup(struct net_device *dev)
+{
+}
+#endif
+
+/*
+ * invalidate the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_inv_range(void * start, void * end)
+{
+ dma_sync_single_for_device(NULL, (unsigned long)__pa(start),
+ (unsigned long)(end - start),
+ DMA_FROM_DEVICE);
+ return ;
+}
+
+/*
+ * flush the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_flush_range(void * start, void * end)
+{
+ dma_sync_single_for_device(NULL, (unsigned long)__pa(start),
+ (unsigned long)(end - start), DMA_TO_DEVICE);
+ return ;
+}
+
+/*
+ * map the memory range (addr, addr+size) as uncacheable.
+ */
+static unsigned long __inline__ fec_map_uncache(unsigned long addr, int size)
+{
+ return (unsigned long)ioremap(__pa(addr), size);
+}
+
+/*
+ * unmap the uncacheable memory area starting at addr.
+ */
+static void __inline__ fec_unmap_uncache(void * addr)
+{
+ return iounmap(addr);
+}
+
+/* ------------------------------------------------------------------------- */
+
+#else
+/*
+ * do some architecture-specific initialization for this chip
+ */
+static void __inline__ fec_arch_init(void)
+{
+ return;
+}
+/*
+ * do some architecture-specific cleanup for this chip
+ */
+static void __inline__ fec_arch_exit(void)
+{
+ return;
+}
/*
* Code specific to the MPC860T setup.
@@ -1831,7 +2349,7 @@ static void __inline__ fec_phy_ack_intr(void)
{
}
-static void __inline__ fec_localhw_setup(void)
+static void __inline__ fec_localhw_setup(struct net_device *dev)
{
volatile fec_t *fecp;
@@ -1842,12 +2360,40 @@ static void __inline__ fec_localhw_setup(void)
fecp->fec_fun_code = 0x78000000;
}
-static void __inline__ fec_uncache(unsigned long addr)
+/*
+ * invalidate the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_inv_range(void * start, void * end)
+{
+ return ;
+}
+
+/*
+ * flush the dcache over the virtual memory range (start, end)
+ */
+static void __inline__ fec_dcache_flush_range(void * start, void * end)
+{
+ return ;
+}
+
+/*
+ * map the memory range (addr, addr+size) as uncacheable.
+ */
+static unsigned long __inline__ fec_map_uncache(unsigned long addr, int size)
{
pte_t *pte;
pte = va_to_pte(addr);
pte_val(*pte) |= _PAGE_NO_CACHE;
flush_tlb_page(init_mm.mmap, addr);
+ return addr;
+}
+
+/*
+ * unmap the uncacheable memory area starting at addr.
+ */
+static void __inline__ fec_unmap_uncache(void * addr)
+{
+ return ;
}
#endif
@@ -2073,10 +2619,14 @@ mii_link_interrupt(int irq, void * dev_id)
#if 0
disable_irq(fep->mii_irq); /* disable now, enable later */
#endif
-
- mii_do_cmd(dev, fep->phy->ack_int);
- mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */
-
+ /*
+ * Some boards raise the PHY interrupt before the PHY is enabled,
+ * and at that moment fep->phy is not yet initialized.
+ */
+ if (fep->phy) {
+ mii_do_cmd(dev, fep->phy->ack_int);
+ mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */
+ }
return IRQ_HANDLED;
}
#endif
@@ -2086,6 +2636,7 @@ fec_enet_open(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ fec_arch_init();
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
@@ -2094,6 +2645,8 @@ fec_enet_open(struct net_device *dev)
fep->sequence_done = 0;
fep->link = 0;
+ fec_restart(dev, 1);
+
if (fep->phy) {
mii_do_cmd(dev, fep->phy->ack_int);
mii_do_cmd(dev, fep->phy->config);
@@ -2110,18 +2663,14 @@ fec_enet_open(struct net_device *dev)
schedule();
mii_do_cmd(dev, fep->phy->startup);
-
- /* Set the initial link state to true. A lot of hardware
- * based on this device does not implement a PHY interrupt,
- * so we are never notified of link change.
- */
- fep->link = 1;
- } else {
- fep->link = 1; /* lets just try it and see */
- /* no phy, go full duplex, it's most likely a hub chip */
- fec_restart(dev, 1);
}
+ /* Set the initial link state to true. A lot of hardware
+ * based on this device does not implement a PHY interrupt,
+ * so we are never notified of link change.
+ */
+ fep->link = 1;
+
netif_start_queue(dev);
fep->opened = 1;
return 0; /* Success */
@@ -2135,9 +2684,10 @@ fec_enet_close(struct net_device *dev)
/* Don't know what to do yet.
*/
fep->opened = 0;
- netif_stop_queue(dev);
- fec_stop(dev);
-
+ if (fep->link) {
+ fec_stop(dev);
+ }
+ fec_arch_exit();
return 0;
}
@@ -2248,6 +2798,7 @@ int __init fec_enet_init(struct net_device *dev)
unsigned long mem_addr;
volatile cbd_t *bdp;
cbd_t *cbd_base;
+ struct sk_buff* pskb;
volatile fec_t *fecp;
int i, j;
static int index = 0;
@@ -2256,6 +2807,8 @@ int __init fec_enet_init(struct net_device *dev)
if (index >= FEC_MAX_PORTS)
return -ENXIO;
+ fep->net = dev;
+
/* Allocate memory for buffer descriptors.
*/
mem_addr = __get_free_page(GFP_KERNEL);
@@ -2264,6 +2817,7 @@ int __init fec_enet_init(struct net_device *dev)
return -ENOMEM;
}
+ fep->cbd_mem_base = (void *)mem_addr;
spin_lock_init(&fep->hw_lock);
spin_lock_init(&fep->mii_lock);
@@ -2288,10 +2842,14 @@ int __init fec_enet_init(struct net_device *dev)
*/
fec_get_mac(dev);
- cbd_base = (cbd_t *)mem_addr;
- /* XXX: missing check for allocation failure */
+ cbd_base = (cbd_t *)fec_map_uncache(mem_addr, PAGE_SIZE);
+ if (cbd_base == NULL) {
+ free_page(mem_addr);
+ printk("FEC: map descriptor memory to uncacheable failed?\n");
+ return -ENOMEM;
+ }
- fec_uncache(mem_addr);
+ /* XXX: missing check for allocation failure */
/* Set receive and transmit descriptor base.
*/
@@ -2306,25 +2864,26 @@ int __init fec_enet_init(struct net_device *dev)
/* Initialize the receive buffer descriptors.
*/
bdp = fep->rx_bd_base;
- for (i=0; i<FEC_ENET_RX_PAGES; i++) {
-
- /* Allocate a page.
- */
- mem_addr = __get_free_page(GFP_KERNEL);
- /* XXX: missing check for allocation failure */
-
- fec_uncache(mem_addr);
-
- /* Initialize the BD for every fragment in the page.
- */
- for (j=0; j<FEC_ENET_RX_FRPPG; j++) {
- bdp->cbd_sc = BD_ENET_RX_EMPTY;
- bdp->cbd_bufaddr = __pa(mem_addr);
- mem_addr += FEC_ENET_RX_FRSIZE;
- bdp++;
+ for (i=0; i<RX_RING_SIZE; i++, bdp++) {
+ pskb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
+ if(pskb == NULL) {
+ for(; i>0; i--) {
+ if( fep->rx_skbuff[i-1] ) {
+ kfree_skb(fep->rx_skbuff[i-1]);
+ fep->rx_skbuff[i-1] = NULL;
+ }
+ }
+ printk("FEC: allocate skb fail when initializing rx buffer \n");
+ free_page(mem_addr);
+ return -ENOMEM;
}
+ fep->rx_skbuff[i] = pskb;
+ fec_dcache_inv_range(pskb->data, pskb->data +
+ FEC_ENET_RX_FRSIZE);
+ pskb->data = FEC_ADDR_ALIGNMENT(pskb->data);
+ bdp->cbd_sc = BD_ENET_RX_EMPTY;
+ bdp->cbd_bufaddr = __pa(pskb->data);
}
-
/* Set the last buffer to wrap.
*/
bdp--;
@@ -2357,19 +2916,23 @@ int __init fec_enet_init(struct net_device *dev)
/* Set receive and transmit descriptor base.
*/
- fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base));
- fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base));
+ fecp->fec_r_des_start = __pa((uint)(fep->cbd_mem_base));
+ fecp->fec_x_des_start = __pa((uint)(fep->cbd_mem_base + RX_RING_SIZE*sizeof(cbd_t)));
/* Install our interrupt handlers. This varies depending on
* the architecture.
*/
fec_request_intrs(dev);
+ /* Clear and enable interrupts */
+ fecp->fec_ievent = FEC_ENET_MASK;
+ fecp->fec_imask = FEC_ENET_TXF | FEC_ENET_TXB | FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII;
+
fecp->fec_grp_hash_table_high = 0;
fecp->fec_grp_hash_table_low = 0;
fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
fecp->fec_ecntrl = 2;
- fecp->fec_r_des_active = 0;
+ fecp->fec_r_des_active = 0x01000000;
#ifndef CONFIG_M5272
fecp->fec_hash_table_high = 0;
fecp->fec_hash_table_low = 0;
@@ -2392,10 +2955,6 @@ int __init fec_enet_init(struct net_device *dev)
/* setup MII interface */
fec_set_mii(dev, fep);
- /* Clear and enable interrupts */
- fecp->fec_ievent = 0xffc00000;
- fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII);
-
/* Queue up command to detect the PHY and initialize the
* remainder of the interface.
*/
@@ -2427,9 +2986,15 @@ fec_restart(struct net_device *dev, int duplex)
fecp->fec_ecntrl = 1;
udelay(10);
+ /* Enable interrupts we wish to service.
+ */
+ fecp->fec_imask = FEC_ENET_TXF | FEC_ENET_TXB | FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII;
+
/* Clear any outstanding interrupt.
- */
- fecp->fec_ievent = 0xffc00000;
+ *
+ */
+ fecp->fec_ievent = FEC_ENET_MASK;
+
fec_enable_phy_intr();
/* Set station address.
@@ -2445,12 +3010,12 @@ fec_restart(struct net_device *dev, int duplex)
*/
fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
- fec_localhw_setup();
+ fec_localhw_setup(dev);
/* Set receive and transmit descriptor base.
*/
- fecp->fec_r_des_start = __pa((uint)(fep->rx_bd_base));
- fecp->fec_x_des_start = __pa((uint)(fep->tx_bd_base));
+ fecp->fec_r_des_start = __pa((uint)(fep->cbd_mem_base));
+ fecp->fec_x_des_start = __pa((uint)(fep->cbd_mem_base + RX_RING_SIZE*sizeof(cbd_t)));
fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
fep->cur_rx = fep->rx_bd_base;
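Note that the physical ring bases are computed from cbd_mem_base, the cached linear address returned by __get_free_page(), where __pa() is valid; rx_bd_base and tx_bd_base now point into the ioremap()ed uncacheable alias, which __pa() generally cannot translate. The two rings sit back to back in the one descriptor page:

    /* Layout of the descriptor page (sketch):
     *
     *  +-- fec_r_des_start = __pa(cbd_mem_base)
     *  |                      +-- fec_x_des_start = __pa(cbd_mem_base
     *  |                      |       + RX_RING_SIZE * sizeof(cbd_t))
     *  v                      v
     *  [ RX_RING_SIZE cbd_t ][ TX_RING_SIZE cbd_t ]
     */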
@@ -2517,11 +3082,7 @@ fec_restart(struct net_device *dev, int duplex)
/* And last, enable the transmit and receive processing.
*/
fecp->fec_ecntrl = 2;
- fecp->fec_r_des_active = 0;
-
- /* Enable interrupts we wish to service.
- */
- fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII);
+ fecp->fec_r_des_active = 0x01000000;
}
static void
@@ -2530,6 +3091,8 @@ fec_stop(struct net_device *dev)
volatile fec_t *fecp;
struct fec_enet_private *fep;
+ netif_stop_queue(dev);
+
fep = netdev_priv(dev);
fecp = fep->hwp;
@@ -2561,15 +3124,18 @@ fec_stop(struct net_device *dev)
static int __init fec_enet_module_init(void)
{
struct net_device *dev;
- int i, err;
+ int i, err, ret = 0;
DECLARE_MAC_BUF(mac);
printk("FEC ENET Version 0.2\n");
+ fec_arch_init();
for (i = 0; (i < FEC_MAX_PORTS); i++) {
dev = alloc_etherdev(sizeof(struct fec_enet_private));
- if (!dev)
- return -ENOMEM;
+ if (!dev) {
+ ret = -ENOMEM;
+ goto exit;
+ }
err = fec_enet_init(dev);
if (err) {
free_netdev(dev);
@@ -2578,13 +3144,19 @@ static int __init fec_enet_module_init(void)
if (register_netdev(dev) != 0) {
/* XXX: missing cleanup here */
free_netdev(dev);
- return -EIO;
+ ret = -EIO;
+ goto exit;
}
printk("%s: ethernet %s\n",
dev->name, print_mac(mac, dev->dev_addr));
}
- return 0;
+
+exit:
+ fec_arch_exit();
+ return ret;
+
+
}
module_init(fec_enet_module_init);