Diffstat (limited to 'drivers/net/cxgb3')
-rw-r--r--  drivers/net/cxgb3/adapter.h         40
-rw-r--r--  drivers/net/cxgb3/common.h          29
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c     217
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c   18
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.h    2
-rw-r--r--  drivers/net/cxgb3/regs.h            11
-rw-r--r--  drivers/net/cxgb3/sge.c            446
-rw-r--r--  drivers/net/cxgb3/t3_hw.c          162
-rw-r--r--  drivers/net/cxgb3/t3cdev.h           3
-rw-r--r--  drivers/net/cxgb3/version.h          2
10 files changed, 606 insertions(+), 324 deletions(-)
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 80c3d8f268a7..20e887de2545 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -50,7 +50,9 @@ typedef irqreturn_t(*intr_handler_t) (int, void *);
struct vlan_group;
+struct adapter;
struct port_info {
+ struct adapter *adapter;
struct vlan_group *vlan_grp;
const struct port_type_info *port_type;
u8 port_id;
@@ -71,27 +73,29 @@ enum { /* adapter flags */
QUEUES_BOUND = (1 << 3),
};
+struct fl_pg_chunk {
+ struct page *page;
+ void *va;
+ unsigned int offset;
+};
+
struct rx_desc;
struct rx_sw_desc;
-struct sge_fl_page {
- struct skb_frag_struct frag;
- unsigned char *va;
-};
-
-struct sge_fl { /* SGE per free-buffer list state */
- unsigned int buf_size; /* size of each Rx buffer */
- unsigned int credits; /* # of available Rx buffers */
- unsigned int size; /* capacity of free list */
- unsigned int cidx; /* consumer index */
- unsigned int pidx; /* producer index */
- unsigned int gen; /* free list generation */
- unsigned int cntxt_id; /* SGE context id for the free list */
- struct sge_fl_page page;
- struct rx_desc *desc; /* address of HW Rx descriptor ring */
- struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
- dma_addr_t phys_addr; /* physical address of HW ring start */
- unsigned long empty; /* # of times queue ran out of buffers */
+struct sge_fl { /* SGE per free-buffer list state */
+ unsigned int buf_size; /* size of each Rx buffer */
+ unsigned int credits; /* # of available Rx buffers */
+ unsigned int size; /* capacity of free list */
+ unsigned int cidx; /* consumer index */
+ unsigned int pidx; /* producer index */
+ unsigned int gen; /* free list generation */
+ struct fl_pg_chunk pg_chunk;/* page chunk cache */
+ unsigned int use_pages; /* whether FL uses pages or sk_buffs */
+ struct rx_desc *desc; /* address of HW Rx descriptor ring */
+ struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
+ dma_addr_t phys_addr; /* physical address of HW ring start */
+ unsigned int cntxt_id; /* SGE context id for the free list */
+ unsigned long empty; /* # of times queue ran out of buffers */
unsigned long alloc_failed; /* # of times buffer allocation failed */
};
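[Editorial note, not part of the patch: the new fl_pg_chunk cache replaces the per-buffer
sge_fl_page. Below is a rough sketch of the bookkeeping it enables; the driver's real logic
is alloc_pg_chunk() in sge.c, further down in this patch. The comment values assume a
4096-byte PAGE_SIZE and the 2048-byte FL0 chunk size introduced in the sge.c changes, and
the helper name is made up for illustration.]

/* Illustration only: each page is carved into PAGE_SIZE / buf_size chunks
 * (4096 / 2048 = 2 with the defaults). Every chunk handed to the hardware
 * pins the page with get_page(), so the page is freed only when the last
 * owner calls put_page() (see free_rx_bufs() and get_packet_pg() below). */
static inline unsigned int fl_chunks_per_page(const struct sge_fl *fl)
{
	return PAGE_SIZE / fl->buf_size;
}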
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 8d1379633698..2129210a67c1 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -101,6 +101,7 @@ enum {
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
+ PROTO_SRAM_LINES = 128, /* size of TP sram */
};
#define MAX_RX_COALESCING_LEN 16224U
@@ -124,6 +125,30 @@ enum { /* adapter interrupt-maintained statistics */
};
enum {
+ TP_VERSION_MAJOR = 1,
+ TP_VERSION_MINOR = 0,
+ TP_VERSION_MICRO = 44
+};
+
+#define S_TP_VERSION_MAJOR 16
+#define M_TP_VERSION_MAJOR 0xFF
+#define V_TP_VERSION_MAJOR(x) ((x) << S_TP_VERSION_MAJOR)
+#define G_TP_VERSION_MAJOR(x) \
+ (((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR)
+
+#define S_TP_VERSION_MINOR 8
+#define M_TP_VERSION_MINOR 0xFF
+#define V_TP_VERSION_MINOR(x) ((x) << S_TP_VERSION_MINOR)
+#define G_TP_VERSION_MINOR(x) \
+ (((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR)
+
+#define S_TP_VERSION_MICRO 0
+#define M_TP_VERSION_MICRO 0xFF
+#define V_TP_VERSION_MICRO(x) ((x) << S_TP_VERSION_MICRO)
+#define G_TP_VERSION_MICRO(x) \
+ (((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO)
+
+enum {
SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */
SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */
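[Editorial illustration, not part of the patch: how the version word handled by the new
TP_VERSION_* constants and the S_/M_/V_/G_TP_VERSION_* macros above decodes for the 1.0.44
protocol SRAM image the driver expects.]

/* Illustration only: 1.0.44 packs to (1 << 16) | (0 << 8) | 44 = 0x0001002c. */
u32 vers = V_TP_VERSION_MAJOR(TP_VERSION_MAJOR) |
           V_TP_VERSION_MINOR(TP_VERSION_MINOR) |
           V_TP_VERSION_MICRO(TP_VERSION_MICRO);

unsigned int major = G_TP_VERSION_MAJOR(vers);  /* 1 */
unsigned int minor = G_TP_VERSION_MINOR(vers);  /* 0 */
unsigned int micro = G_TP_VERSION_MICRO(vers);  /* 44 */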
@@ -654,6 +679,10 @@ const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
int t3_seeprom_wp(struct adapter *adapter, int enable);
+int t3_get_tp_version(struct adapter *adapter, u32 *vers);
+int t3_check_tpsram_version(struct adapter *adapter, int *must_load);
+int t3_check_tpsram(struct adapter *adapter, u8 *tp_ram, unsigned int size);
+int t3_set_proto_sram(struct adapter *adap, u8 *data);
int t3_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index d8a1f5452c51..5ab319cfe5de 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -43,6 +43,7 @@
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
+#include <linux/log2.h>
#include <asm/uaccess.h>
#include "common.h"
@@ -357,11 +358,14 @@ static int init_dummy_netdevs(struct adapter *adap)
for (j = 0; j < pi->nqsets - 1; j++) {
if (!adap->dummy_netdev[dummy_idx]) {
- nd = alloc_netdev(0, "", ether_setup);
+ struct port_info *p;
+
+ nd = alloc_netdev(sizeof(*p), "", ether_setup);
if (!nd)
goto free_all;
- nd->priv = adap;
+ p = netdev_priv(nd);
+ p->adapter = adap;
nd->weight = 64;
set_bit(__LINK_STATE_START, &nd->state);
adap->dummy_netdev[dummy_idx] = nd;
@@ -481,7 +485,8 @@ static ssize_t attr_store(struct device *d, struct device_attribute *attr,
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
- struct adapter *adap = dev->priv; \
+ struct port_info *pi = netdev_priv(dev); \
+ struct adapter *adap = pi->adapter; \
return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
@@ -492,7 +497,8 @@ static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
- struct adapter *adap = dev->priv;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
if (adap->flags & FULL_INIT_DONE)
@@ -514,7 +520,8 @@ static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
- struct adapter *adap = dev->priv;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
if (adap->flags & FULL_INIT_DONE)
return -EBUSY;
@@ -555,9 +562,10 @@ static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
char *buf, int sched)
{
- ssize_t len;
+ struct port_info *pi = netdev_priv(to_net_dev(d));
+ struct adapter *adap = pi->adapter;
unsigned int v, addr, bpt, cpt;
- struct adapter *adap = to_net_dev(d)->priv;
+ ssize_t len;
addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
rtnl_lock();
@@ -580,10 +588,11 @@ static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t len, int sched)
{
+ struct port_info *pi = netdev_priv(to_net_dev(d));
+ struct adapter *adap = pi->adapter;
+ unsigned int val;
char *endp;
ssize_t ret;
- unsigned int val;
- struct adapter *adap = to_net_dev(d)->priv;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -720,6 +729,7 @@ static void bind_qsets(struct adapter *adap)
}
#define FW_FNAME "t3fw-%d.%d.%d.bin"
+#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
static int upgrade_fw(struct adapter *adap)
{
@@ -738,6 +748,71 @@ static int upgrade_fw(struct adapter *adap)
}
ret = t3_load_fw(adap, fw->data, fw->size);
release_firmware(fw);
+
+ if (ret == 0)
+ dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
+ FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
+ else
+ dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
+ FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
+
+ return ret;
+}
+
+static inline char t3rev2char(struct adapter *adapter)
+{
+ char rev = 0;
+
+ switch(adapter->params.rev) {
+ case T3_REV_B:
+ case T3_REV_B2:
+ rev = 'b';
+ break;
+ }
+ return rev;
+}
+
+int update_tpsram(struct adapter *adap)
+{
+ const struct firmware *tpsram;
+ char buf[64];
+ struct device *dev = &adap->pdev->dev;
+ int ret;
+ char rev;
+
+ rev = t3rev2char(adap);
+ if (!rev)
+ return 0;
+
+ snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
+ TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
+
+ ret = request_firmware(&tpsram, buf, dev);
+ if (ret < 0) {
+ dev_err(dev, "could not load TP SRAM: unable to load %s\n",
+ buf);
+ return ret;
+ }
+
+ ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
+ if (ret)
+ goto release_tpsram;
+
+ ret = t3_set_proto_sram(adap, tpsram->data);
+ if (ret == 0)
+ dev_info(dev,
+ "successful update of protocol engine "
+ "to %d.%d.%d\n",
+ TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
+ else
+ dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
+ TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
+ if (ret)
+ dev_err(dev, "loading protocol SRAM failed\n");
+
+release_tpsram:
+ release_firmware(tpsram);
+
return ret;
}
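[Editorial note, not part of the patch: for a T3B/T3B2 adapter the TPSRAM_NAME format expands
to the file name update_tpsram() requests from userspace; with the expected 1.0.44 image that
is "t3b_protocol_sram-1.0.44.bin".]

/* Illustration only: reproduce the name built by update_tpsram() for rev 'b'. */
char name[64];
snprintf(name, sizeof(name), TPSRAM_NAME, 'b',
         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
/* name is now "t3b_protocol_sram-1.0.44.bin" */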
@@ -754,6 +829,7 @@ static int upgrade_fw(struct adapter *adap)
static int cxgb_up(struct adapter *adap)
{
int err = 0;
+ int must_load;
if (!(adap->flags & FULL_INIT_DONE)) {
err = t3_check_fw_version(adap);
@@ -762,6 +838,13 @@ static int cxgb_up(struct adapter *adap)
if (err)
goto out;
+ err = t3_check_tpsram_version(adap, &must_load);
+ if (err == -EINVAL) {
+ err = update_tpsram(adap);
+ if (err && must_load)
+ goto out;
+ }
+
err = init_dummy_netdevs(adap);
if (err)
goto out;
@@ -857,8 +940,9 @@ static void schedule_chk_task(struct adapter *adap)
static int offload_open(struct net_device *dev)
{
- struct adapter *adapter = dev->priv;
- struct t3cdev *tdev = T3CDEV(dev);
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct t3cdev *tdev = dev2t3cdev(dev);
int adap_up = adapter->open_device_map & PORT_MASK;
int err = 0;
@@ -923,10 +1007,10 @@ static int offload_close(struct t3cdev *tdev)
static int cxgb_open(struct net_device *dev)
{
- int err;
- struct adapter *adapter = dev->priv;
struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
int other_ports = adapter->open_device_map & PORT_MASK;
+ int err;
if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
return err;
@@ -950,17 +1034,17 @@ static int cxgb_open(struct net_device *dev)
static int cxgb_close(struct net_device *dev)
{
- struct adapter *adapter = dev->priv;
- struct port_info *p = netdev_priv(dev);
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
- t3_port_intr_disable(adapter, p->port_id);
+ t3_port_intr_disable(adapter, pi->port_id);
netif_stop_queue(dev);
- p->phy.ops->power_down(&p->phy, 1);
+ pi->phy.ops->power_down(&pi->phy, 1);
netif_carrier_off(dev);
- t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
+ t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
spin_lock(&adapter->work_lock); /* sync with update task */
- clear_bit(p->port_id, &adapter->open_device_map);
+ clear_bit(pi->port_id, &adapter->open_device_map);
spin_unlock(&adapter->work_lock);
if (!(adapter->open_device_map & PORT_MASK))
@@ -975,13 +1059,13 @@ static int cxgb_close(struct net_device *dev)
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
- struct adapter *adapter = dev->priv;
- struct port_info *p = netdev_priv(dev);
- struct net_device_stats *ns = &p->netstats;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ struct net_device_stats *ns = &pi->netstats;
const struct mac_stats *pstats;
spin_lock(&adapter->stats_lock);
- pstats = t3_mac_update_stats(&p->mac);
+ pstats = t3_mac_update_stats(&pi->mac);
spin_unlock(&adapter->stats_lock);
ns->tx_bytes = pstats->tx_octets;
@@ -1014,14 +1098,16 @@ static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
static u32 get_msglevel(struct net_device *dev)
{
- struct adapter *adapter = dev->priv;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
return adapter->msg_enable;
}
static void set_msglevel(struct net_device *dev, u32 val)
{
- struct adapter *adapter = dev->priv;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
adapter->msg_enable = val;
}
@@ -1095,10 +1181,13 @@ static int get_eeprom_len(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
u32 fw_vers = 0;
- struct adapter *adapter = dev->priv;
+ u32 tp_vers = 0;
t3_get_fw_version(adapter, &fw_vers);
+ t3_get_tp_version(adapter, &tp_vers);
strcpy(info->driver, DRV_NAME);
strcpy(info->version, DRV_VERSION);
@@ -1107,11 +1196,14 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
strcpy(info->fw_version, "N/A");
else {
snprintf(info->fw_version, sizeof(info->fw_version),
- "%s %u.%u.%u",
+ "%s %u.%u.%u TP %u.%u.%u",
G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
G_FW_VERSION_MAJOR(fw_vers),
G_FW_VERSION_MINOR(fw_vers),
- G_FW_VERSION_MICRO(fw_vers));
+ G_FW_VERSION_MICRO(fw_vers),
+ G_TP_VERSION_MAJOR(tp_vers),
+ G_TP_VERSION_MINOR(tp_vers),
+ G_TP_VERSION_MICRO(tp_vers));
}
}
@@ -1135,8 +1227,8 @@ static unsigned long collect_sge_port_stats(struct adapter *adapter,
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
u64 *data)
{
- struct adapter *adapter = dev->priv;
struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
const struct mac_stats *s;
spin_lock(&adapter->stats_lock);
@@ -1204,7 +1296,8 @@ static inline void reg_block_dump(struct adapter *ap, void *buf,
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *buf)
{
- struct adapter *ap = dev->priv;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *ap = pi->adapter;
/*
* Version scheme:
@@ -1245,8 +1338,9 @@ static int restart_autoneg(struct net_device *dev)
static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
int i;
- struct adapter *adapter = dev->priv;
if (data == 0)
data = 2;
@@ -1407,8 +1501,8 @@ static int set_rx_csum(struct net_device *dev, u32 data)
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
- const struct adapter *adapter = dev->priv;
- const struct port_info *pi = netdev_priv(dev);
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
e->rx_max_pending = MAX_RX_BUFFERS;
@@ -1424,10 +1518,10 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
- int i;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
struct qset_params *q;
- struct adapter *adapter = dev->priv;
- const struct port_info *pi = netdev_priv(dev);
+ int i;
if (e->rx_pending > MAX_RX_BUFFERS ||
e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
@@ -1456,7 +1550,8 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
- struct adapter *adapter = dev->priv;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
struct qset_params *qsp = &adapter->params.sge.qset[0];
struct sge_qset *qs = &adapter->sge.qs[0];
@@ -1470,7 +1565,8 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
- struct adapter *adapter = dev->priv;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
struct qset_params *q = adapter->params.sge.qset;
c->rx_coalesce_usecs = q->coalesce_usecs;
@@ -1480,8 +1576,9 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
u8 * data)
{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
int i, err = 0;
- struct adapter *adapter = dev->priv;
u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
if (!buf)
@@ -1500,10 +1597,11 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 * data)
{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ u32 aligned_offset, aligned_len, *p;
u8 *buf;
int err = 0;
- u32 aligned_offset, aligned_len, *p;
- struct adapter *adapter = dev->priv;
if (eeprom->magic != EEPROM_MAGIC)
return -EINVAL;
@@ -1582,7 +1680,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_wol = get_wol,
.get_tso = ethtool_op_get_tso,
.set_tso = ethtool_op_set_tso,
- .get_perm_addr = ethtool_op_get_perm_addr
};
static int in_range(int val, int lo, int hi)
@@ -1592,9 +1689,10 @@ static int in_range(int val, int lo, int hi)
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
- int ret;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
u32 cmd;
- struct adapter *adapter = dev->priv;
+ int ret;
if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
return -EFAULT;
@@ -1818,8 +1916,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EBUSY;
if (copy_from_user(&m, useraddr, sizeof(m)))
return -EFAULT;
- if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
- !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
+ if (!is_power_of_2(m.rx_pg_sz) ||
+ !is_power_of_2(m.tx_pg_sz))
return -EINVAL; /* not power of 2 */
if (!(m.rx_pg_sz & 0x14000))
return -EINVAL; /* not 16KB or 64KB */
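[Editorial note, not part of the patch: the magic mask above is 0x14000 == (1 << 14) | (1 << 16),
so after the is_power_of_2() checks the test only accepts PM page sizes of exactly 16KB or 64KB.
A spelled-out version with a made-up macro name:]

/* Illustration only; PM_PAGE_SIZES is a hypothetical name for 0x14000. */
#define PM_PAGE_SIZES	((1 << 14) | (1 << 16))		/* 16KB or 64KB */

static inline int pm_page_size_ok(unsigned int pg_sz)
{
	return is_power_of_2(pg_sz) && (pg_sz & PM_PAGE_SIZES);
}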
@@ -1923,10 +2021,10 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
- int ret, mmd;
- struct adapter *adapter = dev->priv;
- struct port_info *pi = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(req);
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int ret, mmd;
switch (cmd) {
case SIOCGMIIPHY:
@@ -1994,9 +2092,9 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
- int ret;
- struct adapter *adapter = dev->priv;
struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+ int ret;
if (new_mtu < 81) /* accommodate SACK */
return -EINVAL;
@@ -2013,8 +2111,8 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
- struct adapter *adapter = dev->priv;
struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
@@ -2050,8 +2148,8 @@ static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
- struct adapter *adapter = dev->priv;
struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
pi->vlan_grp = grp;
if (adapter->params.rev > 0)
@@ -2070,8 +2168,8 @@ static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
- struct adapter *adapter = dev->priv;
struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
int qidx;
for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
@@ -2397,6 +2495,7 @@ static int __devinit init_one(struct pci_dev *pdev,
adapter->port[i] = netdev;
pi = netdev_priv(netdev);
+ pi->adapter = adapter;
pi->rx_csum_offload = 1;
pi->nqsets = 1;
pi->first_qset = i;
@@ -2406,7 +2505,6 @@ static int __devinit init_one(struct pci_dev *pdev,
netdev->irq = pdev->irq;
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len - 1;
- netdev->priv = adapter;
netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
netdev->features |= NETIF_F_LLTX;
if (pci_using_dac)
@@ -2431,12 +2529,12 @@ static int __devinit init_one(struct pci_dev *pdev,
SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
}
- pci_set_drvdata(pdev, adapter->port[0]);
+ pci_set_drvdata(pdev, adapter);
if (t3_prep_adapter(adapter, ai, 1) < 0) {
err = -ENODEV;
goto out_free_dev;
}
-
+
/*
* The card is now ready to go. If any errors occur during device
* registration we do not fail the whole card but rather proceed only
@@ -2504,11 +2602,10 @@ out_release_regions:
static void __devexit remove_one(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct adapter *adapter = pci_get_drvdata(pdev);
- if (dev) {
+ if (adapter) {
int i;
- struct adapter *adapter = dev->priv;
t3_sge_stop(adapter);
sysfs_remove_group(&adapter->port[0]->dev.kobj,
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index ebcf35e4cf5b..bdff7baeb59d 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -593,6 +593,16 @@ int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
EXPORT_SYMBOL(cxgb3_alloc_stid);
+/* Get the t3cdev associated with a net_device */
+struct t3cdev *dev2t3cdev(struct net_device *dev)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ return (struct t3cdev *)pi->adapter;
+}
+
+EXPORT_SYMBOL(dev2t3cdev);
+
static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
struct cpl_smt_write_rpl *rpl = cplhdr(skb);
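[Editorial note, not part of the patch: the cast in dev2t3cdev() above is valid because
struct adapter embeds its struct t3cdev as the first member (the patch itself passes
&adap->tdev to rx_offload() in sge.c). A more explicit, hypothetical spelling of the same
relationship:]

/* Illustration only. */
static inline struct t3cdev *adapter2t3cdev(struct adapter *adapter)
{
	return &adapter->tdev;	/* tdev is the adapter's first member */
}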
@@ -699,7 +709,7 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
* the buffer.
*/
static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
- int gfp)
+ gfp_t gfp)
{
if (likely(!skb_cloned(skb))) {
BUG_ON(skb->len < len);
@@ -925,7 +935,7 @@ void cxgb_neigh_update(struct neighbour *neigh)
struct net_device *dev = neigh->dev;
if (dev && (is_offloading(dev))) {
- struct t3cdev *tdev = T3CDEV(dev);
+ struct t3cdev *tdev = dev2t3cdev(dev);
BUG_ON(!tdev);
t3_l2t_update(tdev, neigh);
@@ -973,9 +983,9 @@ void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
"device ignored.\n", __FUNCTION__);
return;
}
- tdev = T3CDEV(olddev);
+ tdev = dev2t3cdev(olddev);
BUG_ON(!tdev);
- if (tdev != T3CDEV(newdev)) {
+ if (tdev != dev2t3cdev(newdev)) {
printk(KERN_WARNING "%s: Redirect to different "
"offload device ignored.\n", __FUNCTION__);
return;
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index f15446a32efc..7a379138b5a6 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -51,6 +51,8 @@ void cxgb3_offload_deactivate(struct adapter *adapter);
void cxgb3_set_dummy_ops(struct t3cdev *dev);
+struct t3cdev *dev2t3cdev(struct net_device *dev);
+
/*
* Client registration. Users of T3 driver must register themselves.
* The T3 driver will call the add function of every client for each T3
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 020859c855d7..aa80313c922e 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -1160,6 +1160,8 @@
#define A_TP_MOD_CHANNEL_WEIGHT 0x434
+#define A_TP_MOD_RATE_LIMIT 0x438
+
#define A_TP_PIO_ADDR 0x440
#define A_TP_PIO_DATA 0x444
@@ -1214,6 +1216,15 @@
#define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & \
M_TXDROPCNTCH0RCVD)
+#define A_TP_PROXY_FLOW_CNTL 0x4b0
+
+#define A_TP_EMBED_OP_FIELD0 0x4e8
+#define A_TP_EMBED_OP_FIELD1 0x4ec
+#define A_TP_EMBED_OP_FIELD2 0x4f0
+#define A_TP_EMBED_OP_FIELD3 0x4f4
+#define A_TP_EMBED_OP_FIELD4 0x4f8
+#define A_TP_EMBED_OP_FIELD5 0x4fc
+
#define A_ULPRX_CTL 0x500
#define S_ROUND_ROBIN 4
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a60ec4d4707c..58a5f60521ed 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -46,23 +46,16 @@
#define SGE_RX_SM_BUF_SIZE 1536
-/*
- * If USE_RX_PAGE is defined, the small freelist populated with (partial)
- * pages instead of skbs. Pages are carved up into RX_PAGE_SIZE chunks (must
- * be a multiple of the host page size).
- */
-#define USE_RX_PAGE
-#define RX_PAGE_SIZE 2048
-
-/*
- * skb freelist packets are copied into a new skb (and the freelist one is
- * reused) if their len is <=
- */
#define SGE_RX_COPY_THRES 256
+#define SGE_RX_PULL_LEN 128
/*
- * Minimum number of freelist entries before we start dropping TUNNEL frames.
+ * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
+ * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
+ * directly.
*/
+#define FL0_PG_CHUNK_SIZE 2048
+
#define SGE_RX_DROP_THRES 16
/*
@@ -100,12 +93,12 @@ struct tx_sw_desc { /* SW state per Tx descriptor */
struct sk_buff *skb;
};
-struct rx_sw_desc { /* SW state per Rx descriptor */
+struct rx_sw_desc { /* SW state per Rx descriptor */
union {
struct sk_buff *skb;
- struct sge_fl_page page;
- } t;
- DECLARE_PCI_UNMAP_ADDR(dma_addr);
+ struct fl_pg_chunk pg_chunk;
+ };
+ DECLARE_PCI_UNMAP_ADDR(dma_addr);
};
struct rsp_desc { /* response queue descriptor */
@@ -351,27 +344,26 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
q->buf_size, PCI_DMA_FROMDEVICE);
-
- if (q->buf_size != RX_PAGE_SIZE) {
- kfree_skb(d->t.skb);
- d->t.skb = NULL;
+ if (q->use_pages) {
+ put_page(d->pg_chunk.page);
+ d->pg_chunk.page = NULL;
} else {
- if (d->t.page.frag.page)
- put_page(d->t.page.frag.page);
- d->t.page.frag.page = NULL;
+ kfree_skb(d->skb);
+ d->skb = NULL;
}
if (++cidx == q->size)
cidx = 0;
}
- if (q->page.frag.page)
- put_page(q->page.frag.page);
- q->page.frag.page = NULL;
+ if (q->pg_chunk.page) {
+ __free_page(q->pg_chunk.page);
+ q->pg_chunk.page = NULL;
+ }
}
/**
* add_one_rx_buf - add a packet buffer to a free-buffer list
- * @va: va of the buffer to add
+ * @va: buffer start VA
* @len: the buffer length
* @d: the HW Rx descriptor to write
* @sd: the SW Rx descriptor to write
@@ -381,7 +373,7 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
* Add a buffer of the given length to the supplied HW and SW Rx
* descriptors.
*/
-static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
+static inline void add_one_rx_buf(void *va, unsigned int len,
struct rx_desc *d, struct rx_sw_desc *sd,
unsigned int gen, struct pci_dev *pdev)
{
@@ -397,6 +389,27 @@ static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
}
+static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
+{
+ if (!q->pg_chunk.page) {
+ q->pg_chunk.page = alloc_page(gfp);
+ if (unlikely(!q->pg_chunk.page))
+ return -ENOMEM;
+ q->pg_chunk.va = page_address(q->pg_chunk.page);
+ q->pg_chunk.offset = 0;
+ }
+ sd->pg_chunk = q->pg_chunk;
+
+ q->pg_chunk.offset += q->buf_size;
+ if (q->pg_chunk.offset == PAGE_SIZE)
+ q->pg_chunk.page = NULL;
+ else {
+ q->pg_chunk.va += q->buf_size;
+ get_page(q->pg_chunk.page);
+ }
+ return 0;
+}
+
/**
* refill_fl - refill an SGE free-buffer list
* @adapter: the adapter
@@ -410,49 +423,29 @@ static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
*/
static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
+ void *buf_start;
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
struct rx_desc *d = &q->desc[q->pidx];
- struct sge_fl_page *p = &q->page;
while (n--) {
- unsigned char *va;
-
- if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
- struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
-
- if (!skb) {
- q->alloc_failed++;
+ if (q->use_pages) {
+ if (unlikely(alloc_pg_chunk(q, sd, gfp))) {
+nomem: q->alloc_failed++;
break;
}
- va = skb->data;
- sd->t.skb = skb;
+ buf_start = sd->pg_chunk.va;
} else {
- if (!p->frag.page) {
- p->frag.page = alloc_pages(gfp, 0);
- if (unlikely(!p->frag.page)) {
- q->alloc_failed++;
- break;
- } else {
- p->frag.size = RX_PAGE_SIZE;
- p->frag.page_offset = 0;
- p->va = page_address(p->frag.page);
- }
- }
+ struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
- memcpy(&sd->t, p, sizeof(*p));
- va = p->va;
+ if (!skb)
+ goto nomem;
- p->frag.page_offset += RX_PAGE_SIZE;
- BUG_ON(p->frag.page_offset > PAGE_SIZE);
- p->va += RX_PAGE_SIZE;
- if (p->frag.page_offset == PAGE_SIZE)
- p->frag.page = NULL;
- else
- get_page(p->frag.page);
+ sd->skb = skb;
+ buf_start = skb->data;
}
- add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);
-
+ add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
+ adap->pdev);
d++;
sd++;
if (++q->pidx == q->size) {
@@ -487,7 +480,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
struct rx_desc *from = &q->desc[idx];
struct rx_desc *to = &q->desc[q->pidx];
- memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
+ q->sdesc[q->pidx] = q->sdesc[idx];
to->addr_lo = from->addr_lo; /* already big endian */
to->addr_hi = from->addr_hi; /* likewise */
wmb();
@@ -650,6 +643,132 @@ static inline unsigned int flits_to_desc(unsigned int n)
}
/**
+ * get_packet - return the next ingress packet buffer from a free list
+ * @adap: the adapter that received the packet
+ * @fl: the SGE free list holding the packet
+ * @len: the packet length including any SGE padding
+ * @drop_thres: # of remaining buffers before we start dropping packets
+ *
+ * Get the next packet from a free list and complete setup of the
+ * sk_buff. If the packet is small we make a copy and recycle the
+ * original buffer, otherwise we use the original buffer itself. If a
+ * positive drop threshold is supplied packets are dropped and their
+ * buffers recycled if (a) the number of remaining buffers is under the
+ * threshold and the packet is too big to copy, or (b) the packet should
+ * be copied but there is no memory for the copy.
+ */
+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
+ unsigned int len, unsigned int drop_thres)
+{
+ struct sk_buff *skb = NULL;
+ struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+ prefetch(sd->skb->data);
+ fl->credits--;
+
+ if (len <= SGE_RX_COPY_THRES) {
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (likely(skb != NULL)) {
+ __skb_put(skb, len);
+ pci_dma_sync_single_for_cpu(adap->pdev,
+ pci_unmap_addr(sd, dma_addr), len,
+ PCI_DMA_FROMDEVICE);
+ memcpy(skb->data, sd->skb->data, len);
+ pci_dma_sync_single_for_device(adap->pdev,
+ pci_unmap_addr(sd, dma_addr), len,
+ PCI_DMA_FROMDEVICE);
+ } else if (!drop_thres)
+ goto use_orig_buf;
+recycle:
+ recycle_rx_buf(adap, fl, fl->cidx);
+ return skb;
+ }
+
+ if (unlikely(fl->credits < drop_thres))
+ goto recycle;
+
+use_orig_buf:
+ pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+ fl->buf_size, PCI_DMA_FROMDEVICE);
+ skb = sd->skb;
+ skb_put(skb, len);
+ __refill_fl(adap, fl);
+ return skb;
+}
+
+/**
+ * get_packet_pg - return the next ingress packet buffer from a free list
+ * @adap: the adapter that received the packet
+ * @fl: the SGE free list holding the packet
+ * @len: the packet length including any SGE padding
+ * @drop_thres: # of remaining buffers before we start dropping packets
+ *
+ * Get the next packet from a free list populated with page chunks.
+ * If the packet is small we make a copy and recycle the original buffer,
+ * otherwise we attach the original buffer as a page fragment to a fresh
+ * sk_buff. If a positive drop threshold is supplied packets are dropped
+ * and their buffers recycled if (a) the number of remaining buffers is
+ * under the threshold and the packet is too big to copy, or (b) there's
+ * no system memory.
+ *
+ * Note: this function is similar to @get_packet but deals with Rx buffers
+ * that are page chunks rather than sk_buffs.
+ */
+static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
+ unsigned int len, unsigned int drop_thres)
+{
+ struct sk_buff *skb = NULL;
+ struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+ if (len <= SGE_RX_COPY_THRES) {
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (likely(skb != NULL)) {
+ __skb_put(skb, len);
+ pci_dma_sync_single_for_cpu(adap->pdev,
+ pci_unmap_addr(sd, dma_addr), len,
+ PCI_DMA_FROMDEVICE);
+ memcpy(skb->data, sd->pg_chunk.va, len);
+ pci_dma_sync_single_for_device(adap->pdev,
+ pci_unmap_addr(sd, dma_addr), len,
+ PCI_DMA_FROMDEVICE);
+ } else if (!drop_thres)
+ return NULL;
+recycle:
+ fl->credits--;
+ recycle_rx_buf(adap, fl, fl->cidx);
+ return skb;
+ }
+
+ if (unlikely(fl->credits <= drop_thres))
+ goto recycle;
+
+ skb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ if (!drop_thres)
+ return NULL;
+ goto recycle;
+ }
+
+ pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+ fl->buf_size, PCI_DMA_FROMDEVICE);
+ __skb_put(skb, SGE_RX_PULL_LEN);
+ memcpy(skb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
+ skb_fill_page_desc(skb, 0, sd->pg_chunk.page,
+ sd->pg_chunk.offset + SGE_RX_PULL_LEN,
+ len - SGE_RX_PULL_LEN);
+ skb->len = len;
+ skb->data_len = len - SGE_RX_PULL_LEN;
+ skb->truesize += skb->data_len;
+
+ fl->credits--;
+ /*
+ * We do not refill FLs here, we let the caller do it to overlap a
+ * prefetch.
+ */
+ return skb;
+}
+
+/**
* get_imm_packet - return the next ingress packet buffer from a response
* @resp: the response descriptor containing the packet data
*
@@ -954,7 +1073,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned int ndesc, pidx, credits, gen, compl;
const struct port_info *pi = netdev_priv(dev);
- struct adapter *adap = dev->priv;
+ struct adapter *adap = pi->adapter;
struct sge_qset *qs = dev2qset(dev);
struct sge_txq *q = &qs->txq[TXQ_ETH];
@@ -1207,7 +1326,8 @@ static void restart_ctrlq(unsigned long data)
struct sk_buff *skb;
struct sge_qset *qs = (struct sge_qset *)data;
struct sge_txq *q = &qs->txq[TXQ_CTRL];
- struct adapter *adap = qs->netdev->priv;
+ const struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
spin_lock(&q->lock);
again:reclaim_completed_tx_imm(q);
@@ -1412,7 +1532,8 @@ static void restart_offloadq(unsigned long data)
struct sk_buff *skb;
struct sge_qset *qs = (struct sge_qset *)data;
struct sge_txq *q = &qs->txq[TXQ_OFLD];
- struct adapter *adap = qs->netdev->priv;
+ const struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
spin_lock(&q->lock);
again:reclaim_completed_tx(adap, q);
@@ -1556,7 +1677,8 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
*/
static int ofld_poll(struct net_device *dev, int *budget)
{
- struct adapter *adapter = dev->priv;
+ const struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
struct sge_qset *qs = dev2qset(dev);
struct sge_rspq *q = &qs->rspq;
int work_done, limit = min(*budget, dev->quota), avail = limit;
@@ -1715,85 +1837,6 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
netif_rx(skb);
}
-#define SKB_DATA_SIZE 128
-
-static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
- unsigned int len)
-{
- skb->len = len;
- if (len <= SKB_DATA_SIZE) {
- skb_copy_to_linear_data(skb, p->va, len);
- skb->tail += len;
- put_page(p->frag.page);
- } else {
- skb_copy_to_linear_data(skb, p->va, SKB_DATA_SIZE);
- skb_shinfo(skb)->frags[0].page = p->frag.page;
- skb_shinfo(skb)->frags[0].page_offset =
- p->frag.page_offset + SKB_DATA_SIZE;
- skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
- skb_shinfo(skb)->nr_frags = 1;
- skb->data_len = len - SKB_DATA_SIZE;
- skb->tail += SKB_DATA_SIZE;
- skb->truesize += skb->data_len;
- }
-}
-
-/**
-* get_packet - return the next ingress packet buffer from a free list
-* @adap: the adapter that received the packet
-* @fl: the SGE free list holding the packet
-* @len: the packet length including any SGE padding
-* @drop_thres: # of remaining buffers before we start dropping packets
-*
-* Get the next packet from a free list and complete setup of the
-* sk_buff. If the packet is small we make a copy and recycle the
-* original buffer, otherwise we use the original buffer itself. If a
-* positive drop threshold is supplied packets are dropped and their
-* buffers recycled if (a) the number of remaining buffers is under the
-* threshold and the packet is too big to copy, or (b) the packet should
-* be copied but there is no memory for the copy.
-*/
-static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
- unsigned int len, unsigned int drop_thres)
-{
- struct sk_buff *skb = NULL;
- struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
-
- prefetch(sd->t.skb->data);
-
- if (len <= SGE_RX_COPY_THRES) {
- skb = alloc_skb(len, GFP_ATOMIC);
- if (likely(skb != NULL)) {
- struct rx_desc *d = &fl->desc[fl->cidx];
- dma_addr_t mapping =
- (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
- be32_to_cpu(d->addr_lo));
-
- __skb_put(skb, len);
- pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
- PCI_DMA_FROMDEVICE);
- skb_copy_from_linear_data(sd->t.skb, skb->data, len);
- pci_dma_sync_single_for_device(adap->pdev, mapping, len,
- PCI_DMA_FROMDEVICE);
- } else if (!drop_thres)
- goto use_orig_buf;
-recycle:
- recycle_rx_buf(adap, fl, fl->cidx);
- return skb;
- }
-
- if (unlikely(fl->credits < drop_thres))
- goto recycle;
-
-use_orig_buf:
- pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
- fl->buf_size, PCI_DMA_FROMDEVICE);
- skb = sd->t.skb;
- skb_put(skb, len);
- __refill_fl(adap, fl);
- return skb;
-}
-
/**
* handle_rsp_cntrl_info - handles control information in a response
* @qs: the queue set corresponding to the response
@@ -1935,7 +1978,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
} else if (flags & F_RSPD_IMM_DATA_VALID) {
skb = get_imm_packet(r);
if (unlikely(!skb)) {
- no_mem:
+no_mem:
q->next_holdoff = NOMEM_INTR_DELAY;
q->nomem++;
/* consume one credit since we tried */
@@ -1945,53 +1988,29 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
q->imm_data++;
ethpad = 0;
} else if ((len = ntohl(r->len_cq)) != 0) {
- struct sge_fl *fl =
- (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
-
- if (fl->buf_size == RX_PAGE_SIZE) {
- struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
- struct sge_fl_page *p = &sd->t.page;
+ struct sge_fl *fl;
- prefetch(p->va);
- prefetch(p->va + L1_CACHE_BYTES);
+ fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+ if (fl->use_pages) {
+ void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
+ prefetch(addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(addr + L1_CACHE_BYTES);
+#endif
__refill_fl(adap, fl);
- pci_unmap_single(adap->pdev,
- pci_unmap_addr(sd, dma_addr),
- fl->buf_size,
- PCI_DMA_FROMDEVICE);
-
- if (eth) {
- if (unlikely(fl->credits <
- SGE_RX_DROP_THRES))
- goto eth_recycle;
-
- skb = alloc_skb(SKB_DATA_SIZE,
- GFP_ATOMIC);
- if (unlikely(!skb)) {
-eth_recycle:
- q->rx_drops++;
- recycle_rx_buf(adap, fl,
- fl->cidx);
- goto eth_done;
- }
- } else {
- skb = alloc_skb(SKB_DATA_SIZE,
- GFP_ATOMIC);
- if (unlikely(!skb))
- goto no_mem;
- }
-
- skb_data_init(skb, p, G_RSPD_LEN(len));
-eth_done:
- fl->credits--;
- q->eth_pkts++;
- } else {
- fl->credits--;
+ skb = get_packet_pg(adap, fl, G_RSPD_LEN(len),
+ eth ? SGE_RX_DROP_THRES : 0);
+ } else
skb = get_packet(adap, fl, G_RSPD_LEN(len),
eth ? SGE_RX_DROP_THRES : 0);
- }
+ if (unlikely(!skb)) {
+ if (!eth)
+ goto no_mem;
+ q->rx_drops++;
+ } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
+ __skb_pull(skb, 2);
if (++fl->cidx == fl->size)
fl->cidx = 0;
@@ -2016,20 +2035,15 @@ eth_done:
q->credits = 0;
}
- if (skb) {
- /* Preserve the RSS info in csum & priority */
- skb->csum = rss_hi;
- skb->priority = rss_lo;
-
+ if (likely(skb != NULL)) {
if (eth)
rx_eth(adap, q, skb, ethpad);
else {
- if (unlikely(r->rss_hdr.opcode ==
- CPL_TRACE_PKT))
- __skb_pull(skb, ethpad);
-
- ngathered = rx_offload(&adap->tdev, q,
- skb, offload_skbs,
+ /* Preserve the RSS info in csum & priority */
+ skb->csum = rss_hi;
+ skb->priority = rss_lo;
+ ngathered = rx_offload(&adap->tdev, q, skb,
+ offload_skbs,
ngathered);
}
}
@@ -2064,7 +2078,8 @@ static inline int is_pure_response(const struct rsp_desc *r)
*/
static int napi_rx_handler(struct net_device *dev, int *budget)
{
- struct adapter *adap = dev->priv;
+ const struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
struct sge_qset *qs = dev2qset(dev);
int effective_budget = min(*budget, dev->quota);
@@ -2194,7 +2209,8 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
{
struct sge_qset *qs = cookie;
- struct adapter *adap = qs->netdev->priv;
+ const struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
struct sge_rspq *q = &qs->rspq;
spin_lock(&q->lock);
@@ -2213,7 +2229,8 @@ irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
{
struct sge_qset *qs = cookie;
- struct adapter *adap = qs->netdev->priv;
+ const struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
struct sge_rspq *q = &qs->rspq;
spin_lock(&q->lock);
@@ -2497,7 +2514,8 @@ static void sge_timer_cb(unsigned long data)
{
spinlock_t *lock;
struct sge_qset *qs = (struct sge_qset *)data;
- struct adapter *adap = qs->netdev->priv;
+ const struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
if (spin_trylock(&qs->txq[TXQ_ETH].lock)) {
reclaim_completed_tx(adap, &qs->txq[TXQ_ETH]);
@@ -2635,25 +2653,15 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
q->txq[TXQ_ETH].stop_thres = nports *
flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
- if (!is_offload(adapter)) {
-#ifdef USE_RX_PAGE
- q->fl[0].buf_size = RX_PAGE_SIZE;
+#if FL0_PG_CHUNK_SIZE > 0
+ q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
#else
- q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
- sizeof(struct cpl_rx_pkt);
+ q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
#endif
- q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
- sizeof(struct cpl_rx_pkt);
- } else {
-#ifdef USE_RX_PAGE
- q->fl[0].buf_size = RX_PAGE_SIZE;
-#else
- q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
- sizeof(struct cpl_rx_data);
-#endif
- q->fl[1].buf_size = (16 * 1024) -
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- }
+ q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
+ q->fl[1].buf_size = is_offload(adapter) ?
+ (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
spin_lock(&adapter->sge.reg_lock);
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index fb485d0a43d8..b02d15daf5d9 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -847,6 +847,98 @@ static int t3_write_flash(struct adapter *adapter, unsigned int addr,
return 0;
}
+/**
+ * t3_get_tp_version - read the tp sram version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the protocol sram version from sram.
+ */
+int t3_get_tp_version(struct adapter *adapter, u32 *vers)
+{
+ int ret;
+
+ /* Get version loaded in SRAM */
+ t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
+ ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
+ 1, 1, 5, 1);
+ if (ret)
+ return ret;
+
+ *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
+
+ return 0;
+}
+
+/**
+ * t3_check_tpsram_version - read the tp sram version
+ * @adapter: the adapter
+ * @must_load: set to 1 if loading a new microcode image is required
+ *
+ * Reads the protocol sram version from flash.
+ */
+int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
+{
+ int ret;
+ u32 vers;
+ unsigned int major, minor;
+
+ if (adapter->params.rev == T3_REV_A)
+ return 0;
+
+ *must_load = 1;
+
+ ret = t3_get_tp_version(adapter, &vers);
+ if (ret)
+ return ret;
+
+ major = G_TP_VERSION_MAJOR(vers);
+ minor = G_TP_VERSION_MINOR(vers);
+
+ if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
+ return 0;
+
+ if (major != TP_VERSION_MAJOR)
+ CH_ERR(adapter, "found wrong TP version (%u.%u), "
+ "driver needs version %d.%d\n", major, minor,
+ TP_VERSION_MAJOR, TP_VERSION_MINOR);
+ else {
+ *must_load = 0;
+ CH_ERR(adapter, "found wrong TP version (%u.%u), "
+ "driver compiled for version %d.%d\n", major, minor,
+ TP_VERSION_MAJOR, TP_VERSION_MINOR);
+ }
+ return -EINVAL;
+}
+
+/**
+ * t3_check_tpsram - check if provided protocol SRAM
+ * is compatible with this driver
+ * @adapter: the adapter
+ * @tp_sram: the firmware image to write
+ * @size: image size
+ *
+ * Checks if an adapter's tp sram is compatible with the driver.
+ * Returns 0 if the versions are compatible, a negative error otherwise.
+ */
+int t3_check_tpsram(struct adapter *adapter, u8 *tp_sram, unsigned int size)
+{
+ u32 csum;
+ unsigned int i;
+ const u32 *p = (const u32 *)tp_sram;
+
+ /* Verify checksum */
+ for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+ csum += ntohl(p[i]);
+ if (csum != 0xffffffff) {
+ CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
+ csum);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
enum fw_version_type {
FW_VERSION_N3,
FW_VERSION_T3
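[Editorial note, not part of the patch: the checksum rule enforced by t3_check_tpsram() can
also be applied to an image on the host before loading it. A minimal userspace sketch,
assuming the same convention that the big-endian 32-bit words of a valid image sum to
0xffffffff:]

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>

/* Illustration only: returns non-zero if the image passes the checksum. */
static int tpsram_image_ok(const uint8_t *img, size_t size)
{
	const uint32_t *p = (const uint32_t *)img;
	uint32_t csum = 0;
	size_t i;

	for (i = 0; i < size / sizeof(uint32_t); i++)
		csum += ntohl(p[i]);		/* words are stored big endian */
	return csum == 0xffffffff;		/* a valid image sums to all-ones */
}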
@@ -921,7 +1013,7 @@ static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
/*
* t3_load_fw - download firmware
* @adapter: the adapter
- * @fw_data: the firrware image to write
+ * @fw_data: the firmware image to write
* @size: image size
*
* Write the supplied firmware image to the card's serial flash.
@@ -2362,7 +2454,7 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
F_MTUENABLE | V_WINDOWSCALEMODE(1) |
- V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
+ V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
@@ -2371,16 +2463,18 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
F_IPV6ENABLE | F_NICMODE);
t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
- t3_set_reg_field(adap, A_TP_PARA_REG6,
- adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
- 0);
+ t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
+ adap->params.rev > 0 ? F_ENABLEESND :
+ F_T3A_ENABLEESND);
t3_set_reg_field(adap, A_TP_PC_CONFIG,
- F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL,
- F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE |
- F_RXCONGESTIONMODE);
+ F_ENABLEEPCMDAFULL,
+ F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
+ F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
-
+ t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
+ t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
+
if (adap->params.rev > 0) {
tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
@@ -2390,9 +2484,10 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
} else
t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
- t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212);
- t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212);
- t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212);
+ t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
+ t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
+ t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
+ t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
}
/* Desired TP timer resolution in usec */
@@ -2468,6 +2563,7 @@ int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
val |= F_RXCOALESCEENABLE;
if (psh)
val |= F_RXCOALESCEPSHEN;
+ size = min(MAX_RX_COALESCING_LEN, size);
t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
V_MAXRXDATA(MAX_RX_COALESCING_LEN));
}
@@ -2496,11 +2592,11 @@ static void __devinit init_mtus(unsigned short mtus[])
* it can accommodate max size TCP/IP headers when SACK and timestamps
* are enabled and still have at least 8 bytes of payload.
*/
- mtus[0] = 88;
- mtus[1] = 256;
- mtus[2] = 512;
- mtus[3] = 576;
- mtus[4] = 808;
+ mtus[0] = 88;
+ mtus[1] = 88;
+ mtus[2] = 256;
+ mtus[3] = 512;
+ mtus[4] = 576;
mtus[5] = 1024;
mtus[6] = 1280;
mtus[7] = 1492;
@@ -2682,6 +2778,34 @@ static void ulp_config(struct adapter *adap, const struct tp_params *p)
t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
+/**
+ * t3_set_proto_sram - set the contents of the protocol sram
+ * @adapter: the adapter
+ * @data: the protocol image
+ *
+ * Write the contents of the protocol SRAM.
+ */
+int t3_set_proto_sram(struct adapter *adap, u8 *data)
+{
+ int i;
+ u32 *buf = (u32 *)data;
+
+ for (i = 0; i < PROTO_SRAM_LINES; i++) {
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
+
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
+ if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
+ return -EIO;
+ }
+ t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
+
+ return 0;
+}
+
void t3_config_trace_filter(struct adapter *adapter,
const struct trace_params *tp, int filter_index,
int invert, int enable)
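[Editorial note, not part of the patch: t3_set_proto_sram() consumes PROTO_SRAM_LINES lines of
five 32-bit words, i.e. 128 * 5 * 4 = 2560 bytes, and commits each line by writing
(i << 1) | (1 << 31) to A_TP_EMBED_OP_FIELD0. A size check along these lines, using a made-up
macro name, would be a natural companion to t3_check_tpsram():]

/* Illustration only; PROTO_SRAM_IMAGE_SIZE is a hypothetical name. */
#define PROTO_SRAM_IMAGE_SIZE	(PROTO_SRAM_LINES * 5 * sizeof(u32))	/* 2560 bytes */

static inline int proto_sram_size_ok(unsigned int size)
{
	return size == PROTO_SRAM_IMAGE_SIZE;
}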
@@ -2802,7 +2926,7 @@ static void init_hw_for_avail_ports(struct adapter *adap, int nports)
t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
F_PORT0ACTIVE | F_ENFORCEPKT);
- t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
+ t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
} else {
t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
@@ -3097,7 +3221,7 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
else
t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
- t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
+ t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
init_hw_for_avail_ports(adapter, adapter->params.nports);
t3_sge_init(adapter, &adapter->params.sge);
diff --git a/drivers/net/cxgb3/t3cdev.h b/drivers/net/cxgb3/t3cdev.h
index fa4099bc0416..77fcc1a4984e 100644
--- a/drivers/net/cxgb3/t3cdev.h
+++ b/drivers/net/cxgb3/t3cdev.h
@@ -42,9 +42,6 @@
#define T3CNAMSIZ 16
-/* Get the t3cdev associated with a net_device */
-#define T3CDEV(netdev) (struct t3cdev *)(netdev->priv)
-
struct cxgb3_client;
enum t3ctype {
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index b112317f033e..eb508bf8022a 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -39,6 +39,6 @@
/* Firmware version */
#define FW_VERSION_MAJOR 4
-#define FW_VERSION_MINOR 0
+#define FW_VERSION_MINOR 3
#define FW_VERSION_MICRO 0
#endif /* __CHELSIO_VERSION_H */