Diffstat (limited to 'drivers/net/mlx4')
-rw-r--r--  drivers/net/mlx4/catas.c  | 106
-rw-r--r--  drivers/net/mlx4/cmd.c    |   3
-rw-r--r--  drivers/net/mlx4/eq.c     |  56
-rw-r--r--  drivers/net/mlx4/fw.c     |   3
-rw-r--r--  drivers/net/mlx4/fw.h     |   1
-rw-r--r--  drivers/net/mlx4/intf.c   |   2
-rw-r--r--  drivers/net/mlx4/main.c   |  27
-rw-r--r--  drivers/net/mlx4/mlx4.h   |  14
-rw-r--r--  drivers/net/mlx4/mr.c     |  15
-rw-r--r--  drivers/net/mlx4/qp.c     |  24
-rw-r--r--  drivers/net/mlx4/reset.c  |   3
-rw-r--r--  drivers/net/mlx4/srq.c    |  30
12 files changed, 195 insertions, 89 deletions
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index 1bb088aeaf71..6b32ec94b3a8 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -30,41 +30,133 @@
* SOFTWARE.
*/
+#include <linux/workqueue.h>
+
#include "mlx4.h"
-void mlx4_handle_catas_err(struct mlx4_dev *dev)
+enum {
+ MLX4_CATAS_POLL_INTERVAL = 5 * HZ,
+};
+
+static DEFINE_SPINLOCK(catas_lock);
+
+static LIST_HEAD(catas_list);
+static struct workqueue_struct *catas_wq;
+static struct work_struct catas_work;
+
+static int internal_err_reset = 1;
+module_param(internal_err_reset, int, 0644);
+MODULE_PARM_DESC(internal_err_reset,
+ "Reset device on internal errors if non-zero (default 1)");
+
+static void dump_err_buf(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
- mlx4_err(dev, "Catastrophic error detected:\n");
+ mlx4_err(dev, "Internal error detected:\n");
for (i = 0; i < priv->fw.catas_size; ++i)
mlx4_err(dev, " buf[%02x]: %08x\n",
i, swab32(readl(priv->catas_err.map + i)));
+}
- mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
+static void poll_catas(unsigned long dev_ptr)
+{
+ struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (readl(priv->catas_err.map)) {
+ dump_err_buf(dev);
+
+ mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
+
+ if (internal_err_reset) {
+ spin_lock(&catas_lock);
+ list_add(&priv->catas_err.list, &catas_list);
+ spin_unlock(&catas_lock);
+
+ queue_work(catas_wq, &catas_work);
+ }
+ } else
+ mod_timer(&priv->catas_err.timer,
+ round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
}
-void mlx4_map_catas_buf(struct mlx4_dev *dev)
+static void catas_reset(struct work_struct *work)
+{
+ struct mlx4_priv *priv, *tmppriv;
+ struct mlx4_dev *dev;
+
+ LIST_HEAD(tlist);
+ int ret;
+
+ spin_lock_irq(&catas_lock);
+ list_splice_init(&catas_list, &tlist);
+ spin_unlock_irq(&catas_lock);
+
+ list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
+ ret = mlx4_restart_one(priv->dev.pdev);
+ dev = &priv->dev;
+ if (ret)
+ mlx4_err(dev, "Reset failed (%d)\n", ret);
+ else
+ mlx4_dbg(dev, "Reset succeeded\n");
+ }
+}
+
+void mlx4_start_catas_poll(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
unsigned long addr;
+ INIT_LIST_HEAD(&priv->catas_err.list);
+ init_timer(&priv->catas_err.timer);
+ priv->catas_err.map = NULL;
+
addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
priv->fw.catas_offset;
priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
- if (!priv->catas_err.map)
- mlx4_warn(dev, "Failed to map catastrophic error buffer at 0x%lx\n",
+ if (!priv->catas_err.map) {
+ mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n",
addr);
+ return;
+ }
+ priv->catas_err.timer.data = (unsigned long) dev;
+ priv->catas_err.timer.function = poll_catas;
+ priv->catas_err.timer.expires =
+ round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
+ add_timer(&priv->catas_err.timer);
}
-void mlx4_unmap_catas_buf(struct mlx4_dev *dev)
+void mlx4_stop_catas_poll(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
+ del_timer_sync(&priv->catas_err.timer);
+
if (priv->catas_err.map)
iounmap(priv->catas_err.map);
+
+ spin_lock_irq(&catas_lock);
+ list_del(&priv->catas_err.list);
+ spin_unlock_irq(&catas_lock);
+}
+
+int __init mlx4_catas_init(void)
+{
+ INIT_WORK(&catas_work, catas_reset);
+
+ catas_wq = create_singlethread_workqueue("mlx4_err");
+ if (!catas_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx4_catas_cleanup(void)
+{
+ destroy_workqueue(catas_wq);
}
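
The catas.c hunks above change error handling from a dedicated interrupt to polling: a timer periodically reads the mapped error buffer, and when it sees a non-zero word it hands the device off to a workqueue, because the reset path sleeps and cannot run in timer context. A minimal, self-contained sketch of that poll-and-defer pattern follows, written against the same 2.6-era timer/workqueue APIs this patch uses; all names here (err_wq, poll_err, ...) are illustrative, not part of the driver.

/*
 * Sketch of the poll-and-defer pattern: a timer peeks at an error
 * register and a workqueue does the sleeping recovery work.
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/io.h>

static struct workqueue_struct *err_wq;
static struct work_struct reset_work;
static struct timer_list poll_timer;
static void __iomem *err_buf;	/* would be ioremap()ed from a device BAR */

static void do_reset(struct work_struct *work)
{
	/* Sleeping recovery runs here (the driver calls mlx4_restart_one()). */
}

static void poll_err(unsigned long data)
{
	/* Timer context cannot sleep, so only peek and hand off. */
	if (err_buf && readl(err_buf))
		queue_work(err_wq, &reset_work);
	else
		mod_timer(&poll_timer, round_jiffies(jiffies + 5 * HZ));
}

static int __init poll_sketch_init(void)
{
	err_wq = create_singlethread_workqueue("err_sketch");
	if (!err_wq)
		return -ENOMEM;

	INIT_WORK(&reset_work, do_reset);

	init_timer(&poll_timer);
	poll_timer.function = poll_err;
	poll_timer.data     = 0;
	poll_timer.expires  = round_jiffies(jiffies + 5 * HZ);
	add_timer(&poll_timer);
	return 0;
}

static void __exit poll_sketch_exit(void)
{
	del_timer_sync(&poll_timer);
	destroy_workqueue(err_wq);
}

module_init(poll_sketch_init);
module_exit(poll_sketch_exit);
MODULE_LICENSE("GPL");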
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
index c1f81a993f5d..a9f31753661a 100644
--- a/drivers/net/mlx4/cmd.c
+++ b/drivers/net/mlx4/cmd.c
@@ -246,8 +246,6 @@ void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
context->result = mlx4_status_to_errno(status);
context->out_param = out_param;
- context->token += priv->cmd.token_mask + 1;
-
complete(&context->done);
}
@@ -264,6 +262,7 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
spin_lock(&cmd->context_lock);
BUG_ON(cmd->free_head < 0);
context = &cmd->context[cmd->free_head];
+ context->token += cmd->token_mask + 1;
cmd->free_head = context->next;
spin_unlock(&cmd->context_lock);
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 27a82cecd693..2095c843fa15 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -89,14 +89,12 @@ struct mlx4_eq_context {
(1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \
(1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
(1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \
- (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \
(1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \
(1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
(1ull << MLX4_EVENT_TYPE_CMD))
-#define MLX4_CATAS_EVENT_MASK (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR)
struct mlx4_eqe {
u8 reserved1;
@@ -264,7 +262,7 @@ static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
- for (i = 0; i < MLX4_EQ_CATAS; ++i)
+ for (i = 0; i < MLX4_NUM_EQ; ++i)
work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
return IRQ_RETVAL(work);
@@ -281,14 +279,6 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
return IRQ_HANDLED;
}
-static irqreturn_t mlx4_catas_interrupt(int irq, void *dev_ptr)
-{
- mlx4_handle_catas_err(dev_ptr);
-
- /* MSI-X vectors always belong to us */
- return IRQ_HANDLED;
-}
-
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
int eq_num)
{
@@ -490,11 +480,9 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
if (eq_table->have_irq)
free_irq(dev->pdev->irq, dev);
- for (i = 0; i < MLX4_EQ_CATAS; ++i)
+ for (i = 0; i < MLX4_NUM_EQ; ++i)
if (eq_table->eq[i].have_irq)
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
- if (eq_table->eq[MLX4_EQ_CATAS].have_irq)
- free_irq(eq_table->eq[MLX4_EQ_CATAS].irq, dev);
}
static int __devinit mlx4_map_clr_int(struct mlx4_dev *dev)
@@ -598,32 +586,19 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev)
if (dev->flags & MLX4_FLAG_MSI_X) {
static const char *eq_name[] = {
[MLX4_EQ_COMP] = DRV_NAME " (comp)",
- [MLX4_EQ_ASYNC] = DRV_NAME " (async)",
- [MLX4_EQ_CATAS] = DRV_NAME " (catas)"
+ [MLX4_EQ_ASYNC] = DRV_NAME " (async)"
};
- err = mlx4_create_eq(dev, 1, MLX4_EQ_CATAS,
- &priv->eq_table.eq[MLX4_EQ_CATAS]);
- if (err)
- goto err_out_async;
-
- for (i = 0; i < MLX4_EQ_CATAS; ++i) {
+ for (i = 0; i < MLX4_NUM_EQ; ++i) {
err = request_irq(priv->eq_table.eq[i].irq,
mlx4_msi_x_interrupt,
0, eq_name[i], priv->eq_table.eq + i);
if (err)
- goto err_out_catas;
+ goto err_out_async;
priv->eq_table.eq[i].have_irq = 1;
}
- err = request_irq(priv->eq_table.eq[MLX4_EQ_CATAS].irq,
- mlx4_catas_interrupt, 0,
- eq_name[MLX4_EQ_CATAS], dev);
- if (err)
- goto err_out_catas;
-
- priv->eq_table.eq[MLX4_EQ_CATAS].have_irq = 1;
} else {
err = request_irq(dev->pdev->irq, mlx4_interrupt,
IRQF_SHARED, DRV_NAME, dev);
@@ -639,22 +614,11 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev)
mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
- for (i = 0; i < MLX4_EQ_CATAS; ++i)
+ for (i = 0; i < MLX4_NUM_EQ; ++i)
eq_set_ci(&priv->eq_table.eq[i], 1);
- if (dev->flags & MLX4_FLAG_MSI_X) {
- err = mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 0,
- priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
- if (err)
- mlx4_warn(dev, "MAP_EQ for catas EQ %d failed (%d)\n",
- priv->eq_table.eq[MLX4_EQ_CATAS].eqn, err);
- }
-
return 0;
-err_out_catas:
- mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
-
err_out_async:
mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
@@ -675,19 +639,13 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
- if (dev->flags & MLX4_FLAG_MSI_X)
- mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 1,
- priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
-
mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
mlx4_free_irqs(dev);
- for (i = 0; i < MLX4_EQ_CATAS; ++i)
+ for (i = 0; i < MLX4_NUM_EQ; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
- if (dev->flags & MLX4_FLAG_MSI_X)
- mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
mlx4_unmap_clr_int(dev);
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index d2b065351e45..c45cbe43a0c4 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -138,6 +138,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37
+#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
@@ -220,6 +221,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->local_ca_ack_delay = field & 0x1f;
MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
dev_cap->num_ports = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
+ dev_cap->max_msg_sz = 1 << (field & 0x1f);
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
dev_cap->stat_rate_support = stat_rate;
MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 296254ac27c1..7e1dd9e25cfb 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -60,6 +60,7 @@ struct mlx4_dev_cap {
int max_rdma_global;
int local_ca_ack_delay;
int num_ports;
+ u32 max_msg_sz;
int max_mtu[MLX4_MAX_PORTS + 1];
int max_port_width[MLX4_MAX_PORTS + 1];
int max_vl[MLX4_MAX_PORTS + 1];
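
For reference, the new QUERY_DEV_CAP field decoded above is a 5-bit log2 value: max_msg_sz = 1 << (field & 0x1f). As a worked example (illustrative only, not a claim about any particular HCA), a reported field of 0x1e decodes to 2^30 bytes, i.e. a 1 GB maximum message size.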
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index 9ae951bf6aa6..be5d9e90ccf2 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -142,6 +142,7 @@ int mlx4_register_device(struct mlx4_dev *dev)
mlx4_add_device(intf, priv);
mutex_unlock(&intf_mutex);
+ mlx4_start_catas_poll(dev);
return 0;
}
@@ -151,6 +152,7 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_interface *intf;
+ mlx4_stop_catas_poll(dev);
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index c3da2a2f5431..4dc9dc19b716 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -78,7 +78,7 @@ static const char mlx4_version[] __devinitdata =
static struct mlx4_profile default_profile = {
.num_qp = 1 << 16,
.num_srq = 1 << 16,
- .rdmarc_per_qp = 4,
+ .rdmarc_per_qp = 1 << 4,
.num_cq = 1 << 16,
.num_mcg = 1 << 13,
.num_mpt = 1 << 17,
@@ -154,6 +154,7 @@ static int __devinit mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev
dev->caps.reserved_uars = dev_cap->reserved_uars;
dev->caps.reserved_pds = dev_cap->reserved_pds;
dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+ dev->caps.max_msg_sz = dev_cap->max_msg_sz;
dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
dev->caps.flags = dev_cap->flags;
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
@@ -582,13 +583,11 @@ static int __devinit mlx4_setup_hca(struct mlx4_dev *dev)
goto err_pd_table_free;
}
- mlx4_map_catas_buf(dev);
-
err = mlx4_init_eq_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize "
"event queue table, aborting.\n");
- goto err_catas_buf;
+ goto err_mr_table_free;
}
err = mlx4_cmd_use_events(dev);
@@ -658,8 +657,7 @@ err_cmd_poll:
err_eq_table_free:
mlx4_cleanup_eq_table(dev);
-err_catas_buf:
- mlx4_unmap_catas_buf(dev);
+err_mr_table_free:
mlx4_cleanup_mr_table(dev);
err_pd_table_free:
@@ -835,9 +833,6 @@ err_cleanup:
mlx4_cleanup_cq_table(dev);
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
-
- mlx4_unmap_catas_buf(dev);
-
mlx4_cleanup_mr_table(dev);
mlx4_cleanup_pd_table(dev);
mlx4_cleanup_uar_table(dev);
@@ -884,9 +879,6 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev)
mlx4_cleanup_cq_table(dev);
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
-
- mlx4_unmap_catas_buf(dev);
-
mlx4_cleanup_mr_table(dev);
mlx4_cleanup_pd_table(dev);
@@ -907,6 +899,12 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev)
}
}
+int mlx4_restart_one(struct pci_dev *pdev)
+{
+ mlx4_remove_one(pdev);
+ return mlx4_init_one(pdev, NULL);
+}
+
static struct pci_device_id mlx4_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
@@ -929,6 +927,10 @@ static int __init mlx4_init(void)
{
int ret;
+ ret = mlx4_catas_init();
+ if (ret)
+ return ret;
+
ret = pci_register_driver(&mlx4_driver);
return ret < 0 ? ret : 0;
}
@@ -936,6 +938,7 @@ static int __init mlx4_init(void)
static void __exit mlx4_cleanup(void)
{
pci_unregister_driver(&mlx4_driver);
+ mlx4_catas_cleanup();
}
module_init(mlx4_init);
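
A note on ordering in the mlx4_init() hunk above: mlx4_catas_init() must run before pci_register_driver(), since any device probed afterwards may queue reset work on the catas workqueue. As shown, the hunk does not destroy that workqueue if pci_register_driver() fails; a sketch of such an unwind (my assumption, not part of this commit) would be:

static int __init mlx4_init(void)
{
	int ret;

	ret = mlx4_catas_init();	/* create the "mlx4_err" workqueue first */
	if (ret)
		return ret;

	ret = pci_register_driver(&mlx4_driver);
	if (ret < 0)
		mlx4_catas_cleanup();	/* unwind if registration fails */

	return ret < 0 ? ret : 0;
}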
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 3d3b6d24d8d3..be304a7c2c91 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -37,7 +37,9 @@
#ifndef MLX4_H
#define MLX4_H
+#include <linux/mutex.h>
#include <linux/radix-tree.h>
+#include <linux/timer.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -66,7 +68,6 @@ enum {
enum {
MLX4_EQ_ASYNC,
MLX4_EQ_COMP,
- MLX4_EQ_CATAS,
MLX4_NUM_EQ
};
@@ -247,7 +248,8 @@ struct mlx4_mcg_table {
struct mlx4_catas_err {
u32 __iomem *map;
- int size;
+ struct timer_list timer;
+ struct list_head list;
};
struct mlx4_priv {
@@ -310,9 +312,11 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
-void mlx4_map_catas_buf(struct mlx4_dev *dev);
-void mlx4_unmap_catas_buf(struct mlx4_dev *dev);
-
+void mlx4_start_catas_poll(struct mlx4_dev *dev);
+void mlx4_stop_catas_poll(struct mlx4_dev *dev);
+int mlx4_catas_init(void);
+void mlx4_catas_cleanup(void);
+int mlx4_restart_one(struct pci_dev *pdev);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index d0808fa3ec82..5b87183e62ce 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -255,10 +255,8 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
int err;
index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
- if (index == -1) {
- err = -ENOMEM;
- goto err;
- }
+ if (index == -1)
+ return -ENOMEM;
mr->iova = iova;
mr->size = size;
@@ -269,15 +267,8 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
if (err)
- goto err_index;
-
- return 0;
-
-err_index:
- mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+ mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
-err:
- kfree(mr);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index 7f8b7d55b6e1..19b48c71cf7f 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -113,8 +113,7 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
struct mlx4_cmd_mailbox *mailbox;
int ret = 0;
- if (cur_state < 0 || cur_state >= MLX4_QP_NUM_STATE ||
- new_state < 0 || cur_state >= MLX4_QP_NUM_STATE ||
+ if (cur_state >= MLX4_QP_NUM_STATE || cur_state >= MLX4_QP_NUM_STATE ||
!op[cur_state][new_state])
return -EINVAL;
@@ -278,3 +277,24 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
mlx4_CONF_SPECIAL_QP(dev, 0);
mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
}
+
+int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ struct mlx4_qp_context *context)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
+ MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
+ if (!err)
+ memcpy(context, mailbox->buf + 8, sizeof *context);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_query);
+
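
The qp.c hunk above exports a new helper, mlx4_qp_query(), which issues a QUERY_QP mailbox command and copies the firmware's QP context back to the caller. A hedged usage sketch follows; dump_qp_state() is a hypothetical consumer, not part of this commit, and dev/qp are assumed to come from the usual mlx4 allocation paths.

#include <linux/kernel.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>

/* Hypothetical caller of the new export. */
static int dump_qp_state(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	struct mlx4_qp_context context;
	int err;

	err = mlx4_qp_query(dev, qp, &context);
	if (err) {
		printk(KERN_WARNING "QUERY_QP for QPN 0x%x failed (%d)\n",
		       qp->qpn, err);
		return err;
	}

	/* 'context' now holds the firmware's view of the QP (big-endian fields). */
	return 0;
}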
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c
index e4dfd4b11a4a..e199715fabd0 100644
--- a/drivers/net/mlx4/reset.c
+++ b/drivers/net/mlx4/reset.c
@@ -119,6 +119,9 @@ int mlx4_reset(struct mlx4_dev *dev)
writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET);
iounmap(reset);
+ /* Docs say to wait one second before accessing device */
+ msleep(1000);
+
end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
do {
if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) &&
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index 2134f83aed87..b061c86d6839 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -102,6 +102,13 @@ static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
MLX4_CMD_TIME_CLASS_B);
}
+static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
+ int srq_num)
+{
+ return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
+ MLX4_CMD_TIME_CLASS_A);
+}
+
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
u64 db_rec, struct mlx4_srq *srq)
{
@@ -205,6 +212,29 @@ int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark
}
EXPORT_SYMBOL_GPL(mlx4_srq_arm);
+int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_srq_context *srq_context;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ srq_context = mailbox->buf;
+
+ err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn);
+ if (err)
+ goto err_out;
+ *limit_watermark = srq_context->limit_watermark;
+
+err_out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_srq_query);
+
int __devinit mlx4_init_srq_table(struct mlx4_dev *dev)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;