author     Michael S. Tsirkin <mst@mellanox.co.il>    2007-02-10 23:15:08 +0200
committer  Roland Dreier <rolandd@cisco.com>          2007-02-12 16:16:29 -0800
commit     391e4dea7189eef32b0c2d121e7e047110c1b83c
tree       99cfb7f912837fb6f37ae290c9f1345d218eab06   /drivers/infiniband/hw/mthca/mthca_main.c
parent     1d1f19cfce7687b557cebdc41bf8a5eeba8a9882
IB/mthca: Fix access to MTT and MPT tables on non-cache-coherent CPUs
We allocate the MTT table with alloc_pages() and then do pci_map_sg(), so we must call pci_dma_sync_sg() after the CPU writes to the MTT table. This works since the device will never write MTTs on mem-free HCAs, once we get rid of the use of the WRITE_MTT firmware command; this change is needed to make that work, and is an improvement for now, since it gives FMRs a chance at working.

For MPTs, both the device and CPU might write there, so we must allocate DMA coherent memory for these.

Signed-off-by: Michael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
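As an illustration only (not part of this patch), here is a minimal sketch of the two DMA patterns the commit message describes, using the generic PCI DMA API of that era; the function and variable names are hypothetical and not taken from the driver:

/*
 * Sketch, not driver code.  MTT-style tables are streaming-DMA mappings:
 * only the CPU writes them, so CPU writes must be flushed toward the
 * device before the HCA reads the entries.  MPT-style tables may be
 * written by both the CPU and the device, so they need coherent memory.
 */
#include <linux/pci.h>

static void example_sync_mtt(struct pci_dev *pdev,
                             struct scatterlist *sg, int nents)
{
        /* ... CPU fills in MTT entries through the kernel mapping ... */
        pci_dma_sync_sg_for_device(pdev, sg, nents, PCI_DMA_TODEVICE);
}

static void *example_alloc_mpt(struct pci_dev *pdev, size_t size,
                               dma_addr_t *dma_handle)
{
        /* Coherent allocation: no explicit syncs are needed for CPU or
         * device writes to this buffer. */
        return pci_alloc_consistent(pdev, size, dma_handle);
}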
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_main.c')
-rw-r--r--   drivers/infiniband/hw/mthca/mthca_main.c   36
1 files changed, 21 insertions, 15 deletions
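The diff threads one extra trailing argument through mthca_alloc_icm(), mthca_free_icm(), and mthca_alloc_icm_table(); judging from the call sites below (only the MPT table passes 1, everything else passes 0), it is presumably a "use coherent memory" flag. A sketch of what the updated prototypes would look like, inferred from this diff alone since the header change is not shown here:

/* Assumed prototypes, inferred from the new trailing argument at each
 * call site in this patch; the parameter names are guesses. */
struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
                                  gfp_t gfp_mask, int coherent);
void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm,
                    int coherent);
struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
                                              u64 virt, int obj_size,
                                              int nobj, int reserved,
                                              int use_lowmem,
                                              int use_coherent);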
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 9a9dd32885a0..0d9b7d06bbc2 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -379,7 +379,7 @@ static int mthca_load_fw(struct mthca_dev *mdev)
mdev->fw.arbel.fw_icm =
mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
- GFP_HIGHUSER | __GFP_NOWARN);
+ GFP_HIGHUSER | __GFP_NOWARN, 0);
if (!mdev->fw.arbel.fw_icm) {
mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
return -ENOMEM;
@@ -412,7 +412,7 @@ err_unmap_fa:
mthca_UNMAP_FA(mdev, &status);
err_free:
- mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
+ mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
return err;
}
@@ -441,7 +441,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
(unsigned long long) aux_pages << 2);
mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
- GFP_HIGHUSER | __GFP_NOWARN);
+ GFP_HIGHUSER | __GFP_NOWARN, 0);
if (!mdev->fw.arbel.aux_icm) {
mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
return -ENOMEM;
@@ -471,7 +471,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
MTHCA_MTT_SEG_SIZE,
mdev->limits.num_mtt_segs,
- mdev->limits.reserved_mtts, 1);
+ mdev->limits.reserved_mtts,
+ 1, 0);
if (!mdev->mr_table.mtt_table) {
mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
err = -ENOMEM;
@@ -481,7 +482,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
dev_lim->mpt_entry_sz,
mdev->limits.num_mpts,
- mdev->limits.reserved_mrws, 1);
+ mdev->limits.reserved_mrws,
+ 1, 1);
if (!mdev->mr_table.mpt_table) {
mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
err = -ENOMEM;
@@ -491,7 +493,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
dev_lim->qpc_entry_sz,
mdev->limits.num_qps,
- mdev->limits.reserved_qps, 0);
+ mdev->limits.reserved_qps,
+ 0, 0);
if (!mdev->qp_table.qp_table) {
mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
err = -ENOMEM;
@@ -501,7 +504,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
dev_lim->eqpc_entry_sz,
mdev->limits.num_qps,
- mdev->limits.reserved_qps, 0);
+ mdev->limits.reserved_qps,
+ 0, 0);
if (!mdev->qp_table.eqp_table) {
mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
err = -ENOMEM;
@@ -511,7 +515,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
MTHCA_RDB_ENTRY_SIZE,
mdev->limits.num_qps <<
- mdev->qp_table.rdb_shift,
+ mdev->qp_table.rdb_shift, 0,
0, 0);
if (!mdev->qp_table.rdb_table) {
mthca_err(mdev, "Failed to map RDB context memory, aborting\n");
@@ -522,7 +526,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
dev_lim->cqc_entry_sz,
mdev->limits.num_cqs,
- mdev->limits.reserved_cqs, 0);
+ mdev->limits.reserved_cqs,
+ 0, 0);
if (!mdev->cq_table.table) {
mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
err = -ENOMEM;
@@ -534,7 +539,8 @@ static int mthca_init_icm(struct mthca_dev *mdev,
mthca_alloc_icm_table(mdev, init_hca->srqc_base,
dev_lim->srq_entry_sz,
mdev->limits.num_srqs,
- mdev->limits.reserved_srqs, 0);
+ mdev->limits.reserved_srqs,
+ 0, 0);
if (!mdev->srq_table.table) {
mthca_err(mdev, "Failed to map SRQ context memory, "
"aborting.\n");
@@ -554,7 +560,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
mdev->limits.num_amgms,
mdev->limits.num_mgms +
mdev->limits.num_amgms,
- 0);
+ 0, 0);
if (!mdev->mcg_table.table) {
mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
err = -ENOMEM;
@@ -592,7 +598,7 @@ err_unmap_aux:
mthca_UNMAP_ICM_AUX(mdev, &status);
err_free_aux:
- mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+ mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
return err;
}
@@ -613,7 +619,7 @@ static void mthca_free_icms(struct mthca_dev *mdev)
mthca_unmap_eq_icm(mdev);
mthca_UNMAP_ICM_AUX(mdev, &status);
- mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+ mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
}
static int mthca_init_arbel(struct mthca_dev *mdev)
@@ -697,7 +703,7 @@ err_free_icm:
err_stop_fw:
mthca_UNMAP_FA(mdev, &status);
- mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
+ mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
err_disable:
if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
@@ -716,7 +722,7 @@ static void mthca_close_hca(struct mthca_dev *mdev)
mthca_free_icms(mdev);
mthca_UNMAP_FA(mdev, &status);
- mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
+ mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
mthca_DISABLE_LAM(mdev, &status);