author		Dimitris Michailidis <dm@chelsio.com>	2010-12-14 21:36:55 +0000
committer	David S. Miller <davem@davemloft.net>	2010-12-16 13:16:04 -0800
commit		ad6bad3efbb82206837daad8cba2bc9343d778bf (patch)
tree		25980911778dba0ec65c566fd9064fee83877f21 /drivers/net/cxgb4
parent		23d88e1d3e4a5b807ce6725f9294b7b9dfcd89a1 (diff)
cxgb4: NUMA-aware Tx queue allocations
Allocate Tx queue memory on the node indicated by the new
netdev_queue_numa_node_read.

Signed-off-by: Dimitris Michailidis <dm@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
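In short, the patch threads a NUMA node through alloc_ring(): software ring
state for an Ethernet Tx queue is now allocated with kzalloc_node() on the
node reported by netdev_queue_numa_node_read() for that queue, while rings
with no queue affinity pass NUMA_NO_NODE. The following is a minimal,
hypothetical sketch of that allocation pattern only; my_alloc_sw_ring is not
part of the cxgb4 driver:

#include <linux/netdevice.h>
#include <linux/numa.h>
#include <linux/slab.h>

/*
 * Allocate a zeroed software ring near the CPU(s) servicing @txq.
 * A NULL queue means "no placement preference", i.e. NUMA_NO_NODE.
 */
static void *my_alloc_sw_ring(struct netdev_queue *txq, size_t nelem,
			      size_t elem_size)
{
	int node = txq ? netdev_queue_numa_node_read(txq) : NUMA_NO_NODE;

	return kzalloc_node(nelem * elem_size, GFP_KERNEL, node);
}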
Diffstat (limited to 'drivers/net/cxgb4')
-rw-r--r--	drivers/net/cxgb4/sge.c	17
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index cc0b9975d472..311471b439a8 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -579,6 +579,7 @@ static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
  *	@phys: the physical address of the allocated ring
  *	@metadata: address of the array holding the SW state for the ring
  *	@stat_size: extra space in HW ring for status information
+ *	@node: preferred node for memory allocations
  *
  *	Allocates resources for an SGE descriptor ring, such as Tx queues,
  *	free buffer lists, or response queues. Each SGE ring requires
@@ -590,7 +591,7 @@ static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
  */
 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
 			size_t sw_size, dma_addr_t *phys, void *metadata,
-			size_t stat_size)
+			size_t stat_size, int node)
 {
 	size_t len = nelem * elem_size + stat_size;
 	void *s = NULL;
@@ -599,7 +600,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
 	if (!p)
 		return NULL;
 	if (sw_size) {
-		s = kcalloc(nelem, sw_size, GFP_KERNEL);
+		s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
 		if (!s) {
 			dma_free_coherent(dev, len, p, *phys);
@@ -1982,7 +1983,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 	iq->size = roundup(iq->size, 16);
 	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
-			      &iq->phys_addr, NULL, 0);
+			      &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
 	if (!iq->desc)
 		return -ENOMEM;
@@ -2008,7 +2009,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 		fl->size = roundup(fl->size, 8);
 		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
 				      sizeof(struct rx_sw_desc), &fl->addr,
-				      &fl->sdesc, STAT_LEN);
+				      &fl->sdesc, STAT_LEN, NUMA_NO_NODE);
 		if (!fl->desc)
 			goto fl_nomem;
@@ -2095,7 +2096,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
-			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+			netdev_queue_numa_node_read(netdevq));
 	if (!txq->q.desc)
 		return -ENOMEM;
@@ -2147,7 +2149,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
 	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
 				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
-				 NULL, 0);
+				 NULL, 0, NUMA_NO_NODE);
 	if (!txq->q.desc)
 		return -ENOMEM;
@@ -2198,7 +2200,8 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
-			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+			NUMA_NO_NODE);
 	if (!txq->q.desc)
 		return -ENOMEM;