From fc9b4910b00039da054f221e2821be0519261101 Mon Sep 17 00:00:00 2001
From: Mika Westerberg
Date: Sat, 11 Jun 2011 08:39:55 +0000
Subject: net: ep93xx_eth: pass struct device to DMA API functions

We shouldn't pass NULL to the DMA API functions unless we are dealing
with an ISA or EISA device, so pass the correct struct device pointer
to these functions.

Signed-off-by: Mika Westerberg
Signed-off-by: David S. Miller
---
 drivers/net/arm/ep93xx_eth.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

(limited to 'drivers/net/arm/ep93xx_eth.c')

diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 5a77001b6d10..f65dfb6e5705 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -284,7 +284,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
         skb = dev_alloc_skb(length + 2);
         if (likely(skb != NULL)) {
             skb_reserve(skb, 2);
-            dma_sync_single_for_cpu(NULL, ep->descs->rdesc[entry].buf_addr,
+            dma_sync_single_for_cpu(dev->dev.parent, ep->descs->rdesc[entry].buf_addr,
                         length, DMA_FROM_DEVICE);
             skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
             skb_put(skb, length);
@@ -362,7 +362,7 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
     ep->descs->tdesc[entry].tdesc1 =
         TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
     skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
-    dma_sync_single_for_cpu(NULL, ep->descs->tdesc[entry].buf_addr,
+    dma_sync_single_for_cpu(dev->dev.parent, ep->descs->tdesc[entry].buf_addr,
                 skb->len, DMA_TO_DEVICE);
     dev_kfree_skb(skb);
 
@@ -457,6 +457,7 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
 
 static void ep93xx_free_buffers(struct ep93xx_priv *ep)
 {
+    struct device *dev = ep->dev->dev.parent;
     int i;
 
     for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
@@ -464,7 +465,7 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
 
         d = ep->descs->rdesc[i].buf_addr;
         if (d)
-            dma_unmap_single(NULL, d, PAGE_SIZE, DMA_FROM_DEVICE);
+            dma_unmap_single(dev, d, PAGE_SIZE, DMA_FROM_DEVICE);
 
         if (ep->rx_buf[i] != NULL)
             free_page((unsigned long)ep->rx_buf[i]);
@@ -475,13 +476,13 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
 
         d = ep->descs->tdesc[i].buf_addr;
         if (d)
-            dma_unmap_single(NULL, d, PAGE_SIZE, DMA_TO_DEVICE);
+            dma_unmap_single(dev, d, PAGE_SIZE, DMA_TO_DEVICE);
 
         if (ep->tx_buf[i] != NULL)
             free_page((unsigned long)ep->tx_buf[i]);
     }
 
-    dma_free_coherent(NULL, sizeof(struct ep93xx_descs), ep->descs,
+    dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
               ep->descs_dma_addr);
 }
 
@@ -491,9 +492,10 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
  */
 static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
 {
+    struct device *dev = ep->dev->dev.parent;
     int i;
 
-    ep->descs = dma_alloc_coherent(NULL, sizeof(struct ep93xx_descs),
+    ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
                        &ep->descs_dma_addr, GFP_KERNEL | GFP_DMA);
     if (ep->descs == NULL)
         return 1;
@@ -506,8 +508,8 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
         if (page == NULL)
             goto err;
 
-        d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
-        if (dma_mapping_error(NULL, d)) {
+        d = dma_map_single(dev, page, PAGE_SIZE, DMA_FROM_DEVICE);
+        if (dma_mapping_error(dev, d)) {
             free_page((unsigned long)page);
             goto err;
         }
@@ -529,8 +531,8 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
         if (page == NULL)
             goto err;
 
-        d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
-        if (dma_mapping_error(NULL, d)) {
+        d = dma_map_single(dev, page, PAGE_SIZE, DMA_TO_DEVICE);
+        if (dma_mapping_error(dev, d)) {
             free_page((unsigned long)page);
             goto err;
         }
@@ -829,6 +831,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
     }
     ep = netdev_priv(dev);
     ep->dev = dev;
+    SET_NETDEV_DEV(dev, &pdev->dev);
     netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
 
     platform_set_drvdata(pdev, dev);
--
cgit v1.2.3
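Note (illustration, not part of the patch): a minimal sketch of the pattern the
change above establishes, namely giving every streaming-DMA call the real
struct device (typically &pdev->dev for a platform device) instead of NULL so
the DMA layer can apply the correct per-device masks and operations. The
helper name and the rx_buf/handle parameters are hypothetical.

    #include <linux/dma-mapping.h>
    #include <linux/platform_device.h>

    /* Hypothetical example: map an RX buffer against the platform device
     * instead of NULL.  dma_map_single()/dma_mapping_error() are the real
     * DMA API calls discussed in the patch. */
    static int example_map_rx_buffer(struct platform_device *pdev, void *rx_buf,
                                     size_t len, dma_addr_t *handle)
    {
            struct device *dev = &pdev->dev;  /* never NULL for DMA API calls */
            dma_addr_t d;

            d = dma_map_single(dev, rx_buf, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, d))
                    return -ENOMEM;

            *handle = d;
            return 0;
    }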
From 3247a1fcee49b571b40c4bd723439ce5c64f56ad Mon Sep 17 00:00:00 2001
From: Mika Westerberg
Date: Sat, 11 Jun 2011 08:39:56 +0000
Subject: net: ep93xx_eth: allocate buffers using kmalloc()

We can simply use kmalloc() to allocate the buffers. This also simplifies
the code and allows us to perform DMA sync operations more easily.

Memory is allocated with only GFP_KERNEL since there are no DMA
allocation restrictions on this platform.

Signed-off-by: Mika Westerberg
Acked-by: Russell King
Acked-by: H Hartley Sweeten
Tested-by: Petr Stetiar
Signed-off-by: David S. Miller
---
 drivers/net/arm/ep93xx_eth.c | 51 +++++++++++++++++---------------------------
 1 file changed, 20 insertions(+), 31 deletions(-)

(limited to 'drivers/net/arm/ep93xx_eth.c')

diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index f65dfb6e5705..97bf6b10ce61 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -460,36 +460,32 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
     struct device *dev = ep->dev->dev.parent;
     int i;
 
-    for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
+    for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
         dma_addr_t d;
 
         d = ep->descs->rdesc[i].buf_addr;
         if (d)
-            dma_unmap_single(dev, d, PAGE_SIZE, DMA_FROM_DEVICE);
+            dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
 
         if (ep->rx_buf[i] != NULL)
-            free_page((unsigned long)ep->rx_buf[i]);
+            kfree(ep->rx_buf[i]);
     }
 
-    for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
+    for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
         dma_addr_t d;
 
         d = ep->descs->tdesc[i].buf_addr;
         if (d)
-            dma_unmap_single(dev, d, PAGE_SIZE, DMA_TO_DEVICE);
+            dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
 
         if (ep->tx_buf[i] != NULL)
-            free_page((unsigned long)ep->tx_buf[i]);
+            kfree(ep->tx_buf[i]);
     }
 
     dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
               ep->descs_dma_addr);
 }
 
-/*
- * The hardware enforces a sub-2K maximum packet size, so we put
- * two buffers on every hardware page.
- */
 static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
 {
     struct device *dev = ep->dev->dev.parent;
@@ -500,48 +496,41 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
     if (ep->descs == NULL)
         return 1;
 
-    for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
-        void *page;
+    for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+        void *buf;
         dma_addr_t d;
 
-        page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
-        if (page == NULL)
+        buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+        if (buf == NULL)
             goto err;
 
-        d = dma_map_single(dev, page, PAGE_SIZE, DMA_FROM_DEVICE);
+        d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
         if (dma_mapping_error(dev, d)) {
-            free_page((unsigned long)page);
+            kfree(buf);
             goto err;
         }
 
-        ep->rx_buf[i] = page;
+        ep->rx_buf[i] = buf;
         ep->descs->rdesc[i].buf_addr = d;
         ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
-
-        ep->rx_buf[i + 1] = page + PKT_BUF_SIZE;
-        ep->descs->rdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
-        ep->descs->rdesc[i + 1].rdesc1 = ((i + 1) << 16) | PKT_BUF_SIZE;
     }
 
-    for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
-        void *page;
+    for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+        void *buf;
         dma_addr_t d;
 
-        page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
-        if (page == NULL)
+        buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+        if (buf == NULL)
             goto err;
 
-        d = dma_map_single(dev, page, PAGE_SIZE, DMA_TO_DEVICE);
+        d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
         if (dma_mapping_error(dev, d)) {
-            free_page((unsigned long)page);
+            kfree(buf);
             goto err;
         }
 
-        ep->tx_buf[i] = page;
+        ep->tx_buf[i] = buf;
         ep->descs->tdesc[i].buf_addr = d;
-
-        ep->tx_buf[i + 1] = page + PKT_BUF_SIZE;
-        ep->descs->tdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
     }
 
     return 0;
--
cgit v1.2.3
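Note (illustration, not part of the patch): a minimal sketch of the
kmalloc()-plus-dma_map_single() pattern the patch switches to, one buffer per
descriptor. The helper name and the pkt_buf_size parameter are hypothetical;
the driver uses its own PKT_BUF_SIZE constant.

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    /* Hypothetical example: allocate one packet buffer with kmalloc() and
     * map it for streaming DMA.  Returns the buffer or NULL on failure. */
    static void *example_alloc_rx_buf(struct device *dev, size_t pkt_buf_size,
                                      dma_addr_t *handle)
    {
            void *buf = kmalloc(pkt_buf_size, GFP_KERNEL);

            if (!buf)
                    return NULL;

            *handle = dma_map_single(dev, buf, pkt_buf_size, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, *handle)) {
                    kfree(buf);
                    return NULL;
            }
            return buf;
    }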
From 1f758a4341ac83289a549e6ba2d29a08cf639717 Mon Sep 17 00:00:00 2001
From: Mika Westerberg
Date: Sat, 11 Jun 2011 08:39:57 +0000
Subject: net: ep93xx_eth: drop GFP_DMA from call to dma_alloc_coherent()

Commit a197b59ae6e8 (mm: fail GFP_DMA allocations when ZONE_DMA is not
configured) made the page allocator return NULL if GFP_DMA is set but
CONFIG_ZONE_DMA is disabled. This causes ep93xx_eth to fail:

WARNING: at mm/page_alloc.c:2251 __alloc_pages_nodemask+0x11c/0x638()
Modules linked in:
[] (unwind_backtrace+0x0/0xf4) from [] (warn_slowpath_common+0x48/0x60)
[] (warn_slowpath_common+0x48/0x60) from [] (warn_slowpath_null+0x1c/0x24)
[] (warn_slowpath_null+0x1c/0x24) from [] (__alloc_pages_nodemask+0x11c/0x638)
[] (__alloc_pages_nodemask+0x11c/0x638) from [] (__dma_alloc+0x8c/0x3ec)
[] (__dma_alloc+0x8c/0x3ec) from [] (dma_alloc_coherent+0x54/0x60)
[] (dma_alloc_coherent+0x54/0x60) from [] (ep93xx_open+0x20/0x864)
[] (ep93xx_open+0x20/0x864) from [] (__dev_open+0xb8/0x108)
[] (__dev_open+0xb8/0x108) from [] (__dev_change_flags+0x70/0x128)
[] (__dev_change_flags+0x70/0x128) from [] (dev_change_flags+0x10/0x48)
[] (dev_change_flags+0x10/0x48) from [] (ip_auto_config+0x190/0xf68)
[] (ip_auto_config+0x190/0xf68) from [] (do_one_initcall+0x34/0x18c)
[] (do_one_initcall+0x34/0x18c) from [] (kernel_init+0x94/0x134)
[] (kernel_init+0x94/0x134) from [] (kernel_thread_exit+0x0/0x8)

Since there are no DMA restrictions on ep93xx, we can fix this by just
removing the GFP_DMA flag from the call.

Signed-off-by: Mika Westerberg
Acked-by: Russell King
Acked-by: H Hartley Sweeten
Tested-by: Petr Stetiar
Signed-off-by: David S. Miller
---
 drivers/net/arm/ep93xx_eth.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/net/arm/ep93xx_eth.c')

diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 97bf6b10ce61..bef38116abaf 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -492,7 +492,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
     int i;
 
     ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
-                       &ep->descs_dma_addr, GFP_KERNEL | GFP_DMA);
+                       &ep->descs_dma_addr, GFP_KERNEL);
     if (ep->descs == NULL)
         return 1;
 
--
cgit v1.2.3
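Note (illustration, not part of the patch): a sketch of a coherent
descriptor-ring allocation after this change, assuming a descriptor struct like
the driver's ep93xx_descs. Plain GFP_KERNEL is sufficient because ep93xx has no
DMA addressing restrictions, and GFP_DMA would fail when CONFIG_ZONE_DMA is
disabled; the helper name is hypothetical.

    #include <linux/dma-mapping.h>

    /* Hypothetical example: coherent allocation without GFP_DMA. */
    static void *example_alloc_descs(struct device *dev, size_t size,
                                     dma_addr_t *dma_handle)
    {
            return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
    }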
From f1c089e3192f1afdfa76226dc38ef81b08ac810d Mon Sep 17 00:00:00 2001
From: Mika Westerberg
Date: Sat, 11 Jun 2011 08:39:58 +0000
Subject: net: ep93xx_eth: fix DMA API violations

Russell King said:

> So, to summarize what its doing:
>
> 1. It allocates buffers for rx and tx.
> 2. It maps them with dma_map_single().
>    This transfers ownership of the buffer to the DMA device.
> 3. In ep93xx_xmit,
>    3a. It copies the data into the buffer with skb_copy_and_csum_dev()
>        This violates the DMA buffer ownership rules - the CPU should
>        not be writing to this buffer while it is (in principle) owned
>        by the DMA device.
>    3b. It then calls dma_sync_single_for_cpu() for the buffer.
>        This transfers ownership of the buffer to the CPU, which surely
>        is the wrong direction.
> 4. In ep93xx_rx,
>    4a. It calls dma_sync_single_for_cpu() for the buffer.
>        This at least transfers the DMA buffer ownership to the CPU
>        before the CPU reads the buffer
>    4b. It then uses skb_copy_to_linear_data() to copy the data out.
>        At no point does it transfer ownership back to the DMA device.
> 5. When the driver is removed, it dma_unmap_single()'s the buffer.
>    This transfers ownership of the buffer to the CPU.
> 6. It frees the buffer.
>
> While it may work on ep93xx, it's not respecting the DMA API rules,
> and with DMA debugging enabled it will probably encounter quite a few
> warnings.

This patch fixes these violations.

Signed-off-by: Mika Westerberg
Acked-by: Russell King
Acked-by: H Hartley Sweeten
Tested-by: Petr Stetiar
Signed-off-by: David S. Miller
---
 drivers/net/arm/ep93xx_eth.c | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

(limited to 'drivers/net/arm/ep93xx_eth.c')

diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index bef38116abaf..0b46b8ea0e80 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -283,10 +283,14 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
 
         skb = dev_alloc_skb(length + 2);
         if (likely(skb != NULL)) {
+            struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
             skb_reserve(skb, 2);
-            dma_sync_single_for_cpu(dev->dev.parent, ep->descs->rdesc[entry].buf_addr,
+            dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
                         length, DMA_FROM_DEVICE);
             skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
+            dma_sync_single_for_device(dev->dev.parent,
+                           rxd->buf_addr, length,
+                           DMA_FROM_DEVICE);
             skb_put(skb, length);
             skb->protocol = eth_type_trans(skb, dev);
 
@@ -348,6 +352,7 @@ poll_some_more:
 static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
 {
     struct ep93xx_priv *ep = netdev_priv(dev);
+    struct ep93xx_tdesc *txd;
     int entry;
 
     if (unlikely(skb->len > MAX_PKT_SIZE)) {
@@ -359,11 +364,14 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
     entry = ep->tx_pointer;
     ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);
 
-    ep->descs->tdesc[entry].tdesc1 =
-        TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
+    txd = &ep->descs->tdesc[entry];
+
+    txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
+    dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
+                DMA_TO_DEVICE);
     skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
-    dma_sync_single_for_cpu(dev->dev.parent, ep->descs->tdesc[entry].buf_addr,
-                skb->len, DMA_TO_DEVICE);
+    dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
+                   DMA_TO_DEVICE);
     dev_kfree_skb(skb);
 
     spin_lock_irq(&ep->tx_pending_lock);
--
cgit v1.2.3
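Note (illustration, not part of the patch): to make the ownership rules quoted
above concrete, a hypothetical sketch of the CPU-access window on a streaming
mapping. The CPU may only touch the buffer between dma_sync_single_for_cpu()
and dma_sync_single_for_device(); afterwards the buffer again belongs to the
device. The helper name mirrors the driver's TX path but is not driver code.

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    /* Hypothetical example: fill a pre-mapped TX buffer while respecting
     * DMA buffer ownership. */
    static void example_fill_tx_buf(struct device *dev, void *tx_buf,
                                    dma_addr_t buf_addr, struct sk_buff *skb)
    {
            /* take ownership: device -> CPU */
            dma_sync_single_for_cpu(dev, buf_addr, skb->len, DMA_TO_DEVICE);

            /* CPU writes the packet data while it owns the buffer */
            skb_copy_and_csum_dev(skb, tx_buf);

            /* hand ownership back: CPU -> device, before transmission */
            dma_sync_single_for_device(dev, buf_addr, skb->len, DMA_TO_DEVICE);
    }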