From 0ca3ff10db648a38b0aeb219a19980fec6ae5135 Mon Sep 17 00:00:00 2001 From: Russell King - ARM Linux Date: Tue, 6 Mar 2012 22:34:06 +0000 Subject: dmaengine: move last completed cookie into generic dma_chan structure Every DMA engine implementation declares a last completed dma cookie in their private dma channel structures. This is pointless, and forces driver specific code. Move this out into the common dma_chan structure. Signed-off-by: Russell King Tested-by: Linus Walleij Reviewed-by: Linus Walleij Acked-by: Jassi Brar [imx-sdma.c & mxs-dma.c] Tested-by: Shawn Guo Signed-off-by: Vinod Koul cherry-picked from mainline 4d4e58de32a192fea65ab84509d17d199bd291c8 Change-Id: Ib653bcfa5f492986946fd34006a8de3090db0441 Signed-off-by: Laxman Dewangan Reviewed-on: http://git-master/r/93778 --- arch/arm/include/asm/hardware/iop_adma.h | 2 - drivers/dma/amba-pl08x.c | 8 +- drivers/dma/at_hdmac.c | 8 +- drivers/dma/at_hdmac_regs.h | 2 - drivers/dma/coh901318.c | 7 +- drivers/dma/dw_dmac.c | 10 +- drivers/dma/dw_dmac_regs.h | 1 - drivers/dma/ep93xx_dma.c | 16 +- drivers/dma/fsldma.c | 4 +- drivers/dma/fsldma.h | 1 - drivers/dma/imx-dma.c | 7 +- drivers/dma/imx-sdma.c | 7 +- drivers/dma/intel_mid_dma.c | 9 +- drivers/dma/intel_mid_dma_regs.h | 2 - drivers/dma/ioat/dma.c | 2 +- drivers/dma/ioat/dma.h | 4 +- drivers/dma/ioat/dma_v2.c | 2 +- drivers/dma/ioat/dma_v3.c | 2 +- drivers/dma/iop-adma.c | 10 +- drivers/dma/ipu/ipu_idmac.c | 10 +- drivers/dma/mpc512x_dma.c | 7 +- drivers/dma/mv_xor.c | 6 +- drivers/dma/mv_xor.h | 2 - drivers/dma/mxs-dma.c | 6 +- drivers/dma/pch_dma.c | 5 +- drivers/dma/pl330.c | 10 +- drivers/dma/ppc4xx/adma.c | 10 +- drivers/dma/ppc4xx/adma.h | 2 - drivers/dma/shdma.c | 10 +- drivers/dma/shdma.h | 1 - drivers/dma/sirf-dma.c | 706 +++++++++++++++++++++++++++++++ drivers/dma/ste_dma40.c | 10 +- drivers/dma/timb_dma.c | 7 +- drivers/dma/txx9dmac.c | 10 +- drivers/dma/txx9dmac.h | 1 - include/linux/amba/pl08x.h | 3 +- include/linux/dmaengine.h | 2 + 37 files changed, 798 insertions(+), 114 deletions(-) create mode 100644 drivers/dma/sirf-dma.c diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h index 59b8c3892f76..122f86d8c991 100644 --- a/arch/arm/include/asm/hardware/iop_adma.h +++ b/arch/arm/include/asm/hardware/iop_adma.h @@ -49,7 +49,6 @@ struct iop_adma_device { /** * struct iop_adma_chan - internal representation of an ADMA device * @pending: allows batching of hardware operations - * @completed_cookie: identifier for the most recently completed operation * @lock: serializes enqueue/dequeue operations to the slot pool * @mmr_base: memory mapped register base * @chain: device chain view of the descriptors @@ -62,7 +61,6 @@ struct iop_adma_device { */ struct iop_adma_chan { int pending; - dma_cookie_t completed_cookie; spinlock_t lock; /* protects the descriptor slot pool */ void __iomem *mmr_base; struct list_head chain; diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index be21e3f138a8..a8cdd8307490 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1020,7 +1020,7 @@ pl08x_dma_tx_status(struct dma_chan *chan, u32 bytesleft = 0; last_used = plchan->chan.cookie; - last_complete = plchan->lc; + last_complete = plchan->chan.completed_cookie; ret = dma_async_is_complete(cookie, last_complete, last_used); if (ret == DMA_SUCCESS) { @@ -1032,7 +1032,7 @@ pl08x_dma_tx_status(struct dma_chan *chan, * This cookie not complete yet */ last_used = plchan->chan.cookie; - last_complete = plchan->lc; + 
last_complete = plchan->chan.completed_cookie; /* Get number of bytes left in the active transactions and queue */ bytesleft = pl08x_getbytes_chan(plchan); @@ -1552,7 +1552,7 @@ static void pl08x_tasklet(unsigned long data) if (txd) { /* Update last completed */ - plchan->lc = txd->tx.cookie; + plchan->chan.completed_cookie = txd->tx.cookie; } /* If a new descriptor is queued, set it up plchan->at is NULL here */ @@ -1734,7 +1734,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, chan->chan.device = dmadev; chan->chan.cookie = 0; - chan->lc = 0; + chan->chan.completed_cookie = 0; spin_lock_init(&chan->lock); INIT_LIST_HEAD(&chan->pend_list); diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 6a483eac7b3f..54f6f19323ca 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -265,7 +265,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) dev_vdbg(chan2dev(&atchan->chan_common), "descriptor %u complete\n", txd->cookie); - atchan->completed_cookie = txd->cookie; + atchan->chan_common.completed_cookie = txd->cookie; /* move children to free_list */ list_splice_init(&desc->tx_list, &atchan->free_list); @@ -1008,14 +1008,14 @@ atc_tx_status(struct dma_chan *chan, spin_lock_bh(&atchan->lock); - last_complete = atchan->completed_cookie; + last_complete = chan->completed_cookie; last_used = chan->cookie; ret = dma_async_is_complete(cookie, last_complete, last_used); if (ret != DMA_SUCCESS) { atc_cleanup_descriptors(atchan); - last_complete = atchan->completed_cookie; + last_complete = chan->completed_cookie; last_used = chan->cookie; ret = dma_async_is_complete(cookie, last_complete, last_used); @@ -1264,8 +1264,8 @@ static int __init at_dma_probe(struct platform_device *pdev) struct at_dma_chan *atchan = &atdma->chan[i]; atchan->chan_common.device = &atdma->dma_common; - atchan->chan_common.cookie = atchan->completed_cookie = 1; atchan->chan_common.chan_id = i; + atchan->chan_common.cookie = atchan->chan_common.completed_cookie = 1; list_add_tail(&atchan->chan_common.device_node, &atdma->dma_common.channels); diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 087dbf1dd39c..1f5e4bce2094 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -205,7 +205,6 @@ enum atc_status { * to tasklet (use atomic operations) * @tasklet: bottom half to finish transaction work * @lock: serializes enqueue/dequeue operations to descriptors lists - * @completed_cookie: identifier for the most recently completed operation * @active_list: list of descriptors dmaengine is being running on * @queue: list of descriptors ready to be submitted to engine * @free_list: list of descriptors usable by the channel @@ -222,7 +221,6 @@ struct at_dma_chan { spinlock_t lock; /* these other elements are all protected by lock */ - dma_cookie_t completed_cookie; struct list_head active_list; struct list_head queue; struct list_head free_list; diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 4234f416ef11..489a2c77b7d8 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -59,7 +59,6 @@ struct coh901318_base { struct coh901318_chan { spinlock_t lock; int allocated; - int completed; int id; int stopped; @@ -705,7 +704,7 @@ static void dma_tasklet(unsigned long data) callback_param = cohd_fin->desc.callback_param; /* sign this job as completed on the channel */ - cohc->completed = cohd_fin->desc.cookie; + cohc->chan.completed_cookie = cohd_fin->desc.cookie; /* release the lli 
allocation and remove the descriptor */ coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli); @@ -929,7 +928,7 @@ static int coh901318_alloc_chan_resources(struct dma_chan *chan) coh901318_config(cohc, NULL); cohc->allocated = 1; - cohc->completed = chan->cookie = 1; + chan->completed_cookie = chan->cookie = 1; spin_unlock_irqrestore(&cohc->lock, flags); @@ -1169,7 +1168,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, dma_cookie_t last_complete; int ret; - last_complete = cohc->completed; + last_complete = chan->completed_cookie; last_used = chan->cookie; ret = dma_async_is_complete(cookie, last_complete, last_used); diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 4d180ca9a1d8..a5d2a12a3e37 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -211,7 +211,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); spin_lock_irqsave(&dwc->lock, flags); - dwc->completed = txd->cookie; + dwc->chan.completed_cookie = txd->cookie; if (callback_required) { callback = txd->callback; param = txd->callback_param; @@ -921,14 +921,14 @@ dwc_tx_status(struct dma_chan *chan, dma_cookie_t last_complete; int ret; - last_complete = dwc->completed; + last_complete = chan->completed_cookie; last_used = chan->cookie; ret = dma_async_is_complete(cookie, last_complete, last_used); if (ret != DMA_SUCCESS) { dwc_scan_descriptors(to_dw_dma(chan->device), dwc); - last_complete = dwc->completed; + last_complete = chan->completed_cookie; last_used = chan->cookie; ret = dma_async_is_complete(cookie, last_complete, last_used); @@ -973,7 +973,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) return -EIO; } - dwc->completed = chan->cookie = 1; + chan->completed_cookie = chan->cookie = 1; cfghi = DWC_CFGH_FIFO_MODE; cfglo = 0; @@ -1411,8 +1411,8 @@ static int __init dw_probe(struct platform_device *pdev) struct dw_dma_chan *dwc = &dw->chan[i]; dwc->chan.device = &dw->dma; - dwc->chan.cookie = dwc->completed = 1; dwc->chan.chan_id = i; + dwc->chan.cookie = dwc->chan.completed_cookie = 1; if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) list_add_tail(&dwc->chan.device_node, &dw->dma.channels); diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index c3419518d701..b119fd522927 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h @@ -145,7 +145,6 @@ struct dw_dma_chan { /* these other elements are all protected by lock */ unsigned long flags; - dma_cookie_t completed; struct list_head active_list; struct list_head queue; struct list_head free_list; diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 5d7a49bd7c26..3ba758345123 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -121,7 +121,6 @@ struct ep93xx_dma_desc { * @lock: lock protecting the fields following * @flags: flags for the channel * @buffer: which buffer to use next (0/1) - * @last_completed: last completed cookie value * @active: flattened chain of descriptors currently being processed * @queue: pending descriptors which are handled next * @free_list: list of free descriptors which can be used @@ -156,7 +155,6 @@ struct ep93xx_dma_chan { #define EP93XX_DMA_IS_CYCLIC 0 int buffer; - dma_cookie_t last_completed; struct list_head active; struct list_head queue; struct list_head free_list; @@ -674,9 +672,13 @@ static void ep93xx_dma_tasklet(unsigned long data) spin_lock_irq(&edmac->lock); desc = 
ep93xx_dma_get_active(edmac); - if (desc->complete) { - edmac->last_completed = desc->txd.cookie; - list_splice_init(&edmac->active, &list); + if (desc) { + if (desc->complete) { + edmac->chan.completed_cookie = desc->txd.cookie; + list_splice_init(&edmac->active, &list); + } + callback = desc->txd.callback; + callback_param = desc->txd.callback_param; } spin_unlock_irq(&edmac->lock); @@ -824,7 +826,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) goto fail_clk_disable; spin_lock_irq(&edmac->lock); - edmac->last_completed = 1; + edmac->chan.completed_cookie = 1; edmac->chan.cookie = 1; ret = edmac->edma->hw_setup(edmac); spin_unlock_irq(&edmac->lock); @@ -1217,7 +1219,7 @@ static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan, spin_lock_irqsave(&edmac->lock, flags); last_used = chan->cookie; - last_completed = edmac->last_completed; + last_completed = chan->completed_cookie; spin_unlock_irqrestore(&edmac->lock, flags); ret = dma_async_is_complete(cookie, last_completed, last_used); diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 8a781540590c..a7eddd336321 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -990,7 +990,7 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan, spin_lock_irqsave(&chan->desc_lock, flags); - last_complete = chan->completed_cookie; + last_complete = dchan->completed_cookie; last_used = dchan->cookie; spin_unlock_irqrestore(&chan->desc_lock, flags); @@ -1088,7 +1088,7 @@ static void dma_do_tasklet(unsigned long data) desc = to_fsl_desc(chan->ld_running.prev); cookie = desc->async_tx.cookie; - chan->completed_cookie = cookie; + chan->common.completed_cookie = cookie; chan_dbg(chan, "completed_cookie=%d\n", cookie); } diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 9cb5aa57c677..f5c38791fc74 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -137,7 +137,6 @@ struct fsldma_device { struct fsldma_chan { char name[8]; /* Channel name */ struct fsldma_chan_regs __iomem *regs; - dma_cookie_t completed_cookie; /* The maximum cookie completed */ spinlock_t desc_lock; /* Descriptor operation lock */ struct list_head ld_pending; /* Link descriptors queue */ struct list_head ld_running; /* Link descriptors queue */ diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index d99f71c356b5..935490be7ec8 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -39,7 +39,6 @@ struct imxdma_channel { struct dma_chan chan; spinlock_t lock; struct dma_async_tx_descriptor desc; - dma_cookie_t last_completed; enum dma_status status; int dma_request; struct scatterlist *sg_list; @@ -63,7 +62,7 @@ static void imxdma_handle(struct imxdma_channel *imxdmac) { if (imxdmac->desc.callback) imxdmac->desc.callback(imxdmac->desc.callback_param); - imxdmac->last_completed = imxdmac->desc.cookie; + imxdmac->chan.completed_cookie = imxdmac->desc.cookie; } static void imxdma_irq_handler(int channel, void *data) @@ -156,8 +155,8 @@ static enum dma_status imxdma_tx_status(struct dma_chan *chan, last_used = chan->cookie; - ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used); - dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0); + ret = dma_async_is_complete(cookie, chan->completed_cookie, last_used); + dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0); return ret; } diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 7bd7e98548cd..c3dd583ff138 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -264,7 +264,6 @@ 
struct sdma_channel {
 	struct dma_chan			chan;
 	spinlock_t			lock;
 	struct dma_async_tx_descriptor	desc;
-	dma_cookie_t			last_completed;
 	enum dma_status			status;
 };
 
@@ -509,6 +508,6 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
 	else
 		sdmac->status = DMA_SUCCESS;
 
+	sdmac->chan.completed_cookie = sdmac->desc.cookie;
 	if (sdmac->desc.callback)
 		sdmac->desc.callback(sdmac->desc.callback_param);
-	sdmac->last_completed = sdmac->desc.cookie;
@@ -1105,7 +1105,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 
 	last_used = chan->cookie;
 
-	dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
+	dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);
 
 	return sdmac->status;
 }
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 8a3fdd87db97..e8b575bd086d 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -288,7 +288,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
 	struct intel_mid_dma_lli	*llitem;
 	void *param_txd = NULL;
 
-	midc->completed = txd->cookie;
+	midc->chan.completed_cookie = txd->cookie;
 	callback_txd = txd->callback;
 	param_txd = txd->callback_param;
 
@@ -481,19 +481,19 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
 						dma_cookie_t cookie,
 						struct dma_tx_state *txstate)
 {
 	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
 	dma_cookie_t		last_used;
 	dma_cookie_t		last_complete;
 	int				ret;
 
-	last_complete = midc->completed;
+	last_complete = chan->completed_cookie;
 	last_used = chan->cookie;
 
 	ret = dma_async_is_complete(cookie, last_complete, last_used);
 	if (ret != DMA_SUCCESS) {
 		midc_scan_descriptors(to_middma_device(chan->device), midc);
-		last_complete = midc->completed;
+		last_complete = chan->completed_cookie;
 		last_used = chan->cookie;
 
 		ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -882,7 +881,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
 		pm_runtime_put(&mid->pdev->dev);
 		return -EIO;
 	}
-	midc->completed = chan->cookie = 1;
+	chan->completed_cookie = chan->cookie = 1;
 
 	spin_lock_bh(&midc->lock);
 	while (midc->descs_allocated < DESCS_PER_CHANNEL) {
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index aea5ee88ce03..542ea85f83f6 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -165,7 +165,6 @@ union intel_mid_dma_cfg_hi {
  * @dma_base: MMIO register space DMA engine base pointer
  * @ch_id: DMA channel id
  * @lock: channel spinlock
- * @completed: DMA cookie
  * @active_list: current active descriptors
  * @queue: current queued up descriptors
  * @free_list: current free descriptors
@@ -183,7 +182,6 @@ struct intel_mid_dma_chan {
 	void __iomem		*dma_base;
 	int			ch_id;
 	spinlock_t		lock;
-	dma_cookie_t		completed;
 	struct list_head	active_list;
 	struct list_head	queue;
 	struct list_head	free_list;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index a4d6cb0c0343..fab440af1f9a 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -603,7 +603,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
 		 */
 		dump_desc_dbg(ioat, desc);
 		if (tx->cookie) {
-			chan->completed_cookie = tx->cookie;
+			chan->common.completed_cookie = tx->cookie;
 			tx->cookie = 0;
 			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
 			ioat->active -= desc->hw->tx_cnt;
diff --git
a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 5216c8a92a21..9653b6b6a715 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -90,7 +90,6 @@ struct ioat_chan_common { void __iomem *reg_base; unsigned long last_completion; spinlock_t cleanup_lock; - dma_cookie_t completed_cookie; unsigned long state; #define IOAT_COMPLETION_PENDING 0 #define IOAT_COMPLETION_ACK 1 @@ -153,12 +152,11 @@ static inline enum dma_status ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, struct dma_tx_state *txstate) { - struct ioat_chan_common *chan = to_chan_common(c); dma_cookie_t last_used; dma_cookie_t last_complete; last_used = c->cookie; - last_complete = chan->completed_cookie; + last_complete = c->completed_cookie; dma_set_tx_state(txstate, last_complete, last_used, 0); diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 5d65f8377971..d3f0aff2c02a 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -147,7 +147,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) dump_desc_dbg(ioat, desc); if (tx->cookie) { ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); - chan->completed_cookie = tx->cookie; + chan->common.completed_cookie = tx->cookie; tx->cookie = 0; if (tx->callback) { tx->callback(tx->callback_param); diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index f519c93a61e7..d4afac741e8a 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -277,7 +277,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) dump_desc_dbg(ioat, desc); tx = &desc->txd; if (tx->cookie) { - chan->completed_cookie = tx->cookie; + chan->common.completed_cookie = tx->cookie; ioat3_dma_unmap(ioat, desc, idx + i); tx->cookie = 0; if (tx->callback) { diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index e03f811a83dd..27d5a6ed7cdb 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -317,7 +317,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) } if (cookie > 0) { - iop_chan->completed_cookie = cookie; + iop_chan->common.completed_cookie = cookie; pr_debug("\tcompleted cookie %d\n", cookie); } } @@ -909,7 +909,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan, enum dma_status ret; last_used = chan->cookie; - last_complete = iop_chan->completed_cookie; + last_complete = chan->completed_cookie; dma_set_tx_state(txstate, last_complete, last_used, 0); ret = dma_async_is_complete(cookie, last_complete, last_used); if (ret == DMA_SUCCESS) @@ -918,7 +918,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan, iop_adma_slot_cleanup(iop_chan); last_used = chan->cookie; - last_complete = iop_chan->completed_cookie; + last_complete = chan->completed_cookie; dma_set_tx_state(txstate, last_complete, last_used, 0); return dma_async_is_complete(cookie, last_complete, last_used); @@ -1650,7 +1650,7 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan) /* initialize the completed cookie to be less than * the most recently used cookie */ - iop_chan->completed_cookie = cookie - 1; + iop_chan->common.completed_cookie = cookie - 1; iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie; /* channel should not be busy */ @@ -1707,7 +1707,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) /* initialize the completed cookie to be less than * the most recently used cookie */ - iop_chan->completed_cookie = cookie - 1; + iop_chan->common.completed_cookie = cookie - 
1; iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie; /* channel should not be busy */ diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 6815905a772f..82fc4679ddbd 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1316,7 +1316,7 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) /* Flip the active buffer - even if update above failed */ ichan->active_buffer = !ichan->active_buffer; if (done) - ichan->completed = desc->txd.cookie; + ichan->dma_chan.completed_cookie = desc->txd.cookie; callback = desc->txd.callback; callback_param = desc->txd.callback_param; @@ -1513,7 +1513,7 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan) WARN_ON(ichan->status != IPU_CHANNEL_FREE); chan->cookie = 1; - ichan->completed = -ENXIO; + chan->completed_cookie = -ENXIO; ret = ipu_irq_map(chan->chan_id); if (ret < 0) @@ -1602,9 +1602,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan) static enum dma_status idmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { - struct idmac_channel *ichan = to_idmac_chan(chan); - - dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0); + dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0); if (cookie != chan->cookie) return DMA_ERROR; return DMA_SUCCESS; @@ -1640,11 +1638,11 @@ static int __init ipu_idmac_init(struct ipu *ipu) ichan->status = IPU_CHANNEL_FREE; ichan->sec_chan_en = false; - ichan->completed = -ENXIO; snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i); dma_chan->device = &idmac->dma; dma_chan->cookie = 1; + dma_chan->completed_cookie = -ENXIO; dma_chan->chan_id = i; list_add_tail(&dma_chan->device_node, &dma->channels); } diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index b9bae94f2015..640858b61be2 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -188,7 +188,6 @@ struct mpc_dma_chan { struct list_head completed; struct mpc_dma_tcd *tcd; dma_addr_t tcd_paddr; - dma_cookie_t completed_cookie; /* Lock for this structure */ spinlock_t lock; @@ -365,7 +364,7 @@ static void mpc_dma_process_completed(struct mpc_dma *mdma) /* Free descriptors */ spin_lock_irqsave(&mchan->lock, flags); list_splice_tail_init(&list, &mchan->free); - mchan->completed_cookie = last_cookie; + mchan->chan.completed_cookie = last_cookie; spin_unlock_irqrestore(&mchan->lock, flags); } } @@ -568,7 +567,7 @@ mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, spin_lock_irqsave(&mchan->lock, flags); last_used = mchan->chan.cookie; - last_complete = mchan->completed_cookie; + last_complete = mchan->chan.completed_cookie; spin_unlock_irqrestore(&mchan->lock, flags); dma_set_tx_state(txstate, last_complete, last_used, 0); @@ -743,7 +742,7 @@ static int __devinit mpc_dma_probe(struct platform_device *op) mchan->chan.device = dma; mchan->chan.chan_id = i; mchan->chan.cookie = 1; - mchan->completed_cookie = mchan->chan.cookie; + mchan->chan.completed_cookie = mchan->chan.cookie; INIT_LIST_HEAD(&mchan->free); INIT_LIST_HEAD(&mchan->prepared); diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 9a353c2216d0..ee1607e46e09 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -435,7 +435,7 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) } if (cookie > 0) - mv_chan->completed_cookie = cookie; + mv_chan->common.completed_cookie = cookie; } static void @@ -825,7 +825,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan, enum 
dma_status ret; last_used = chan->cookie; - last_complete = mv_chan->completed_cookie; + last_complete = chan->completed_cookie; mv_chan->is_complete_cookie = cookie; dma_set_tx_state(txstate, last_complete, last_used, 0); @@ -837,7 +837,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan, mv_xor_slot_cleanup(mv_chan); last_used = chan->cookie; - last_complete = mv_chan->completed_cookie; + last_complete = chan->completed_cookie; dma_set_tx_state(txstate, last_complete, last_used, 0); return dma_async_is_complete(cookie, last_complete, last_used); diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index 977b592e976b..4e4780e1b7e0 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h @@ -78,7 +78,6 @@ struct mv_xor_device { /** * struct mv_xor_chan - internal representation of a XOR channel * @pending: allows batching of hardware operations - * @completed_cookie: identifier for the most recently completed operation * @lock: serializes enqueue/dequeue operations to the descriptors pool * @mmr_base: memory mapped register base * @idx: the index of the xor channel @@ -93,7 +92,6 @@ struct mv_xor_device { */ struct mv_xor_chan { int pending; - dma_cookie_t completed_cookie; spinlock_t lock; /* protects the descriptor slot pool */ void __iomem *mmr_base; unsigned int idx; diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index be641cbd36fc..7ce23a24b412 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -111,7 +111,7 @@ struct mxs_dma_chan { int chan_irq; struct mxs_dma_ccw *ccw; dma_addr_t ccw_phys; - dma_cookie_t last_completed; + int desc_count; enum dma_status status; unsigned int flags; #define MXS_DMA_SG_LOOP (1 << 0) @@ -297,7 +297,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) stat1 &= ~(1 << channel); if (mxs_chan->status == DMA_SUCCESS) - mxs_chan->last_completed = mxs_chan->desc.cookie; + mxs_chan->chan.completed_cookie = mxs_chan->desc.cookie; /* schedule tasklet on this channel */ tasklet_schedule(&mxs_chan->tasklet); @@ -559,7 +559,7 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, dma_cookie_t last_used; last_used = chan->cookie; - dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0); + dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0); return mxs_chan->status; } diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 1ac8d4b580b7..cf2680794592 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -105,7 +105,6 @@ struct pch_dma_chan { spinlock_t lock; - dma_cookie_t completed_cookie; struct list_head active_list; struct list_head queue; struct list_head free_list; @@ -544,7 +543,7 @@ static int pd_alloc_chan_resources(struct dma_chan *chan) spin_lock_irq(&pd_chan->lock); list_splice(&tmp_list, &pd_chan->free_list); pd_chan->descs_allocated = i; - pd_chan->completed_cookie = chan->cookie = 1; + chan->completed_cookie = chan->cookie = 1; spin_unlock_irq(&pd_chan->lock); pdc_enable_irq(chan, 1); @@ -583,7 +582,7 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie, int ret; spin_lock_irq(&pd_chan->lock); - last_completed = pd_chan->completed_cookie; + last_completed = chan->completed_cookie; last_used = chan->cookie; spin_unlock_irq(&pd_chan->lock); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 00eee59e8b33..7bc1a56f497f 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -48,9 +48,6 @@ struct dma_pl330_chan { /* DMA-Engine Channel */ struct dma_chan chan; - /* Last completed cookie */ - 
dma_cookie_t completed; - /* List of to be xfered descriptors */ struct list_head work_list; @@ -193,7 +190,7 @@ static void pl330_tasklet(unsigned long data) /* Pick up ripe tomatoes */ list_for_each_entry_safe(desc, _dt, &pch->work_list, node) if (desc->status == DONE) { - pch->completed = desc->txd.cookie; + pch->chan.completed_cookie = desc->txd.cookie; list_move_tail(&desc->node, &list); } @@ -235,7 +232,8 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) spin_lock_irqsave(&pch->lock, flags); - pch->completed = chan->cookie = 1; + chan->completed_cookie = chan->cookie = 1; + pch->cyclic = false; pch->pl330_chid = pl330_request_channel(&pdmac->pif); if (!pch->pl330_chid) { @@ -299,7 +297,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, dma_cookie_t last_done, last_used; int ret; - last_done = pch->completed; + last_done = chan->completed_cookie; last_used = chan->cookie; ret = dma_async_is_complete(cookie, last_done, last_used); diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index fc457a7e8832..f878322ecbcb 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -1930,7 +1930,7 @@ static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan) if (end_of_chain && slot_cnt) { /* Should wait for ZeroSum completion */ if (cookie > 0) - chan->completed_cookie = cookie; + chan->common.completed_cookie = cookie; return; } @@ -1960,7 +1960,7 @@ static void __ppc440spe_adma_slot_cleanup(struct ppc440spe_adma_chan *chan) BUG_ON(!seen_current); if (cookie > 0) { - chan->completed_cookie = cookie; + chan->common.completed_cookie = cookie; pr_debug("\tcompleted cookie %d\n", cookie); } @@ -3950,7 +3950,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan, ppc440spe_chan = to_ppc440spe_adma_chan(chan); last_used = chan->cookie; - last_complete = ppc440spe_chan->completed_cookie; + last_complete = chan->completed_cookie; dma_set_tx_state(txstate, last_complete, last_used, 0); @@ -3961,7 +3961,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan, ppc440spe_adma_slot_cleanup(ppc440spe_chan); last_used = chan->cookie; - last_complete = ppc440spe_chan->completed_cookie; + last_complete = chan->completed_cookie; dma_set_tx_state(txstate, last_complete, last_used, 0); @@ -4058,7 +4058,7 @@ static void ppc440spe_chan_start_null_xor(struct ppc440spe_adma_chan *chan) /* initialize the completed cookie to be less than * the most recently used cookie */ - chan->completed_cookie = cookie - 1; + chan->common.completed_cookie = cookie - 1; chan->common.cookie = sw_desc->async_tx.cookie = cookie; /* channel should not be busy */ diff --git a/drivers/dma/ppc4xx/adma.h b/drivers/dma/ppc4xx/adma.h index 8ada5a812e3b..26b7a5ed9ac7 100644 --- a/drivers/dma/ppc4xx/adma.h +++ b/drivers/dma/ppc4xx/adma.h @@ -81,7 +81,6 @@ struct ppc440spe_adma_device { * @common: common dmaengine channel object members * @all_slots: complete domain of slots usable by the channel * @pending: allows batching of hardware operations - * @completed_cookie: identifier for the most recently completed operation * @slots_allocated: records the actual size of the descriptor slot pool * @hw_chain_inited: h/w descriptor chain initialization flag * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs @@ -99,7 +98,6 @@ struct ppc440spe_adma_chan { struct list_head all_slots; struct ppc440spe_adma_desc_slot *last_used; int pending; - dma_cookie_t completed_cookie; int slots_allocated; int hw_chain_inited; struct 
tasklet_struct irq_tasklet;
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 7f49235d14b9..9291c388690d 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -718,12 +718,12 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
 		cookie = tx->cookie;
 
 		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
-			if (sh_chan->completed_cookie != desc->cookie - 1)
+			if (sh_chan->common.completed_cookie != desc->cookie - 1)
 				dev_dbg(sh_chan->dev,
 					"Completing cookie %d, expected %d\n",
 					desc->cookie,
-					sh_chan->completed_cookie + 1);
-			sh_chan->completed_cookie = desc->cookie;
+					sh_chan->common.completed_cookie + 1);
+			sh_chan->common.completed_cookie = desc->cookie;
 		}
 
 		/* Call callback on the last chunk */
@@ -771,7 +771,7 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
 		 * Terminating and the loop completed normally: forgive
 		 * uncompleted cookies
 		 */
-		sh_chan->completed_cookie = sh_chan->common.cookie;
+		sh_chan->common.completed_cookie = sh_chan->common.cookie;
 
 	spin_unlock_bh(&sh_chan->desc_lock);
 
@@ -835,7 +835,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
 	sh_dmae_chan_ld_cleanup(sh_chan, false);
 
 	/* First read completed cookie to avoid a skew */
-	last_complete = sh_chan->completed_cookie;
+	last_complete = chan->completed_cookie;
 	rmb();
 	last_used = chan->cookie;
 	BUG_ON(last_complete < 0);
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index dc56576f9fdb..e5c4fdfbfcea 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -24,7 +24,6 @@ struct device;
 
 struct sh_dmae_chan {
-	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;		/* Descriptor operation lock */
 	struct list_head ld_queue;	/* Link descriptors queue */
 	struct list_head ld_free;	/* Link descriptors free */
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
new file mode 100644
index 000000000000..60473f00cf1c
--- /dev/null
+++ b/drivers/dma/sirf-dma.c
@@ -0,0 +1,706 @@
+/*
+ * DMA controller driver for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/sirfsoc_dma.h>
+
+#define SIRFSOC_DMA_DESCRIPTORS		16
+#define SIRFSOC_DMA_CHANNELS		16
+
+#define SIRFSOC_DMA_CH_ADDR		0x00
+#define SIRFSOC_DMA_CH_XLEN		0x04
+#define SIRFSOC_DMA_CH_YLEN		0x08
+#define SIRFSOC_DMA_CH_CTRL		0x0C
+
+#define SIRFSOC_DMA_WIDTH_0		0x100
+#define SIRFSOC_DMA_CH_VALID		0x140
+#define SIRFSOC_DMA_CH_INT		0x144
+#define SIRFSOC_DMA_INT_EN		0x148
+#define SIRFSOC_DMA_CH_LOOP_CTRL	0x150
+
+#define SIRFSOC_DMA_MODE_CTRL_BIT	4
+#define SIRFSOC_DMA_DIR_CTRL_BIT	5
+
+/* xlen and dma_width register is in 4 bytes boundary */
+#define SIRFSOC_DMA_WORD_LEN		4
+
+struct sirfsoc_dma_desc {
+	struct dma_async_tx_descriptor	desc;
+	struct list_head		node;
+
+	/* SiRFprimaII 2D-DMA parameters */
+
+	int		xlen;		/* DMA xlen */
+	int		ylen;		/* DMA ylen */
+	int		width;		/* DMA width */
+	int		dir;
+	bool		cyclic;		/* is loop DMA? */
+	u32		addr;		/* DMA buffer address */
+};
+
+struct sirfsoc_dma_chan {
+	struct dma_chan			chan;
+	struct list_head		free;
+	struct list_head		prepared;
+	struct list_head		queued;
+	struct list_head		active;
+	struct list_head		completed;
+	unsigned long			happened_cyclic;
+	unsigned long			completed_cyclic;
+
+	/* Lock for this structure */
+	spinlock_t			lock;
+
+	int				mode;
+};
+
+struct sirfsoc_dma {
+	struct dma_device		dma;
+	struct tasklet_struct		tasklet;
+	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
+	void __iomem			*base;
+	int				irq;
+};
+
+#define DRV_NAME	"sirfsoc_dma"
+
+/* Convert struct dma_chan to struct sirfsoc_dma_chan */
+static inline
+struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct sirfsoc_dma_chan, chan);
+}
+
+/* Convert struct dma_chan to struct sirfsoc_dma */
+static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
+{
+	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
+	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
+}
+
+/* Execute all queued DMA descriptors */
+static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
+{
+	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+	int cid = schan->chan.chan_id;
+	struct sirfsoc_dma_desc *sdesc = NULL;
+
+	/*
+	 * lock has been held by functions calling this, so we don't hold
+	 * lock again
+	 */
+
+	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
+		node);
+	/* Move the first queued descriptor to active list */
+	list_move_tail(&sdesc->node, &schan->active);
+
+	/* Start the DMA transfer */
+	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
+		cid * 4);
+	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
+		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
+		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
+	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
+		SIRFSOC_DMA_CH_XLEN);
+	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
+		SIRFSOC_DMA_CH_YLEN);
+	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
+		(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+
+	/*
+	 * writel has an implicit memory write barrier to make sure data is
+	 * flushed into memory before starting DMA
+	 */
+	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
+
+	if (sdesc->cyclic) {
+		writel((1 << cid) | 1 << (cid + 16) |
+			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
+			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+		schan->happened_cyclic = schan->completed_cyclic = 0;
+	}
+}
+
+/* Interrupt handler */
+static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
+{
+	struct sirfsoc_dma *sdma = data;
+	struct sirfsoc_dma_chan *schan;
+	struct sirfsoc_dma_desc *sdesc = NULL;
+	u32 is;
+	int ch;
+
+	is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
+	while ((ch = fls(is) - 1) >= 0) {
+		is &= ~(1 << ch);
+		writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
+		schan = &sdma->channels[ch];
+
+		spin_lock(&schan->lock);
+
+		sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+			node);
+		if (!sdesc->cyclic) {
+			/* Execute queued descriptors */
+			list_splice_tail_init(&schan->active, &schan->completed);
+			if (!list_empty(&schan->queued))
+				sirfsoc_dma_execute(schan);
+		} else
+			schan->happened_cyclic++;
+
+		spin_unlock(&schan->lock);
+	}
+
+	/* Schedule tasklet */
+	tasklet_schedule(&sdma->tasklet);
+
+	return IRQ_HANDLED;
+}
+
+/* process completed descriptors */
+static void sirfsoc_dma_process_completed(struct sirfsoc_dma
*sdma) +{ + dma_cookie_t last_cookie = 0; + struct sirfsoc_dma_chan *schan; + struct sirfsoc_dma_desc *sdesc; + struct dma_async_tx_descriptor *desc; + unsigned long flags; + unsigned long happened_cyclic; + LIST_HEAD(list); + int i; + + for (i = 0; i < sdma->dma.chancnt; i++) { + schan = &sdma->channels[i]; + + /* Get all completed descriptors */ + spin_lock_irqsave(&schan->lock, flags); + if (!list_empty(&schan->completed)) { + list_splice_tail_init(&schan->completed, &list); + spin_unlock_irqrestore(&schan->lock, flags); + + /* Execute callbacks and run dependencies */ + list_for_each_entry(sdesc, &list, node) { + desc = &sdesc->desc; + + if (desc->callback) + desc->callback(desc->callback_param); + + last_cookie = desc->cookie; + dma_run_dependencies(desc); + } + + /* Free descriptors */ + spin_lock_irqsave(&schan->lock, flags); + list_splice_tail_init(&list, &schan->free); + schan->chan.completed_cookie = last_cookie; + spin_unlock_irqrestore(&schan->lock, flags); + } else { + /* for cyclic channel, desc is always in active list */ + sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, + node); + + if (!sdesc || (sdesc && !sdesc->cyclic)) { + /* without active cyclic DMA */ + spin_unlock_irqrestore(&schan->lock, flags); + continue; + } + + /* cyclic DMA */ + happened_cyclic = schan->happened_cyclic; + spin_unlock_irqrestore(&schan->lock, flags); + + desc = &sdesc->desc; + while (happened_cyclic != schan->completed_cyclic) { + if (desc->callback) + desc->callback(desc->callback_param); + schan->completed_cyclic++; + } + } + } +} + +/* DMA Tasklet */ +static void sirfsoc_dma_tasklet(unsigned long data) +{ + struct sirfsoc_dma *sdma = (void *)data; + + sirfsoc_dma_process_completed(sdma); +} + +/* Submit descriptor to hardware */ +static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd) +{ + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan); + struct sirfsoc_dma_desc *sdesc; + unsigned long flags; + dma_cookie_t cookie; + + sdesc = container_of(txd, struct sirfsoc_dma_desc, desc); + + spin_lock_irqsave(&schan->lock, flags); + + /* Move descriptor to queue */ + list_move_tail(&sdesc->node, &schan->queued); + + /* Update cookie */ + cookie = schan->chan.cookie + 1; + if (cookie <= 0) + cookie = 1; + + schan->chan.cookie = cookie; + sdesc->desc.cookie = cookie; + + spin_unlock_irqrestore(&schan->lock, flags); + + return cookie; +} + +static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan, + struct dma_slave_config *config) +{ + unsigned long flags; + + if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || + (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)) + return -EINVAL; + + spin_lock_irqsave(&schan->lock, flags); + schan->mode = (config->src_maxburst == 4 ? 
1 : 0); + spin_unlock_irqrestore(&schan->lock, flags); + + return 0; +} + +static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan) +{ + struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); + int cid = schan->chan.chan_id; + unsigned long flags; + + writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) & + ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); + writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID); + + writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL) + & ~((1 << cid) | 1 << (cid + 16)), + sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); + + spin_lock_irqsave(&schan->lock, flags); + list_splice_tail_init(&schan->active, &schan->free); + list_splice_tail_init(&schan->queued, &schan->free); + spin_unlock_irqrestore(&schan->lock, flags); + + return 0; +} + +static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct dma_slave_config *config; + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); + + switch (cmd) { + case DMA_TERMINATE_ALL: + return sirfsoc_dma_terminate_all(schan); + case DMA_SLAVE_CONFIG: + config = (struct dma_slave_config *)arg; + return sirfsoc_dma_slave_config(schan, config); + + default: + break; + } + + return -ENOSYS; +} + +/* Alloc channel resources */ +static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); + struct sirfsoc_dma_desc *sdesc; + unsigned long flags; + LIST_HEAD(descs); + int i; + + /* Alloc descriptors for this channel */ + for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) { + sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL); + if (!sdesc) { + dev_notice(sdma->dma.dev, "Memory allocation error. 
" + "Allocated only %u descriptors\n", i); + break; + } + + dma_async_tx_descriptor_init(&sdesc->desc, chan); + sdesc->desc.flags = DMA_CTRL_ACK; + sdesc->desc.tx_submit = sirfsoc_dma_tx_submit; + + list_add_tail(&sdesc->node, &descs); + } + + /* Return error only if no descriptors were allocated */ + if (i == 0) + return -ENOMEM; + + spin_lock_irqsave(&schan->lock, flags); + + list_splice_tail_init(&descs, &schan->free); + spin_unlock_irqrestore(&schan->lock, flags); + + return i; +} + +/* Free channel resources */ +static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan) +{ + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); + struct sirfsoc_dma_desc *sdesc, *tmp; + unsigned long flags; + LIST_HEAD(descs); + + spin_lock_irqsave(&schan->lock, flags); + + /* Channel must be idle */ + BUG_ON(!list_empty(&schan->prepared)); + BUG_ON(!list_empty(&schan->queued)); + BUG_ON(!list_empty(&schan->active)); + BUG_ON(!list_empty(&schan->completed)); + + /* Move data */ + list_splice_tail_init(&schan->free, &descs); + + spin_unlock_irqrestore(&schan->lock, flags); + + /* Free descriptors */ + list_for_each_entry_safe(sdesc, tmp, &descs, node) + kfree(sdesc); +} + +/* Send pending descriptor to hardware */ +static void sirfsoc_dma_issue_pending(struct dma_chan *chan) +{ + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&schan->lock, flags); + + if (list_empty(&schan->active) && !list_empty(&schan->queued)) + sirfsoc_dma_execute(schan); + + spin_unlock_irqrestore(&schan->lock, flags); +} + +/* Check request completion status */ +static enum dma_status +sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); + unsigned long flags; + dma_cookie_t last_used; + dma_cookie_t last_complete; + + spin_lock_irqsave(&schan->lock, flags); + last_used = schan->chan.cookie; + last_complete = schan->chan.completed_cookie; + spin_unlock_irqrestore(&schan->lock, flags); + + dma_set_tx_state(txstate, last_complete, last_used, 0); + return dma_async_is_complete(cookie, last_complete, last_used); +} + +static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved( + struct dma_chan *chan, struct dma_interleaved_template *xt, + unsigned long flags) +{ + struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); + struct sirfsoc_dma_desc *sdesc = NULL; + unsigned long iflags; + int ret; + + if ((xt->dir != DMA_MEM_TO_DEV) || (xt->dir != DMA_DEV_TO_MEM)) { + ret = -EINVAL; + goto err_dir; + } + + /* Get free descriptor */ + spin_lock_irqsave(&schan->lock, iflags); + if (!list_empty(&schan->free)) { + sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc, + node); + list_del(&sdesc->node); + } + spin_unlock_irqrestore(&schan->lock, iflags); + + if (!sdesc) { + /* try to free completed descriptors */ + sirfsoc_dma_process_completed(sdma); + ret = 0; + goto no_desc; + } + + /* Place descriptor in prepared list */ + spin_lock_irqsave(&schan->lock, iflags); + + /* + * Number of chunks in a frame can only be 1 for prima2 + * and ylen (number of frame - 1) must be at least 0 + */ + if ((xt->frame_size == 1) && (xt->numf > 0)) { + sdesc->cyclic = 0; + sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN; + sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) / + SIRFSOC_DMA_WORD_LEN; + sdesc->ylen = xt->numf - 1; + if (xt->dir == 
DMA_MEM_TO_DEV) { + sdesc->addr = xt->src_start; + sdesc->dir = 1; + } else { + sdesc->addr = xt->dst_start; + sdesc->dir = 0; + } + + list_add_tail(&sdesc->node, &schan->prepared); + } else { + pr_err("sirfsoc DMA Invalid xfer\n"); + ret = -EINVAL; + goto err_xfer; + } + spin_unlock_irqrestore(&schan->lock, iflags); + + return &sdesc->desc; +err_xfer: + spin_unlock_irqrestore(&schan->lock, iflags); +no_desc: +err_dir: + return ERR_PTR(ret); +} + +static struct dma_async_tx_descriptor * +sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr, + size_t buf_len, size_t period_len, + enum dma_transfer_direction direction) +{ + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); + struct sirfsoc_dma_desc *sdesc = NULL; + unsigned long iflags; + + /* + * we only support cycle transfer with 2 period + * If the X-length is set to 0, it would be the loop mode. + * The DMA address keeps increasing until reaching the end of a loop + * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then + * the DMA address goes back to the beginning of this area. + * In loop mode, the DMA data region is divided into two parts, BUFA + * and BUFB. DMA controller generates interrupts twice in each loop: + * when the DMA address reaches the end of BUFA or the end of the + * BUFB + */ + if (buf_len != 2 * period_len) + return ERR_PTR(-EINVAL); + + /* Get free descriptor */ + spin_lock_irqsave(&schan->lock, iflags); + if (!list_empty(&schan->free)) { + sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc, + node); + list_del(&sdesc->node); + } + spin_unlock_irqrestore(&schan->lock, iflags); + + if (!sdesc) + return 0; + + /* Place descriptor in prepared list */ + spin_lock_irqsave(&schan->lock, iflags); + sdesc->addr = addr; + sdesc->cyclic = 1; + sdesc->xlen = 0; + sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1; + sdesc->width = 1; + list_add_tail(&sdesc->node, &schan->prepared); + spin_unlock_irqrestore(&schan->lock, iflags); + + return &sdesc->desc; +} + +/* + * The DMA controller consists of 16 independent DMA channels. 
+ * Each channel is allocated to a different function + */ +bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id) +{ + unsigned int ch_nr = (unsigned int) chan_id; + + if (ch_nr == chan->chan_id + + chan->device->dev_id * SIRFSOC_DMA_CHANNELS) + return true; + + return false; +} +EXPORT_SYMBOL(sirfsoc_dma_filter_id); + +static int __devinit sirfsoc_dma_probe(struct platform_device *op) +{ + struct device_node *dn = op->dev.of_node; + struct device *dev = &op->dev; + struct dma_device *dma; + struct sirfsoc_dma *sdma; + struct sirfsoc_dma_chan *schan; + struct resource res; + ulong regs_start, regs_size; + u32 id; + int ret, i; + + sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL); + if (!sdma) { + dev_err(dev, "Memory exhausted!\n"); + return -ENOMEM; + } + + if (of_property_read_u32(dn, "cell-index", &id)) { + dev_err(dev, "Fail to get DMAC index\n"); + ret = -ENODEV; + goto free_mem; + } + + sdma->irq = irq_of_parse_and_map(dn, 0); + if (sdma->irq == NO_IRQ) { + dev_err(dev, "Error mapping IRQ!\n"); + ret = -EINVAL; + goto free_mem; + } + + ret = of_address_to_resource(dn, 0, &res); + if (ret) { + dev_err(dev, "Error parsing memory region!\n"); + goto free_mem; + } + + regs_start = res.start; + regs_size = resource_size(&res); + + sdma->base = devm_ioremap(dev, regs_start, regs_size); + if (!sdma->base) { + dev_err(dev, "Error mapping memory region!\n"); + ret = -ENOMEM; + goto irq_dispose; + } + + ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, + sdma); + if (ret) { + dev_err(dev, "Error requesting IRQ!\n"); + ret = -EINVAL; + goto unmap_mem; + } + + dma = &sdma->dma; + dma->dev = dev; + dma->chancnt = SIRFSOC_DMA_CHANNELS; + + dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources; + dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources; + dma->device_issue_pending = sirfsoc_dma_issue_pending; + dma->device_control = sirfsoc_dma_control; + dma->device_tx_status = sirfsoc_dma_tx_status; + dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved; + dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic; + + INIT_LIST_HEAD(&dma->channels); + dma_cap_set(DMA_SLAVE, dma->cap_mask); + dma_cap_set(DMA_CYCLIC, dma->cap_mask); + dma_cap_set(DMA_INTERLEAVE, dma->cap_mask); + dma_cap_set(DMA_PRIVATE, dma->cap_mask); + + for (i = 0; i < dma->chancnt; i++) { + schan = &sdma->channels[i]; + + schan->chan.device = dma; + schan->chan.cookie = 1; + schan->chan.completed_cookie = schan->chan.cookie; + + INIT_LIST_HEAD(&schan->free); + INIT_LIST_HEAD(&schan->prepared); + INIT_LIST_HEAD(&schan->queued); + INIT_LIST_HEAD(&schan->active); + INIT_LIST_HEAD(&schan->completed); + + spin_lock_init(&schan->lock); + list_add_tail(&schan->chan.device_node, &dma->channels); + } + + tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma); + + /* Register DMA engine */ + dev_set_drvdata(dev, sdma); + ret = dma_async_device_register(dma); + if (ret) + goto free_irq; + + dev_info(dev, "initialized SIRFSOC DMAC driver\n"); + + return 0; + +free_irq: + devm_free_irq(dev, sdma->irq, sdma); +irq_dispose: + irq_dispose_mapping(sdma->irq); +unmap_mem: + iounmap(sdma->base); +free_mem: + devm_kfree(dev, sdma); + return ret; +} + +static int __devexit sirfsoc_dma_remove(struct platform_device *op) +{ + struct device *dev = &op->dev; + struct sirfsoc_dma *sdma = dev_get_drvdata(dev); + + dma_async_device_unregister(&sdma->dma); + devm_free_irq(dev, sdma->irq, sdma); + irq_dispose_mapping(sdma->irq); + iounmap(sdma->base); + devm_kfree(dev, sdma); + 
return 0; +} + +static struct of_device_id sirfsoc_dma_match[] = { + { .compatible = "sirf,prima2-dmac", }, + {}, +}; + +static struct platform_driver sirfsoc_dma_driver = { + .probe = sirfsoc_dma_probe, + .remove = __devexit_p(sirfsoc_dma_remove), + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = sirfsoc_dma_match, + }, +}; + +module_platform_driver(sirfsoc_dma_driver); + +MODULE_AUTHOR("Rongjun Ying , " + "Barry Song "); +MODULE_DESCRIPTION("SIRFSOC DMA control driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 467e4dcb20a0..56c1dc64b0cf 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -163,8 +163,6 @@ struct d40_base; * * @lock: A spinlock to protect this struct. * @log_num: The logical number, if any of this channel. - * @completed: Starts with 1, after first interrupt it is set to dma engine's - * current cookie. * @pending_tx: The number of pending transfers. Used between interrupt handler * and tasklet. * @busy: Set to true when transfer is ongoing on this channel. @@ -194,8 +192,6 @@ struct d40_base; struct d40_chan { spinlock_t lock; int log_num; - /* ID of the most recent completed transfer */ - int completed; int pending_tx; bool busy; struct d40_phy_res *phy_chan; @@ -1212,7 +1208,7 @@ static void dma_tasklet(unsigned long data) goto err; if (!d40d->cyclic) - d40c->completed = d40d->txd.cookie; + d40c->chan.completed_cookie = d40d->txd.cookie; /* * If terminating a channel pending_tx is set to zero. @@ -2000,7 +1996,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) bool is_free_phy; spin_lock_irqsave(&d40c->lock, flags); - d40c->completed = chan->cookie = 1; + chan->completed_cookie = chan->cookie = 1; /* If no dma configuration is set use default configuration (memcpy) */ if (!d40c->configured) { @@ -2160,7 +2156,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan, return -EINVAL; } - last_complete = d40c->completed; + last_complete = chan->completed_cookie; last_used = chan->cookie; if (d40_is_paused(d40c)) diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index f69f90a61873..3ecd052b2b67 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -84,7 +84,6 @@ struct timb_dma_chan { especially the lists and descriptors, from races between the tasklet and calls from above */ - dma_cookie_t last_completed_cookie; bool ongoing; struct list_head active_list; struct list_head queue; @@ -284,7 +283,7 @@ static void __td_finish(struct timb_dma_chan *td_chan) else iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR); */ - td_chan->last_completed_cookie = txd->cookie; + td_chan->chan.completed_cookie = txd->cookie; td_chan->ongoing = false; callback = txd->callback; @@ -481,7 +480,7 @@ static int td_alloc_chan_resources(struct dma_chan *chan) } spin_lock_bh(&td_chan->lock); - td_chan->last_completed_cookie = 1; + chan->completed_cookie = 1; chan->cookie = 1; spin_unlock_bh(&td_chan->lock); @@ -523,7 +522,7 @@ static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie, dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); - last_complete = td_chan->last_completed_cookie; + last_complete = chan->completed_cookie; last_used = chan->cookie; ret = dma_async_is_complete(cookie, last_complete, last_used); diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index cbd83e362b5e..6367e9c3b077 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -424,7 +424,7 @@ txx9dmac_descriptor_complete(struct 
txx9dmac_chan *dc,
 	dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
 		 txd->cookie, desc);
 
-	dc->completed = txd->cookie;
+	dc->chan.completed_cookie = txd->cookie;
 	callback = txd->callback;
 	param = txd->callback_param;
 
@@ -976,7 +976,7 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	dma_cookie_t last_complete;
 	int ret;
 
-	last_complete = dc->completed;
+	last_complete = chan->completed_cookie;
 	last_used = chan->cookie;
 
 	ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -985,7 +985,7 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		txx9dmac_scan_descriptors(dc);
 		spin_unlock_bh(&dc->lock);
 
-		last_complete = dc->completed;
+		last_complete = chan->completed_cookie;
 		last_used = chan->cookie;
 
 		ret = dma_async_is_complete(cookie, last_complete, last_used);
@@ -1057,7 +1057,7 @@ static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
 		return -EIO;
 	}
 
-	dc->completed = chan->cookie = 1;
+	chan->completed_cookie = chan->cookie = 1;
 
 	dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
 	txx9dmac_chan_set_SMPCHN(dc);
@@ -1186,7 +1186,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
 	dc->ddev->chan[ch] = dc;
 	dc->chan.device = &dc->dma;
 	list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
-	dc->chan.cookie = dc->completed = 1;
+	dc->chan.cookie = dc->chan.completed_cookie = 1;
 	if (is_dmac64(dc))
 		dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h
index 365d42366b9f..f5a760598882 100644
--- a/drivers/dma/txx9dmac.h
+++ b/drivers/dma/txx9dmac.h
@@ -172,7 +172,6 @@ struct txx9dmac_chan {
 	spinlock_t lock;
 
 	/* these other elements are all protected by lock */
-	dma_cookie_t completed;
 	struct list_head active_list;
 	struct list_head queue;
 	struct list_head free_list;
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index e6e28f37d8ec..eb5a21bc0b24 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -156,7 +156,6 @@ enum pl08x_dma_chan_state {
  * @runtime_addr: address for RX/TX according to the runtime config
  * @runtime_direction: current direction of this channel according to
  * runtime config
- * @lc: last completed transaction on this channel
  * @pend_list: queued transactions pending on this channel
  * @at: active transaction on this channel
  * @lock: a lock for this channel data
@@ -178,7 +177,6 @@ struct pl08x_dma_chan {
 	u32 src_cctl;
 	u32 dst_cctl;
 	enum dma_data_direction runtime_direction;
-	dma_cookie_t lc;
 	struct list_head pend_list;
 	struct pl08x_txd *at;
 	spinlock_t lock;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8fbf40e0713c..61efb862aa8d 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -175,6 +175,7 @@ struct dma_chan_percpu {
  * struct dma_chan - devices supply DMA channels, clients use them
  * @device: ptr to the dma device who supplies this channel, always !%NULL
  * @cookie: last cookie value returned to client
+ * @completed_cookie: last completed cookie for this channel
  * @chan_id: channel ID for sysfs
  * @dev: class device for sysfs
  * @device_node: used to add this to the device chan list
@@ -186,6 +187,7 @@ struct dma_chan_percpu {
 struct dma_chan {
 	struct dma_device *device;
 	dma_cookie_t cookie;
+	dma_cookie_t completed_cookie;
 
 	/* sysfs */
 	int chan_id;
-- 
cgit v1.2.3
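
The conversion this patch performs is mechanical for any dmaengine driver: delete the private cookie field and read/write chan->completed_cookie instead. A minimal sketch of the resulting driver-side pattern (the foo_* names are illustrative, not taken from this patch; dma_set_tx_state() and dma_async_is_complete() are the existing dmaengine helpers):

#include <linux/dmaengine.h>

/* Hypothetical driver channel: no private completed_cookie any more. */
struct foo_chan {
	struct dma_chan	chan;	/* generic state, including both cookies */
	/* ... driver-private lists, locks, registers ... */
};

/* Completion path (IRQ/tasklet): store through the embedded dma_chan. */
static void foo_complete_txd(struct foo_chan *fc,
			     struct dma_async_tx_descriptor *txd)
{
	fc->chan.completed_cookie = txd->cookie;
}

/* Status query: both cookies now come from the generic structure. */
static enum dma_status foo_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	dma_cookie_t last_complete = chan->completed_cookie;
	dma_cookie_t last_used = chan->cookie;

	dma_set_tx_state(txstate, last_complete, last_used, 0);
	return dma_async_is_complete(cookie, last_complete, last_used);
}

Centralizing the field also paves the way for later helpers in this series that factor the cookie boilerplate out of the drivers entirely.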
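
Since the new sirf-dma.c exports sirfsoc_dma_filter_id() and its filter matches chan_id + dev_id * SIRFSOC_DMA_CHANNELS, a client encodes the controller and the channel into a single number when requesting a channel. A rough usage sketch, assuming controller 0 and channel 4 and that the sirfsoc_dma_filter_id() declaration is visible to the client (mainline carries it in <linux/sirfsoc_dma.h>):

#include <linux/dmaengine.h>
#include <linux/sirfsoc_dma.h>

static struct dma_chan *foo_request_sirf_chan(void)
{
	dma_cap_mask_t mask;
	/* assumed IDs: dev_id 0, channel 4; 16 channels per controller */
	unsigned long want = 0 * 16 + 4;

	dma_cap_zero(mask);
	dma_cap_set(DMA_INTERLEAVE, mask);

	/* the filter compares against chan_id + dev_id * 16 */
	return dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)want);
}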