From 2eab9b1a3006b4f7bf49c55ce23943170a0068db Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Thu, 21 Jul 2016 12:40:52 -0700 Subject: [PATCH 001/169] dmaengine: ioatdma: fix uninitialized array usage Static analysis showed that unitialized array is being used for compare. At line 850 when a dma_mapping_error() occurs, it jumps to dma_unmap. At this point, dma_srcs has not been initialized. However, the code after dma_unmap label checks dma_srcs for a comparison and thus is comparing to random garbage in the array. Given that when dest_dma is being mapped this is the first instance of mapping DMA memory and failed, there is really nothing to be cleaned up and thus should jump to free_resources label instead. Signed-off-by: Dave Jiang Signed-off-by: Vinod Koul --- drivers/dma/ioat/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 7145f7716a92..015f7110b96d 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -828,7 +828,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(dev, dest_dma)) - goto dma_unmap; + goto free_resources; for (i = 0; i < IOAT_NUM_SRC_TEST; i++) dma_srcs[i] = DMA_ERROR_CODE; From f083f557186d77de5f14bf90aec1bfe2503cf513 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:10:31 -0700 Subject: [PATCH 002/169] dmaengine: Add helper function to prep for error reporting Dmaengine does not provide a way to pass back the result from a DMA transaction through the callback function. We are adding dmaengine helper function in order to prep for a mechanism that allow result status and other information through the callback. The initial conversion will make the existing driver use these new helper functions but retain the original behavior of the code. However, the helper functions paves a way towards adding the result parameter through callback. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.h | 72 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h index 17f983a4e9ba..94a4379c7639 100644 --- a/drivers/dma/dmaengine.h +++ b/drivers/dma/dmaengine.h @@ -86,4 +86,76 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue) state->residue = residue; } +struct dmaengine_desc_callback { + dma_async_tx_callback callback; + void *callback_param; +}; + +/** + * dmaengine_desc_get_callback - get the passed in callback function + * @tx: tx descriptor + * @cb: temp struct to hold the callback info + * + * Fill the passed in cb struct with what's available in the passed in + * tx descriptor struct + * No locking is required. + */ +static inline void +dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx, + struct dmaengine_desc_callback *cb) +{ + cb->callback = tx->callback; + cb->callback_param = tx->callback_param; +} + +/** + * dmaengine_desc_callback_invoke - call the callback function in cb struct + * @cb: temp struct that is holding the callback info + * @result: dummy pointer for now + * + * Call the callback function provided in the cb struct with the parameter + * in the cb struct. + * Locking is dependent on the driver. 
+ */ +static inline void +dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb, + const void *result) +{ + if (cb->callback) + cb->callback(cb->callback_param); +} + +/** + * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and + * then immediately call the callback. + * @tx: dma async tx descriptor + * @result: dummy pointer for now + * + * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke() + * in a single function since no work is necessary in between for the driver. + * Locking is dependent on the driver. + */ +static inline void +dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx, + const void *result) +{ + struct dmaengine_desc_callback cb; + + dmaengine_desc_get_callback(tx, &cb); + dmaengine_desc_callback_invoke(&cb, result); +} + +/** + * dmaengine_desc_callback_valid - verify the callback is valid in cb + * @cb: callback info struct + * + * Return a bool that verifies whether callback in cb is valid or not. + * No locking is required. + */ +static inline bool +dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb) +{ + return (cb->callback) ? true : false; +} + #endif From dff232dab98154903970c1e6b36f47c9defbd0a2 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:10:37 -0700 Subject: [PATCH 003/169] dmaengine: at_hdmac: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Cc: Nicolas Ferre Signed-off-by: Vinod Koul --- drivers/dma/at_hdmac.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 53d22eb73b56..a4c8f80db29d 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -473,15 +473,11 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) /* for cyclic transfers, * no need to replay callback function while stopping */ if (!atc_chan_is_cyclic(atchan)) { - dma_async_tx_callback callback = txd->callback; - void *param = txd->callback_param; - /* * The API requires that no submissions are done from a * callback, so we don't need to drop the lock here */ - if (callback) - callback(param); + dmaengine_desc_get_callback_invoke(txd, NULL); } dma_run_dependencies(txd); @@ -598,15 +594,12 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan) { struct at_desc *first = atc_first_active(atchan); struct dma_async_tx_descriptor *txd = &first->txd; - dma_async_tx_callback callback = txd->callback; - void *param = txd->callback_param; dev_vdbg(chan2dev(&atchan->chan_common), "new cyclic period llp 0x%08x\n", channel_readl(atchan, DSCR)); - if (callback) - callback(param); + dmaengine_desc_get_callback_invoke(txd, NULL); } /*-- IRQ & Tasklet ---------------------------------------------------*/ From a1d4eaaf4021a3a0c1299f254faf7b50ca9ddbfa Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:10:42 -0700 Subject: [PATCH 004/169] dmaengine: at_xdmac: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. 
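[Editorial note: for reference, the conversion applied by this and the following driver patches follows a single pattern: capture the callback into a struct dmaengine_desc_callback while the descriptor is still valid, then invoke it through the helper. The sketch below is illustrative only and uses hypothetical foo_* names, not code from any driver in this series.]

        /* Before: open-coded callback handling */
        static void foo_complete(struct dma_async_tx_descriptor *txd)
        {
                dma_async_tx_callback callback = txd->callback;
                void *param = txd->callback_param;

                dma_cookie_complete(txd);
                if (callback)
                        callback(param);
        }

        /* After: capture and invoke through the dmaengine helpers */
        static void foo_complete(struct dma_async_tx_descriptor *txd)
        {
                struct dmaengine_desc_callback cb;

                dma_cookie_complete(txd);
                dmaengine_desc_get_callback(txd, &cb);
                dmaengine_desc_callback_invoke(&cb, NULL);      /* result stays NULL for now */
        }

[Drivers that need nothing between the two steps can call dmaengine_desc_get_callback_invoke(txd, NULL) directly, as most of the conversions below do.]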
Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Cc: Ludovic Desroches Signed-off-by: Vinod Koul --- drivers/dma/at_xdmac.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index e434ffe7bc5c..2badc57a7f31 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1572,8 +1572,8 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan) desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); txd = &desc->tx_dma_desc; - if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT)) - txd->callback(txd->callback_param); + if (txd->flags & DMA_PREP_INTERRUPT) + dmaengine_desc_get_callback_invoke(txd, NULL); } static void at_xdmac_tasklet(unsigned long data) @@ -1616,8 +1616,8 @@ static void at_xdmac_tasklet(unsigned long data) if (!at_xdmac_chan_is_cyclic(atchan)) { dma_cookie_complete(txd); - if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT)) - txd->callback(txd->callback_param); + if (txd->flags & DMA_PREP_INTERRUPT) + dmaengine_desc_get_callback_invoke(txd, NULL); } dma_run_dependencies(txd); From 3ab553d9f5ade52fc72a49b66bb5c6e356c2ba42 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:10:48 -0700 Subject: [PATCH 005/169] dmaengine: coh901318: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Cc: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/coh901318.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index e4acd63e42aa..81d29f0262b8 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1888,8 +1888,7 @@ static void dma_tasklet(unsigned long data) struct coh901318_chan *cohc = (struct coh901318_chan *) data; struct coh901318_desc *cohd_fin; unsigned long flags; - dma_async_tx_callback callback; - void *callback_param; + struct dmaengine_desc_callback cb; dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d" " nbr_active_done %ld\n", __func__, @@ -1904,8 +1903,7 @@ static void dma_tasklet(unsigned long data) goto err; /* locate callback to client */ - callback = cohd_fin->desc.callback; - callback_param = cohd_fin->desc.callback_param; + dmaengine_desc_get_callback(&cohd_fin->desc, &cb); /* sign this job as completed on the channel */ dma_cookie_complete(&cohd_fin->desc); @@ -1920,8 +1918,7 @@ static void dma_tasklet(unsigned long data) spin_unlock_irqrestore(&cohc->lock, flags); /* Call the callback when we're done */ - if (callback) - callback(callback_param); + dmaengine_desc_callback_invoke(&cb, NULL); spin_lock_irqsave(&cohc->lock, flags); From b310a619ab3fc78350e1dab4af0312af99c60b39 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:10:54 -0700 Subject: [PATCH 006/169] dmaengine: cppi41: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. 
Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/cppi41.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index 4b2317426c8e..3b4c842b5720 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -331,7 +331,7 @@ static irqreturn_t cppi41_irq(int irq, void *data) c->residue = pd_trans_len(c->desc->pd6) - len; dma_cookie_complete(&c->txd); - c->txd.callback(c->txd.callback_param); + dmaengine_desc_get_callback_invoke(&c->txd, NULL); } } return IRQ_HANDLED; From 577ef92512a5baf90233ab5d0dd6dd7dca33ee96 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:11:00 -0700 Subject: [PATCH 007/169] dmaengine: dw: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Cc: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index edf053f73a49..12eedd457193 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -270,20 +270,19 @@ static void dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, bool callback_required) { - dma_async_tx_callback callback = NULL; - void *param = NULL; struct dma_async_tx_descriptor *txd = &desc->txd; struct dw_desc *child; unsigned long flags; + struct dmaengine_desc_callback cb; dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); spin_lock_irqsave(&dwc->lock, flags); dma_cookie_complete(txd); - if (callback_required) { - callback = txd->callback; - param = txd->callback_param; - } + if (callback_required) + dmaengine_desc_get_callback(txd, &cb); + else + memset(&cb, 0, sizeof(cb)); /* async_tx_ack */ list_for_each_entry(child, &desc->tx_list, desc_node) @@ -292,8 +291,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, dwc_desc_put(dwc, desc); spin_unlock_irqrestore(&dwc->lock, flags); - if (callback) - callback(param); + dmaengine_desc_callback_invoke(&cb, NULL); } static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) From dac86a148a8f363993dfe62931e0fb08d6108136 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:11:05 -0700 Subject: [PATCH 008/169] dmaengine: ep93xx_dma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. 
Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/ep93xx_dma.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 21f08cc3352b..aba0b381cb8c 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -739,10 +739,10 @@ static void ep93xx_dma_tasklet(unsigned long data) { struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; struct ep93xx_dma_desc *desc, *d; - dma_async_tx_callback callback = NULL; - void *callback_param = NULL; + struct dmaengine_desc_callback cb; LIST_HEAD(list); + memset(&cb, 0, sizeof(cb)); spin_lock_irq(&edmac->lock); /* * If dma_terminate_all() was called before we get to run, the active @@ -757,8 +757,7 @@ static void ep93xx_dma_tasklet(unsigned long data) dma_cookie_complete(&desc->txd); list_splice_init(&edmac->active, &list); } - callback = desc->txd.callback; - callback_param = desc->txd.callback_param; + dmaengine_desc_get_callback(&desc->txd, &cb); } spin_unlock_irq(&edmac->lock); @@ -771,8 +770,7 @@ static void ep93xx_dma_tasklet(unsigned long data) ep93xx_dma_desc_put(edmac, desc); } - if (callback) - callback(callback_param); + dmaengine_desc_callback_invoke(&cb, NULL); } static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id) From 1595c3e1bf4dc7d75d8623795040fc71d340ebe3 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:11:11 -0700 Subject: [PATCH 009/169] dmaengine: fsl_raid: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/fsl_raid.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c index aad167eaaee8..35d017a50502 100644 --- a/drivers/dma/fsl_raid.c +++ b/drivers/dma/fsl_raid.c @@ -134,16 +134,8 @@ static void fsl_re_issue_pending(struct dma_chan *chan) static void fsl_re_desc_done(struct fsl_re_desc *desc) { - dma_async_tx_callback callback; - void *callback_param; - dma_cookie_complete(&desc->async_tx); - - callback = desc->async_tx.callback; - callback_param = desc->async_tx.callback_param; - if (callback) - callback(callback_param); - + dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); dma_descriptor_unmap(&desc->async_tx); } From af1a5a5114e0f3646dbe1be0d42ec1a3a373223f Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:11:17 -0700 Subject: [PATCH 010/169] dmaengine: fsldma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. 
Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Cc: Li Yang Signed-off-by: Vinod Koul --- drivers/dma/fsldma.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 911b7177eb50..ef808665aeca 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -517,11 +517,7 @@ static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan, ret = txd->cookie; /* Run the link descriptor callback function */ - if (txd->callback) { - chan_dbg(chan, "LD %p callback\n", desc); - txd->callback(txd->callback_param); - } - + dmaengine_desc_get_callback_invoke(txd, NULL); dma_descriptor_unmap(txd); } From be5af2855af50edb7e81a99bc3e8c725839c6eff Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:11:22 -0700 Subject: [PATCH 011/169] dmaengine: imx-dma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/imx-dma.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index a960608c0a4d..ab0fb804fb1e 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -663,9 +663,7 @@ static void imxdma_tasklet(unsigned long data) out: spin_unlock_irqrestore(&imxdma->lock, flags); - if (desc->desc.callback) - desc->desc.callback(desc->desc.callback_param); - + dmaengine_desc_get_callback_invoke(&desc->desc, NULL); } static int imxdma_terminate_all(struct dma_chan *chan) From 48dc77e2d4fc2e5d85da6e6892f228a75272d040 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:11:28 -0700 Subject: [PATCH 012/169] dmaengine: imx-sdma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. 
Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 03ec76fc22ff..624facb6c8f4 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -650,8 +650,7 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) static void sdma_handle_channel_loop(struct sdma_channel *sdmac) { - if (sdmac->desc.callback) - sdmac->desc.callback(sdmac->desc.callback_param); + dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL); } static void sdma_update_channel_loop(struct sdma_channel *sdmac) @@ -701,8 +700,8 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) sdmac->status = DMA_COMPLETE; dma_cookie_complete(&sdmac->desc); - if (sdmac->desc.callback) - sdmac->desc.callback(sdmac->desc.callback_param); + + dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL); } static void sdma_tasklet(unsigned long data) From 63992864a2a55026fb11d1c9c686d348b205ce1f Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:11:33 -0700 Subject: [PATCH 013/169] dmaengine: ioatdma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/ioat/dma.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index bd09961443b1..6499de4b8e79 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -570,10 +570,8 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) if (tx->cookie) { dma_cookie_complete(tx); dma_descriptor_unmap(tx); - if (tx->callback) { - tx->callback(tx->callback_param); - tx->callback = NULL; - } + dmaengine_desc_get_callback_invoke(tx, NULL); + tx->callback = NULL; } if (tx->phys == phys_complete) @@ -707,10 +705,8 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan) if (tx->cookie) { dma_cookie_complete(tx); dma_descriptor_unmap(tx); - if (tx->callback) { - tx->callback(tx->callback_param); - tx->callback = NULL; - } + dmaengine_desc_get_callback_invoke(tx, NULL); + tx->callback = NULL; } } From db89e3c87715164abd7e2185a162499da1cdaa55 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:11:39 -0700 Subject: [PATCH 014/169] dmaengine: iop-adma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. 
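[Editorial note: the ioatdma hunks above pair the helper with clearing the descriptor's callback pointer so a later cleanup pass cannot invoke it a second time; mic_x100_dma below uses the same idiom. A minimal, hypothetical sketch of that pattern, not taken from any of these drivers:]

        static void foo_cleanup_one(struct dma_async_tx_descriptor *tx)
        {
                if (!tx->cookie)
                        return;

                dma_cookie_complete(tx);
                dma_descriptor_unmap(tx);
                dmaengine_desc_get_callback_invoke(tx, NULL);
                /* the helper copied the callback, so clearing it here is safe */
                tx->callback = NULL;
        }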
Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/iop-adma.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index f039cfadf17b..a410657f7bcd 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -71,8 +71,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, /* call the callback (must not sleep or submit new * operations to this channel) */ - if (tx->callback) - tx->callback(tx->callback_param); + dmaengine_desc_get_callback_invoke(tx, NULL); dma_descriptor_unmap(tx); if (desc->group_head) From 80a7d64325e79b811dd26254785323ccb1337fd5 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:11:45 -0700 Subject: [PATCH 015/169] dmaengine: ipu: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/ipu/ipu_idmac.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index b54f62de9232..ed76044ce4b9 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1160,11 +1160,10 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) struct scatterlist **sg, *sgnext, *sgnew = NULL; /* Next transfer descriptor */ struct idmac_tx_desc *desc, *descnew; - dma_async_tx_callback callback; - void *callback_param; bool done = false; u32 ready0, ready1, curbuf, err; unsigned long flags; + struct dmaengine_desc_callback cb; /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */ @@ -1278,12 +1277,12 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) if (likely(sgnew) && ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { - callback = descnew->txd.callback; - callback_param = descnew->txd.callback_param; + dmaengine_desc_get_callback(&descnew->txd, &cb); + list_del_init(&descnew->list); spin_unlock(&ichan->lock); - if (callback) - callback(callback_param); + + dmaengine_desc_callback_invoke(&cb, NULL); spin_lock(&ichan->lock); } @@ -1292,13 +1291,12 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) if (done) dma_cookie_complete(&desc->txd); - callback = desc->txd.callback; - callback_param = desc->txd.callback_param; + dmaengine_desc_get_callback(&desc->txd, &cb); spin_unlock(&ichan->lock); - if (done && (desc->txd.flags & DMA_PREP_INTERRUPT) && callback) - callback(callback_param); + if (done && (desc->txd.flags & DMA_PREP_INTERRUPT)) + dmaengine_desc_callback_invoke(&cb, NULL); return IRQ_HANDLED; } From 7a883acd3932fa3c24fef524d118b5784abb1c5e Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:11:50 -0700 Subject: [PATCH 016/169] dmaengine: mic_x100_dma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. 
Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Cc: Sudeep Dutt Signed-off-by: Vinod Koul --- drivers/dma/mic_x100_dma.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c index 1502b24b7c7d..818255844a3c 100644 --- a/drivers/dma/mic_x100_dma.c +++ b/drivers/dma/mic_x100_dma.c @@ -104,10 +104,8 @@ static void mic_dma_cleanup(struct mic_dma_chan *ch) tx = &ch->tx_array[last_tail]; if (tx->cookie) { dma_cookie_complete(tx); - if (tx->callback) { - tx->callback(tx->callback_param); - tx->callback = NULL; - } + dmaengine_desc_get_callback_invoke(tx, NULL); + tx->callback = NULL; } last_tail = mic_dma_hw_ring_inc(last_tail); } From 9c1e511cc6675f3e2102e4a69a5d6eda18cfacc1 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:11:56 -0700 Subject: [PATCH 017/169] dmaengine: mmp_pdma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/mmp_pdma.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index f4b25fb0d040..eb3a1f42ab06 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -864,19 +864,15 @@ static void dma_do_tasklet(unsigned long data) struct mmp_pdma_desc_sw *desc, *_desc; LIST_HEAD(chain_cleanup); unsigned long flags; + struct dmaengine_desc_callback cb; if (chan->cyclic_first) { - dma_async_tx_callback cb = NULL; - void *cb_data = NULL; - spin_lock_irqsave(&chan->desc_lock, flags); desc = chan->cyclic_first; - cb = desc->async_tx.callback; - cb_data = desc->async_tx.callback_param; + dmaengine_desc_get_callback(&desc->async_tx, &cb); spin_unlock_irqrestore(&chan->desc_lock, flags); - if (cb) - cb(cb_data); + dmaengine_desc_callback_invoke(&cb, NULL); return; } @@ -921,8 +917,8 @@ static void dma_do_tasklet(unsigned long data) /* Remove from the list of transactions */ list_del(&desc->node); /* Run the link descriptor callback function */ - if (txd->callback) - txd->callback(txd->callback_param); + dmaengine_desc_get_callback(txd, &cb); + dmaengine_desc_callback_invoke(&cb, NULL); dma_pool_free(chan->desc_pool, desc, txd->phys); } From 81141bac700568afec626abda493310720077043 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:12:01 -0700 Subject: [PATCH 018/169] dmaengine: mmp_tdma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. 
Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/mmp_tdma.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index b3441f57a364..0e933849e80f 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -349,9 +349,7 @@ static void dma_do_tasklet(unsigned long data) { struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data; - if (tdmac->desc.callback) - tdmac->desc.callback(tdmac->desc.callback_param); - + dmaengine_desc_get_callback_invoke(&tdmac->desc, NULL); } static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac) From ad34636885bee29e473bea4e2540fe5fb8efb564 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:12:07 -0700 Subject: [PATCH 019/169] dmaengine: mpc512x_dma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/mpc512x_dma.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index fa86592c7ae1..cacb78e34136 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -411,8 +411,7 @@ static void mpc_dma_process_completed(struct mpc_dma *mdma) list_for_each_entry(mdesc, &list, node) { desc = &mdesc->desc; - if (desc->callback) - desc->callback(desc->callback_param); + dmaengine_desc_get_callback_invoke(desc, NULL); last_cookie = desc->cookie; dma_run_dependencies(desc); From ee7681a48063111c5bcb0385809ec2be90eabd70 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:12:13 -0700 Subject: [PATCH 020/169] dmaengine: mv_xor: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/mv_xor.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index f4c9f98ec35e..f8b5e7424b3a 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -209,10 +209,7 @@ mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc, /* call the callback (must not sleep or submit new * operations to this channel) */ - if (desc->async_tx.callback) - desc->async_tx.callback( - desc->async_tx.callback_param); - + dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); dma_descriptor_unmap(&desc->async_tx); } From 064370c6a1d97dda4b8827c02782587023c5c227 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:12:18 -0700 Subject: [PATCH 021/169] dmaengine: mxs-dma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. 
Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/mxs-dma.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 60de35251da5..50e64e113ffb 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -326,8 +326,7 @@ static void mxs_dma_tasklet(unsigned long data) { struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data; - if (mxs_chan->desc.callback) - mxs_chan->desc.callback(mxs_chan->desc.callback_param); + dmaengine_desc_get_callback_invoke(&mxs_chan->desc, NULL); } static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq) From 0024b2ac374490de46bfd35c43002c7800ad14ed Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:12:24 -0700 Subject: [PATCH 022/169] dmaengine: nbpfaxi: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/nbpfaxi.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 08c45c185549..09de71519d37 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -1102,8 +1102,7 @@ static void nbpf_chan_tasklet(unsigned long data) { struct nbpf_channel *chan = (struct nbpf_channel *)data; struct nbpf_desc *desc, *tmp; - dma_async_tx_callback callback; - void *param; + struct dmaengine_desc_callback cb; while (!list_empty(&chan->done)) { bool found = false, must_put, recycling = false; @@ -1151,14 +1150,12 @@ static void nbpf_chan_tasklet(unsigned long data) must_put = false; } - callback = desc->async_tx.callback; - param = desc->async_tx.callback_param; + dmaengine_desc_get_callback(&desc->async_tx, &cb); /* ack and callback completed descriptor */ spin_unlock_irq(&chan->lock); - if (callback) - callback(param); + dmaengine_desc_callback_invoke(&cb, NULL); if (must_put) nbpf_desc_put(desc); From 5c066f7d01917ed76f2b8d395d7da3070ff16455 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:12:29 -0700 Subject: [PATCH 023/169] dmaengine: pch_dma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. 
Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pch_dma.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 113605f6fe20..df95727dc2fb 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -357,14 +357,13 @@ static void pdc_chain_complete(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc) { struct dma_async_tx_descriptor *txd = &desc->txd; - dma_async_tx_callback callback = txd->callback; - void *param = txd->callback_param; + struct dmaengine_desc_callback cb; + dmaengine_desc_get_callback(txd, &cb); list_splice_init(&desc->tx_list, &pd_chan->free_list); list_move(&desc->desc_node, &pd_chan->free_list); - if (callback) - callback(param); + dmaengine_desc_callback_invoke(&cb, NULL); } static void pdc_complete_all(struct pch_dma_chan *pd_chan) From f08462c650a7e2ec5b68adef94505e1c34fdb309 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:12:35 -0700 Subject: [PATCH 024/169] dmaengine: pl330: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 4fc3ffbd5ca0..1ecd4674aa23 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2039,14 +2039,12 @@ static void pl330_tasklet(unsigned long data) } while (!list_empty(&pch->completed_list)) { - dma_async_tx_callback callback; - void *callback_param; + struct dmaengine_desc_callback cb; desc = list_first_entry(&pch->completed_list, struct dma_pl330_desc, node); - callback = desc->txd.callback; - callback_param = desc->txd.callback_param; + dmaengine_desc_get_callback(&desc->txd, &cb); if (pch->cyclic) { desc->status = PREP; @@ -2064,9 +2062,9 @@ static void pl330_tasklet(unsigned long data) dma_descriptor_unmap(&desc->txd); - if (callback) { + if (dmaengine_desc_callback_valid(&cb)) { spin_unlock_irqrestore(&pch->lock, flags); - callback(callback_param); + dmaengine_desc_callback_invoke(&cb, NULL); spin_lock_irqsave(&pch->lock, flags); } } From 44967bf73352ccc0ea9346fc1c7dd805bc2279b5 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:12:40 -0700 Subject: [PATCH 025/169] dmaengine: ppc4xx_adma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. 
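[Editorial note: where the client callback must run without the channel lock held (pl330 above, rcar-dmac below), dmaengine_desc_callback_valid() lets the driver test the captured copy before dropping the lock. The sketch below is illustrative and uses hypothetical foo_chan/foo_desc types:]

        static void foo_complete_one(struct foo_chan *fc, struct foo_desc *fd)
        {
                struct dmaengine_desc_callback cb;
                unsigned long flags;

                spin_lock_irqsave(&fc->lock, flags);
                dma_cookie_complete(&fd->txd);
                dmaengine_desc_get_callback(&fd->txd, &cb);
                list_move(&fd->node, &fc->free_list);

                /* drop the lock so the client may submit new work from its callback */
                if (dmaengine_desc_callback_valid(&cb)) {
                        spin_unlock_irqrestore(&fc->lock, flags);
                        dmaengine_desc_callback_invoke(&cb, NULL);
                        spin_lock_irqsave(&fc->lock, flags);
                }
                spin_unlock_irqrestore(&fc->lock, flags);
        }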
Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/ppc4xx/adma.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index da3688b94bdc..140f3ed429f4 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -1485,10 +1485,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions( /* call the callback (must not sleep or submit new * operations to this channel) */ - if (desc->async_tx.callback) - desc->async_tx.callback( - desc->async_tx.callback_param); - + dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); dma_descriptor_unmap(&desc->async_tx); } From 5ade6683e916df6a00a9747ccc40191fff83a064 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:12:47 -0700 Subject: [PATCH 026/169] dmaengine: qcom_hidma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Acked-by: Sinan Kaya Signed-off-by: Vinod Koul --- drivers/dma/qcom/hidma.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index b2374cd91e45..1197fbf8f30e 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -132,8 +132,8 @@ static void hidma_process_completed(struct hidma_chan *mchan) spin_unlock_irqrestore(&mchan->lock, irqflags); llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch); - if (desc->callback && (llstat == DMA_COMPLETE)) - desc->callback(desc->callback_param); + if (llstat == DMA_COMPLETE) + dmaengine_desc_get_callback_invoke(desc, NULL); last_cookie = desc->cookie; dma_run_dependencies(desc); @@ -413,14 +413,9 @@ static int hidma_terminate_channel(struct dma_chan *chan) /* return all user requests */ list_for_each_entry_safe(mdesc, tmp, &list, node) { struct dma_async_tx_descriptor *txd = &mdesc->desc; - dma_async_tx_callback callback = mdesc->desc.callback; - void *param = mdesc->desc.callback_param; dma_descriptor_unmap(txd); - - if (callback) - callback(param); - + dmaengine_desc_get_callback_invoke(txd, NULL); dma_run_dependencies(txd); /* move myself to free_list */ From 964b2fd88bf5fffe2997663b04952f2d109f3b54 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:12:53 -0700 Subject: [PATCH 027/169] dmaengine: sh_rcar-dmac: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/sh/rcar-dmac.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 0dd953884d1d..d1defa4646ba 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1389,21 +1389,18 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev) { struct rcar_dmac_chan *chan = dev; struct rcar_dmac_desc *desc; + struct dmaengine_desc_callback cb; spin_lock_irq(&chan->lock); /* For cyclic transfers notify the user after every chunk. 
*/ if (chan->desc.running && chan->desc.running->cyclic) { - dma_async_tx_callback callback; - void *callback_param; - desc = chan->desc.running; - callback = desc->async_tx.callback; - callback_param = desc->async_tx.callback_param; + dmaengine_desc_get_callback(&desc->async_tx, &cb); - if (callback) { + if (dmaengine_desc_callback_valid(&cb)) { spin_unlock_irq(&chan->lock); - callback(callback_param); + dmaengine_desc_callback_invoke(&cb, NULL); spin_lock_irq(&chan->lock); } } @@ -1418,14 +1415,15 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev) dma_cookie_complete(&desc->async_tx); list_del(&desc->node); - if (desc->async_tx.callback) { + dmaengine_desc_get_callback(&desc->async_tx, &cb); + if (dmaengine_desc_callback_valid(&cb)) { spin_unlock_irq(&chan->lock); /* * We own the only reference to this descriptor, we can * safely dereference it without holding the channel * lock. */ - desc->async_tx.callback(desc->async_tx.callback_param); + dmaengine_desc_callback_invoke(&cb, NULL); spin_lock_irq(&chan->lock); } From b8bdebb98a56c953808388295670f79b88313fd1 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:13:05 -0700 Subject: [PATCH 028/169] dmaengine: sirf-dma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/sirf-dma.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index d8bc3f2a71db..a96e4a480de5 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -360,9 +360,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma) list_for_each_entry(sdesc, &list, node) { desc = &sdesc->desc; - if (desc->callback) - desc->callback(desc->callback_param); - + dmaengine_desc_get_callback_invoke(desc, NULL); last_cookie = desc->cookie; dma_run_dependencies(desc); } @@ -388,8 +386,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma) desc = &sdesc->desc; while (happened_cyclic != schan->completed_cyclic) { - if (desc->callback) - desc->callback(desc->callback_param); + dmaengine_desc_get_callback_invoke(desc, NULL); schan->completed_cyclic++; } } From 3a315d5d4b3ba18062e8c5f0cfa5373768cb91d6 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:13:10 -0700 Subject: [PATCH 029/169] dmaengine: ste_dma40: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. 
Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Cc: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 8b18e44a02d5..73203ac83734 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1596,8 +1596,7 @@ static void dma_tasklet(unsigned long data) struct d40_desc *d40d; unsigned long flags; bool callback_active; - dma_async_tx_callback callback; - void *callback_param; + struct dmaengine_desc_callback cb; spin_lock_irqsave(&d40c->lock, flags); @@ -1624,8 +1623,7 @@ static void dma_tasklet(unsigned long data) /* Callback to client */ callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT); - callback = d40d->txd.callback; - callback_param = d40d->txd.callback_param; + dmaengine_desc_get_callback(&d40d->txd, &cb); if (!d40d->cyclic) { if (async_tx_test_ack(&d40d->txd)) { @@ -1646,8 +1644,8 @@ static void dma_tasklet(unsigned long data) spin_unlock_irqrestore(&d40c->lock, flags); - if (callback_active && callback) - callback(callback_param); + if (callback_active) + dmaengine_desc_callback_invoke(&cb, NULL); return; From 370c0446af5e3c8ddefbb753a1df84c278fff6d0 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:13:16 -0700 Subject: [PATCH 030/169] dmaengine: tegra20-apb-dma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Cc: Laxman Dewangan Signed-off-by: Vinod Koul --- drivers/dma/tegra20-apb-dma.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 6ab9eb98588a..3722b9d8d9fe 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -655,8 +655,7 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc, static void tegra_dma_tasklet(unsigned long data) { struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data; - dma_async_tx_callback callback = NULL; - void *callback_param = NULL; + struct dmaengine_desc_callback cb; struct tegra_dma_desc *dma_desc; unsigned long flags; int cb_count; @@ -666,13 +665,12 @@ static void tegra_dma_tasklet(unsigned long data) dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc), cb_node); list_del(&dma_desc->cb_node); - callback = dma_desc->txd.callback; - callback_param = dma_desc->txd.callback_param; + dmaengine_desc_get_callback(&dma_desc->txd, &cb); cb_count = dma_desc->cb_count; dma_desc->cb_count = 0; spin_unlock_irqrestore(&tdc->lock, flags); - while (cb_count-- && callback) - callback(callback_param); + while (cb_count--) + dmaengine_desc_callback_invoke(&cb, NULL); spin_lock_irqsave(&tdc->lock, flags); } spin_unlock_irqrestore(&tdc->lock, flags); From a06a5bb908248cdc4f696ae5b6334e3b2e150fbc Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:13:22 -0700 Subject: [PATCH 031/169] dmaengine: timb_dma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. 
Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/timb_dma.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index e82745aa42a8..896bafb7a532 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -226,8 +226,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan) static void __td_finish(struct timb_dma_chan *td_chan) { - dma_async_tx_callback callback; - void *param; + struct dmaengine_desc_callback cb; struct dma_async_tx_descriptor *txd; struct timb_dma_desc *td_desc; @@ -252,8 +251,7 @@ static void __td_finish(struct timb_dma_chan *td_chan) dma_cookie_complete(txd); td_chan->ongoing = false; - callback = txd->callback; - param = txd->callback_param; + dmaengine_desc_get_callback(txd, &cb); list_move(&td_desc->desc_node, &td_chan->free_list); @@ -262,8 +260,7 @@ static void __td_finish(struct timb_dma_chan *td_chan) * The API requires that no submissions are done from a * callback, so we don't need to drop the lock here */ - if (callback) - callback(param); + dmaengine_desc_callback_invoke(&cb, NULL); } static u32 __td_ier_mask(struct timb_dma *td) From d254c8d0a725f566dc83d4dda81980c63d7f3838 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:13:28 -0700 Subject: [PATCH 032/169] dmaengine: txx9dmac: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/txx9dmac.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 7632290e7c14..4d8c7b9078fd 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -403,16 +403,14 @@ static void txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc) { - dma_async_tx_callback callback; - void *param; + struct dmaengine_desc_callback cb; struct dma_async_tx_descriptor *txd = &desc->txd; dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", txd->cookie, desc); dma_cookie_complete(txd); - callback = txd->callback; - param = txd->callback_param; + dmaengine_desc_get_callback(txd, &cb); txx9dmac_sync_desc_for_cpu(dc, desc); list_splice_init(&desc->tx_list, &dc->free_list); @@ -423,8 +421,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, * The API requires that no submissions are done from a * callback, so we don't need to drop the lock here */ - if (callback) - callback(param); + dmaengine_desc_callback_invoke(&cb, NULL); dma_run_dependencies(txd); } From 4f03ac6a2da050591e6e78921cacdff1546418c3 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:13:33 -0700 Subject: [PATCH 033/169] dmaengine: virt-dma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. 
Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/virt-dma.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index a35c211857dd..e47fc9b0944f 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -87,8 +87,7 @@ static void vchan_complete(unsigned long arg) { struct virt_dma_chan *vc = (struct virt_dma_chan *)arg; struct virt_dma_desc *vd; - dma_async_tx_callback cb = NULL; - void *cb_data = NULL; + struct dmaengine_desc_callback cb; LIST_HEAD(head); spin_lock_irq(&vc->lock); @@ -96,18 +95,17 @@ static void vchan_complete(unsigned long arg) vd = vc->cyclic; if (vd) { vc->cyclic = NULL; - cb = vd->tx.callback; - cb_data = vd->tx.callback_param; + dmaengine_desc_get_callback(&vd->tx, &cb); + } else { + memset(&cb, 0, sizeof(cb)); } spin_unlock_irq(&vc->lock); - if (cb) - cb(cb_data); + dmaengine_desc_callback_invoke(&cb, NULL); while (!list_empty(&head)) { vd = list_first_entry(&head, struct virt_dma_desc, node); - cb = vd->tx.callback; - cb_data = vd->tx.callback_param; + dmaengine_desc_get_callback(&vd->tx, &cb); list_del(&vd->node); if (dmaengine_desc_test_reuse(&vd->tx)) @@ -115,8 +113,7 @@ static void vchan_complete(unsigned long arg) else vc->desc_free(vd); - if (cb) - cb(cb_data); + dmaengine_desc_callback_invoke(&cb, NULL); } } From b1f884a5ff23534c3808926aeb62187be55e751e Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:13:39 -0700 Subject: [PATCH 034/169] dmaengine: xgene-dma: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/xgene-dma.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index 9cb93c5b655d..d66ed11baaec 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -608,8 +608,7 @@ static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan, dma_cookie_complete(tx); /* Run the link descriptor callback function */ - if (tx->callback) - tx->callback(tx->callback_param); + dmaengine_desc_get_callback_invoke(tx, NULL); dma_descriptor_unmap(tx); From f067025bc676ba8d18fba5f959598339e39b86db Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:13:50 -0700 Subject: [PATCH 035/169] dmaengine: add support to provide error result from a DMA transation Adding a new callback that will provide the error result for a transaction. The result is allocated on the stack and the callback should create a copy if it wishes to retain the information after exiting. The result parameter is now defined and takes over the dummy void pointer we placed in the helper functions previously. dmaengine drivers should start converting to the new "callback_result" callback in order to receive transaction results. 
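[Editorial note: to illustrate the consumer side of this change (not part of the patch), a client sets the new callback_result member instead of callback and receives a struct dmaengine_result describing how the transaction ended; when a provider still passes a NULL result, the helper substitutes a DMA_TRANS_NOERROR dummy, preserving legacy behavior. The foo_* names below are hypothetical:]

        /* hypothetical client completion handler using the result callback */
        static void foo_dma_done(void *arg, const struct dmaengine_result *result)
        {
                struct completion *done = arg;

                switch (result->result) {
                case DMA_TRANS_NOERROR:
                        break;
                case DMA_TRANS_ABORTED:
                        pr_warn("foo: transfer aborted, residue %u\n", result->residue);
                        break;
                default:        /* DMA_TRANS_READ_FAILED / DMA_TRANS_WRITE_FAILED */
                        pr_err("foo: transfer failed, residue %u\n", result->residue);
                        break;
                }
                complete(done);
        }

        /* submission fragment: tx, chan and foo_done are set up elsewhere */
        tx->callback_result = foo_dma_done;
        tx->callback_param = &foo_done;
        dmaengine_submit(tx);
        dma_async_issue_pending(chan);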
Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.h | 22 +++++++++++++++++----- include/linux/dmaengine.h | 16 ++++++++++++++++ 2 files changed, 33 insertions(+), 5 deletions(-) diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h index 94a4379c7639..882ff9448c3b 100644 --- a/drivers/dma/dmaengine.h +++ b/drivers/dma/dmaengine.h @@ -88,6 +88,7 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue) struct dmaengine_desc_callback { dma_async_tx_callback callback; + dma_async_tx_callback_result callback_result; void *callback_param; }; @@ -105,13 +106,14 @@ dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx, struct dmaengine_desc_callback *cb) { cb->callback = tx->callback; + cb->callback_result = tx->callback_result; cb->callback_param = tx->callback_param; } /** * dmaengine_desc_callback_invoke - call the callback function in cb struct * @cb: temp struct that is holding the callback info - * @result: dummy pointer for now + * @result: transaction result * * Call the callback function provided in the cb struct with the parameter * in the cb struct. @@ -119,17 +121,27 @@ dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx, */ static inline void dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb, - const void *result) + const struct dmaengine_result *result) { - if (cb->callback) + struct dmaengine_result dummy_result = { + .result = DMA_TRANS_NOERROR, + .residue = 0 + }; + + if (cb->callback_result) { + if (!result) + result = &dummy_result; + cb->callback_result(cb->callback_param, result); + } else if (cb->callback) { cb->callback(cb->callback_param); + } } /** * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and * then immediately call the callback. * @tx: dma async tx descriptor - * @result: dummy pointer for now + * @result: transaction result * * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke() * in a single function since no work is necessary in between for the driver. 
@@ -137,7 +149,7 @@ dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb, */ static inline void dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx, - const void *result) + const struct dmaengine_result *result) { struct dmaengine_desc_callback cb; diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 30de0197263a..cc535a478bae 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -441,6 +441,21 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); typedef void (*dma_async_tx_callback)(void *dma_async_param); +enum dmaengine_tx_result { + DMA_TRANS_NOERROR = 0, /* SUCCESS */ + DMA_TRANS_READ_FAILED, /* Source DMA read failed */ + DMA_TRANS_WRITE_FAILED, /* Destination DMA write failed */ + DMA_TRANS_ABORTED, /* Op never submitted / aborted */ +}; + +struct dmaengine_result { + enum dmaengine_tx_result result; + u32 residue; +}; + +typedef void (*dma_async_tx_callback_result)(void *dma_async_param, + const struct dmaengine_result *result); + struct dmaengine_unmap_data { u8 map_cnt; u8 to_cnt; @@ -478,6 +493,7 @@ struct dma_async_tx_descriptor { dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); int (*desc_free)(struct dma_async_tx_descriptor *tx); dma_async_tx_callback callback; + dma_async_tx_callback_result callback_result; void *callback_param; struct dmaengine_unmap_data *unmap; #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH From 9546d4cdc8445acdea415f70a330bbfbd016a0f0 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:13:55 -0700 Subject: [PATCH 036/169] dmaengine: ioatdma: Add error handling to ioat driver Adding error handling to the ioatdma driver so that when a read/write error occurs the error results are reported back and all the remaining descriptors are aborted. This utilizes the new dmaengine callback function that allows reporting of results. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/ioat/dma.c | 140 +++++++++++++++++++++++++++++++---- drivers/dma/ioat/registers.h | 2 + 2 files changed, 126 insertions(+), 16 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 6499de4b8e79..251f8be639c2 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -568,10 +568,14 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) tx = &desc->txd; if (tx->cookie) { + struct dmaengine_result res; + dma_cookie_complete(tx); dma_descriptor_unmap(tx); + res.result = DMA_TRANS_NOERROR; dmaengine_desc_get_callback_invoke(tx, NULL); tx->callback = NULL; + tx->callback_result = NULL; } if (tx->phys == phys_complete) @@ -620,7 +624,8 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan) if (is_ioat_halted(*ioat_chan->completion)) { u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - if (chanerr & IOAT_CHANERR_HANDLE_MASK) { + if (chanerr & + (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) { mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); ioat_eh(ioat_chan); } @@ -650,6 +655,61 @@ static void ioat_restart_channel(struct ioatdma_chan *ioat_chan) __ioat_restart_chan(ioat_chan); } + +static void ioat_abort_descs(struct ioatdma_chan *ioat_chan) +{ + struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; + struct ioat_ring_ent *desc; + u16 active; + int idx = ioat_chan->tail, i; + + /* + * We assume that the failed descriptor has been processed. + * Now we are just returning all the remaining submitted + * descriptors to abort. 
+ */ + active = ioat_ring_active(ioat_chan); + + /* we skip the failed descriptor that tail points to */ + for (i = 1; i < active; i++) { + struct dma_async_tx_descriptor *tx; + + smp_read_barrier_depends(); + prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1)); + desc = ioat_get_ring_ent(ioat_chan, idx + i); + + tx = &desc->txd; + if (tx->cookie) { + struct dmaengine_result res; + + dma_cookie_complete(tx); + dma_descriptor_unmap(tx); + res.result = DMA_TRANS_ABORTED; + dmaengine_desc_get_callback_invoke(tx, &res); + tx->callback = NULL; + tx->callback_result = NULL; + } + + /* skip extended descriptors */ + if (desc_has_ext(desc)) { + WARN_ON(i + 1 >= active); + i++; + } + + /* cleanup super extended descriptors */ + if (desc->sed) { + ioat_free_sed(ioat_dma, desc->sed); + desc->sed = NULL; + } + } + + smp_mb(); /* finish all descriptor reads before incrementing tail */ + ioat_chan->tail = idx + active; + + desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail); + ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys; +} + static void ioat_eh(struct ioatdma_chan *ioat_chan) { struct pci_dev *pdev = to_pdev(ioat_chan); @@ -660,6 +720,8 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan) u32 err_handled = 0; u32 chanerr_int; u32 chanerr; + bool abort = false; + struct dmaengine_result res; /* cleanup so tail points to descriptor that caused the error */ if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) @@ -695,28 +757,50 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan) break; } + if (chanerr & IOAT_CHANERR_RECOVER_MASK) { + if (chanerr & IOAT_CHANERR_READ_DATA_ERR) { + res.result = DMA_TRANS_READ_FAILED; + err_handled |= IOAT_CHANERR_READ_DATA_ERR; + } else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) { + res.result = DMA_TRANS_WRITE_FAILED; + err_handled |= IOAT_CHANERR_WRITE_DATA_ERR; + } + + abort = true; + } else + res.result = DMA_TRANS_NOERROR; + /* fault on unhandled error or spurious halt */ if (chanerr ^ err_handled || chanerr == 0) { dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n", __func__, chanerr, err_handled); BUG(); - } else { /* cleanup the faulty descriptor */ - tx = &desc->txd; - if (tx->cookie) { - dma_cookie_complete(tx); - dma_descriptor_unmap(tx); - dmaengine_desc_get_callback_invoke(tx, NULL); - tx->callback = NULL; - } } - writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int); + /* cleanup the faulty descriptor since we are continuing */ + tx = &desc->txd; + if (tx->cookie) { + dma_cookie_complete(tx); + dma_descriptor_unmap(tx); + dmaengine_desc_get_callback_invoke(tx, &res); + tx->callback = NULL; + tx->callback_result = NULL; + } /* mark faulting descriptor as complete */ *ioat_chan->completion = desc->txd.phys; spin_lock_bh(&ioat_chan->prep_lock); + /* we need abort all descriptors */ + if (abort) { + ioat_abort_descs(ioat_chan); + /* clean up the channel, we could be in weird state */ + ioat_reset_hw(ioat_chan); + } + + writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int); + ioat_restart_channel(ioat_chan); spin_unlock_bh(&ioat_chan->prep_lock); } @@ -749,10 +833,25 @@ void ioat_timer_event(unsigned long data) chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n", __func__, chanerr); - if (test_bit(IOAT_RUN, &ioat_chan->state)) - BUG_ON(is_ioat_bug(chanerr)); - else /* we never got off the ground */ - return; + if 
(test_bit(IOAT_RUN, &ioat_chan->state)) { + spin_lock_bh(&ioat_chan->cleanup_lock); + spin_lock_bh(&ioat_chan->prep_lock); + set_bit(IOAT_CHAN_DOWN, &ioat_chan->state); + spin_unlock_bh(&ioat_chan->prep_lock); + + ioat_abort_descs(ioat_chan); + dev_warn(to_dev(ioat_chan), "Reset channel...\n"); + ioat_reset_hw(ioat_chan); + dev_warn(to_dev(ioat_chan), "Restart channel...\n"); + ioat_restart_channel(ioat_chan); + + spin_lock_bh(&ioat_chan->prep_lock); + clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state); + spin_unlock_bh(&ioat_chan->prep_lock); + spin_unlock_bh(&ioat_chan->cleanup_lock); + } + + return; } spin_lock_bh(&ioat_chan->cleanup_lock); @@ -776,14 +875,23 @@ void ioat_timer_event(unsigned long data) u32 chanerr; chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - dev_warn(to_dev(ioat_chan), "Restarting channel...\n"); dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n", status, chanerr); dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n", ioat_ring_active(ioat_chan)); spin_lock_bh(&ioat_chan->prep_lock); + set_bit(IOAT_CHAN_DOWN, &ioat_chan->state); + spin_unlock_bh(&ioat_chan->prep_lock); + + ioat_abort_descs(ioat_chan); + dev_warn(to_dev(ioat_chan), "Resetting channel...\n"); + ioat_reset_hw(ioat_chan); + dev_warn(to_dev(ioat_chan), "Restarting channel...\n"); ioat_restart_channel(ioat_chan); + + spin_lock_bh(&ioat_chan->prep_lock); + clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state); spin_unlock_bh(&ioat_chan->prep_lock); spin_unlock_bh(&ioat_chan->cleanup_lock); return; diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index 70534981a49b..48fa4cf9f64a 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h @@ -240,6 +240,8 @@ #define IOAT_CHANERR_DESCRIPTOR_COUNT_ERR 0x40000 #define IOAT_CHANERR_HANDLE_MASK (IOAT_CHANERR_XOR_P_OR_CRC_ERR | IOAT_CHANERR_XOR_Q_ERR) +#define IOAT_CHANERR_RECOVER_MASK (IOAT_CHANERR_READ_DATA_ERR | \ + IOAT_CHANERR_WRITE_DATA_ERR) #define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */ From aed681d1dc72914d448e44a99e1dc89baa32d25c Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:14:01 -0700 Subject: [PATCH 037/169] dmaengine: ioatdma: add error strings to chanerr output Provide a mechanism to translate CHANERR bits to English strings in order to allow user to report more concise errors. 
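The decode the patch adds is a straightforward walk over the 32 status bits, printing a name for every bit that is set. A minimal userspace sketch of the same approach, using only an illustrative subset of the names (the full table lives in the driver) and nothing beyond standard C:

#include <stdio.h>
#include <stdint.h>

/* Illustrative subset of the per-bit error names; the driver carries the full table. */
static const char *const chanerr_str[] = {
	"DMA Transfer Destination Address Error",
	"Next Descriptor Address Error",
	"Descriptor Error",
	"Chan Address Value Error",
};

static void print_chanerrs(uint32_t chanerr)
{
	const size_t known = sizeof(chanerr_str) / sizeof(chanerr_str[0]);

	for (size_t i = 0; i < 32; i++) {
		if (!(chanerr & (1u << i)))
			continue;
		if (i < known)
			printf("Err(%zu): %s\n", i, chanerr_str[i]);
		else
			printf("Err(%zu): unrecognized bit\n", i);
	}
}

int main(void)
{
	print_chanerrs(0x5);	/* bits 0 and 2 set */
	return 0;
}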
Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/ioat/dma.c | 65 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 61 insertions(+), 4 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 251f8be639c2..49386ce04bf5 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -38,8 +38,54 @@ #include "../dmaengine.h" +static char *chanerr_str[] = { + "DMA Transfer Destination Address Error", + "Next Descriptor Address Error", + "Descriptor Error", + "Chan Address Value Error", + "CHANCMD Error", + "Chipset Uncorrectable Data Integrity Error", + "DMA Uncorrectable Data Integrity Error", + "Read Data Error", + "Write Data Error", + "Descriptor Control Error", + "Descriptor Transfer Size Error", + "Completion Address Error", + "Interrupt Configuration Error", + "Super extended descriptor Address Error", + "Unaffiliated Error", + "CRC or XOR P Error", + "XOR Q Error", + "Descriptor Count Error", + "DIF All F detect Error", + "Guard Tag verification Error", + "Application Tag verification Error", + "Reference Tag verification Error", + "Bundle Bit Error", + "Result DIF All F detect Error", + "Result Guard Tag verification Error", + "Result Application Tag verification Error", + "Result Reference Tag verification Error", + NULL +}; + static void ioat_eh(struct ioatdma_chan *ioat_chan); +static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr) +{ + int i; + + for (i = 0; i < 32; i++) { + if ((chanerr >> i) & 1) { + if (chanerr_str[i]) { + dev_err(to_dev(ioat_chan), "Err(%d): %s\n", + i, chanerr_str[i]); + } else + break; + } + } +} + /** * ioat_dma_do_interrupt - handler used for single vector interrupt mode * @irq: interrupt id @@ -774,6 +820,11 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan) if (chanerr ^ err_handled || chanerr == 0) { dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n", __func__, chanerr, err_handled); + dev_err(to_dev(ioat_chan), "Errors handled:\n"); + ioat_print_chanerrs(ioat_chan, err_handled); + dev_err(to_dev(ioat_chan), "Errors not handled:\n"); + ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled)); + BUG(); } @@ -833,6 +884,9 @@ void ioat_timer_event(unsigned long data) chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n", __func__, chanerr); + dev_err(to_dev(ioat_chan), "Errors:\n"); + ioat_print_chanerrs(ioat_chan, chanerr); + if (test_bit(IOAT_RUN, &ioat_chan->state)) { spin_lock_bh(&ioat_chan->cleanup_lock); spin_lock_bh(&ioat_chan->prep_lock); @@ -875,10 +929,13 @@ void ioat_timer_event(unsigned long data) u32 chanerr; chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n", - status, chanerr); - dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n", - ioat_ring_active(ioat_chan)); + dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n", + status, chanerr); + dev_err(to_dev(ioat_chan), "Errors:\n"); + ioat_print_chanerrs(ioat_chan, chanerr); + + dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n", + ioat_ring_active(ioat_chan)); spin_lock_bh(&ioat_chan->prep_lock); set_bit(IOAT_CHAN_DOWN, &ioat_chan->state); From 9cabc2691e9d21b840b145a944f09299f895a7e0 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:14:07 -0700 Subject: [PATCH 038/169] ntb: add DMA error handling for TX DMA Adding support on the tx DMA path to allow recovery of errors when DMA responds with error status and abort all 
the subsequent ops. Signed-off-by: Dave Jiang Acked-by: Allen Hubbe Cc: Jon Mason Cc: linux-ntb@googlegroups.com Signed-off-by: Vinod Koul --- drivers/ntb/ntb_transport.c | 110 +++++++++++++++++++++++++++--------- 1 file changed, 83 insertions(+), 27 deletions(-) diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index d5c5894f252e..e61db11e6ccc 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -102,6 +102,9 @@ struct ntb_queue_entry { void *buf; unsigned int len; unsigned int flags; + int retries; + int errors; + unsigned int tx_index; struct ntb_transport_qp *qp; union { @@ -259,6 +262,9 @@ enum { static void ntb_transport_rxc_db(unsigned long data); static const struct ntb_ctx_ops ntb_transport_ops; static struct ntb_client ntb_transport_client; +static int ntb_async_tx_submit(struct ntb_transport_qp *qp, + struct ntb_queue_entry *entry); +static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset); static int ntb_transport_bus_match(struct device *dev, struct device_driver *drv) @@ -1467,12 +1473,39 @@ static void ntb_transport_rxc_db(unsigned long data) } } -static void ntb_tx_copy_callback(void *data) +static void ntb_tx_copy_callback(void *data, + const struct dmaengine_result *res) { struct ntb_queue_entry *entry = data; struct ntb_transport_qp *qp = entry->qp; struct ntb_payload_header __iomem *hdr = entry->tx_hdr; + /* we need to check DMA results if we are using DMA */ + if (res) { + enum dmaengine_tx_result dma_err = res->result; + + switch (dma_err) { + case DMA_TRANS_READ_FAILED: + case DMA_TRANS_WRITE_FAILED: + entry->errors++; + case DMA_TRANS_ABORTED: + { + void __iomem *offset = + qp->tx_mw + qp->tx_max_frame * + entry->tx_index; + + /* resubmit via CPU */ + ntb_memcpy_tx(entry, offset); + qp->tx_memcpy++; + return; + } + + case DMA_TRANS_NOERROR: + default: + break; + } + } + iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags); ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num)); @@ -1507,40 +1540,25 @@ static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset) /* Ensure that the data is fully copied out before setting the flags */ wmb(); - ntb_tx_copy_callback(entry); + ntb_tx_copy_callback(entry, NULL); } -static void ntb_async_tx(struct ntb_transport_qp *qp, - struct ntb_queue_entry *entry) +static int ntb_async_tx_submit(struct ntb_transport_qp *qp, + struct ntb_queue_entry *entry) { - struct ntb_payload_header __iomem *hdr; struct dma_async_tx_descriptor *txd; struct dma_chan *chan = qp->tx_dma_chan; struct dma_device *device; + size_t len = entry->len; + void *buf = entry->buf; size_t dest_off, buff_off; struct dmaengine_unmap_data *unmap; dma_addr_t dest; dma_cookie_t cookie; - void __iomem *offset; - size_t len = entry->len; - void *buf = entry->buf; int retries = 0; - offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; - hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); - entry->tx_hdr = hdr; - - iowrite32(entry->len, &hdr->len); - iowrite32((u32)qp->tx_pkts, &hdr->ver); - - if (!chan) - goto err; - - if (len < copy_bytes) - goto err; - device = chan->device; - dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index; + dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index; buff_off = (size_t)buf & ~PAGE_MASK; dest_off = (size_t)dest & ~PAGE_MASK; @@ -1560,8 +1578,9 @@ static void ntb_async_tx(struct ntb_transport_qp *qp, unmap->to_cnt = 1; for (retries = 0; retries < DMA_RETRIES; retries++) { - txd = device->device_prep_dma_memcpy(chan, dest, 
unmap->addr[0], - len, DMA_PREP_INTERRUPT); + txd = device->device_prep_dma_memcpy(chan, dest, + unmap->addr[0], len, + DMA_PREP_INTERRUPT); if (txd) break; @@ -1574,7 +1593,7 @@ static void ntb_async_tx(struct ntb_transport_qp *qp, goto err_get_unmap; } - txd->callback = ntb_tx_copy_callback; + txd->callback_result = ntb_tx_copy_callback; txd->callback_param = entry; dma_set_unmap(txd, unmap); @@ -1585,13 +1604,47 @@ static void ntb_async_tx(struct ntb_transport_qp *qp, dmaengine_unmap_put(unmap); dma_async_issue_pending(chan); - qp->tx_async++; - return; + return 0; err_set_unmap: dmaengine_unmap_put(unmap); err_get_unmap: dmaengine_unmap_put(unmap); +err: + return -ENXIO; +} + +static void ntb_async_tx(struct ntb_transport_qp *qp, + struct ntb_queue_entry *entry) +{ + struct ntb_payload_header __iomem *hdr; + struct dma_chan *chan = qp->tx_dma_chan; + void __iomem *offset; + int res; + + entry->tx_index = qp->tx_index; + offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index; + hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); + entry->tx_hdr = hdr; + + iowrite32(entry->len, &hdr->len); + iowrite32((u32)qp->tx_pkts, &hdr->ver); + + if (!chan) + goto err; + + if (entry->len < copy_bytes) + goto err; + + res = ntb_async_tx_submit(qp, entry); + if (res < 0) + goto err; + + if (!entry->retries) + qp->tx_async++; + + return; + err: ntb_memcpy_tx(entry, offset); qp->tx_memcpy++; @@ -1970,6 +2023,9 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, entry->buf = data; entry->len = len; entry->flags = 0; + entry->errors = 0; + entry->retries = 0; + entry->tx_index = 0; rc = ntb_process_tx(qp, entry); if (rc) From 72203572afd7aef243c182f19925e5a77a1dc6a1 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:14:13 -0700 Subject: [PATCH 039/169] ntb: add DMA error handling for RX DMA Adding support on the rx DMA path to allow recovery of errors when DMA responds with error status and abort all the subsequent ops. 
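As on the TX side, recovery is driven from the client's result-aware completion handler: any result other than success makes the client redo the copy with the CPU. A condensed sketch of such a handler, relying only on the dma_async_tx_callback_result signature and dmaengine_result values introduced earlier in this series; the request structure and the way the buffer is handed back are hypothetical:

#include <linux/dmaengine.h>
#include <linux/string.h>

struct my_rx_req {			/* hypothetical per-transfer bookkeeping */
	void *dst;
	const void *src;
	size_t len;
};

static void my_rx_done(void *param, const struct dmaengine_result *res)
{
	struct my_rx_req *req = param;

	if (res && res->result != DMA_TRANS_NOERROR) {
		/* read/write fault or aborted descriptor: redo the copy on the CPU */
		memcpy(req->dst, req->src, req->len);
	}

	/* ... mark the request complete for the upper layer ... */
}

/* wired up at submit time with: txd->callback_result = my_rx_done; */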
Signed-off-by: Dave Jiang Acked-by: Allen Hubbe Cc: Jon Mason Cc: linux-ntb@googlegroups.com Signed-off-by: Vinod Koul --- drivers/ntb/ntb_transport.c | 83 ++++++++++++++++++++++++++++++------- 1 file changed, 67 insertions(+), 16 deletions(-) diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index e61db11e6ccc..8601c10acf74 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -105,13 +105,13 @@ struct ntb_queue_entry { int retries; int errors; unsigned int tx_index; + unsigned int rx_index; struct ntb_transport_qp *qp; union { struct ntb_payload_header __iomem *tx_hdr; struct ntb_payload_header *rx_hdr; }; - unsigned int index; }; struct ntb_rx_info { @@ -265,6 +265,9 @@ static struct ntb_client ntb_transport_client; static int ntb_async_tx_submit(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry); static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset); +static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset); +static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset); + static int ntb_transport_bus_match(struct device *dev, struct device_driver *drv) @@ -1235,7 +1238,7 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp) break; entry->rx_hdr->flags = 0; - iowrite32(entry->index, &qp->rx_info->entry); + iowrite32(entry->rx_index, &qp->rx_info->entry); cb_data = entry->cb_data; len = entry->len; @@ -1253,10 +1256,36 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp) spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); } -static void ntb_rx_copy_callback(void *data) +static void ntb_rx_copy_callback(void *data, + const struct dmaengine_result *res) { struct ntb_queue_entry *entry = data; + /* we need to check DMA results if we are using DMA */ + if (res) { + enum dmaengine_tx_result dma_err = res->result; + + switch (dma_err) { + case DMA_TRANS_READ_FAILED: + case DMA_TRANS_WRITE_FAILED: + entry->errors++; + case DMA_TRANS_ABORTED: + { + struct ntb_transport_qp *qp = entry->qp; + void *offset = qp->rx_buff + qp->rx_max_frame * + qp->rx_index; + + ntb_memcpy_rx(entry, offset); + qp->rx_memcpy++; + return; + } + + case DMA_TRANS_NOERROR: + default: + break; + } + } + entry->flags |= DESC_DONE_FLAG; ntb_complete_rxc(entry->qp); @@ -1272,10 +1301,10 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset) /* Ensure that the data is fully copied out before clearing the flag */ wmb(); - ntb_rx_copy_callback(entry); + ntb_rx_copy_callback(entry, NULL); } -static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset) +static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset) { struct dma_async_tx_descriptor *txd; struct ntb_transport_qp *qp = entry->qp; @@ -1288,13 +1317,6 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset) int retries = 0; len = entry->len; - - if (!chan) - goto err; - - if (len < copy_bytes) - goto err; - device = chan->device; pay_off = (size_t)offset & ~PAGE_MASK; buff_off = (size_t)buf & ~PAGE_MASK; @@ -1322,7 +1344,8 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset) unmap->from_cnt = 1; for (retries = 0; retries < DMA_RETRIES; retries++) { - txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], + txd = device->device_prep_dma_memcpy(chan, + unmap->addr[1], unmap->addr[0], len, DMA_PREP_INTERRUPT); if (txd) @@ -1337,7 +1360,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset) goto err_get_unmap; } - txd->callback = ntb_rx_copy_callback; 
+ txd->callback_result = ntb_rx_copy_callback; txd->callback_param = entry; dma_set_unmap(txd, unmap); @@ -1351,12 +1374,37 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset) qp->rx_async++; - return; + return 0; err_set_unmap: dmaengine_unmap_put(unmap); err_get_unmap: dmaengine_unmap_put(unmap); +err: + return -ENXIO; +} + +static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset) +{ + struct ntb_transport_qp *qp = entry->qp; + struct dma_chan *chan = qp->rx_dma_chan; + int res; + + if (!chan) + goto err; + + if (entry->len < copy_bytes) + goto err; + + res = ntb_async_rx_submit(entry, offset); + if (res < 0) + goto err; + + if (!entry->retries) + qp->rx_async++; + + return; + err: ntb_memcpy_rx(entry, offset); qp->rx_memcpy++; @@ -1403,7 +1451,7 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp) } entry->rx_hdr = hdr; - entry->index = qp->rx_index; + entry->rx_index = qp->rx_index; if (hdr->len > entry->len) { dev_dbg(&qp->ndev->pdev->dev, @@ -1981,6 +2029,9 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, entry->buf = data; entry->len = len; entry->flags = 0; + entry->retries = 0; + entry->errors = 0; + entry->rx_index = 0; ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q); From 9686218982709122373b755c9d7457bed0872999 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 13:14:19 -0700 Subject: [PATCH 040/169] dmaengine: documentation to the new callback mechanism Adding documentation in dmaengine/provider.txt on the usage of callback with result, callback_result, function pointer to retrieve transanction result from a DMA submission. Signed-off-by: Dave Jiang Reviewed-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- Documentation/dmaengine/provider.txt | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt index 91ce82d5f0c4..c4fd47540b31 100644 --- a/Documentation/dmaengine/provider.txt +++ b/Documentation/dmaengine/provider.txt @@ -282,6 +282,17 @@ supported. that is supposed to push the current transaction descriptor to a pending queue, waiting for issue_pending to be called. + - In this structure the function pointer callback_result can be + initialized in order for the submitter to be notified that a + transaction has completed. In the earlier code the function pointer + callback has been used. However it does not provide any status to the + transaction and will be deprecated. The result structure defined as + dmaengine_result that is passed in to callback_result has two fields: + + result: This provides the transfer result defined by + dmaengine_tx_result. Either success or some error + condition. + + residue: Provides the residue bytes of the transfer for those that + support residue. * device_issue_pending - Takes the first transaction descriptor in the pending queue, From 9b335978f7081cd4fe264709599a18073e12fee2 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Mon, 25 Jul 2016 10:33:57 -0700 Subject: [PATCH 041/169] dmaengine: fsldma: move unmap to before callback Completion callback should happen after dma_descriptor_unmap() has happened. This allow the cache invalidate to happen and ensure that the data accessed by the upper layer is in memory that was from DMA rather than stale data. On some architecture this is done by the hardware, however we should make the code consistent to not cause confusion. 
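This and the following unmap-reordering patches (fsl_raid, mv_xor, ppc4xx/adma, xgene-dma) all converge on the same completion sequence. A hedged sketch of that ordering, built from the callback helpers added to drivers/dma/dmaengine.h earlier in this series; the surrounding function is hypothetical:

static void my_complete_desc(struct dma_async_tx_descriptor *txd)
{
	struct dmaengine_desc_callback cb;

	dma_cookie_complete(txd);	/* publish completion for dma_cookie_status() */
	dma_descriptor_unmap(txd);	/* unmap, and thus invalidate, before the client looks */

	dmaengine_desc_get_callback(txd, &cb);
	if (dmaengine_desc_callback_valid(&cb))
		dmaengine_desc_callback_invoke(&cb, NULL);

	dma_run_dependencies(txd);	/* then kick any dependent transactions */
}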
Signed-off-by: Dave Jiang Acked-by: Li Yang Signed-off-by: Vinod Koul --- drivers/dma/fsldma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index ef808665aeca..b557ccbaa1b5 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -516,9 +516,9 @@ static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan, if (txd->cookie > 0) { ret = txd->cookie; + dma_descriptor_unmap(txd); /* Run the link descriptor callback function */ dmaengine_desc_get_callback_invoke(txd, NULL); - dma_descriptor_unmap(txd); } /* Run any dependencies */ From a941106de4434c0173a2c6d5abedb2d1cfc11206 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Mon, 25 Jul 2016 10:34:03 -0700 Subject: [PATCH 042/169] dmaengine: fsl_raid: move unmap to before callback Completion callback should happen after dma_descriptor_unmap() has happened. This allow the cache invalidate to happen and ensure that the data accessed by the upper layer is in memory that was from DMA rather than stale data. On some architecture this is done by the hardware, however we should make the code consistent to not cause confusion. Signed-off-by: Dave Jiang Cc: Xuelin Shi Signed-off-by: Vinod Koul --- drivers/dma/fsl_raid.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c index 35d017a50502..a8c8b9ebd5b4 100644 --- a/drivers/dma/fsl_raid.c +++ b/drivers/dma/fsl_raid.c @@ -135,8 +135,8 @@ static void fsl_re_issue_pending(struct dma_chan *chan) static void fsl_re_desc_done(struct fsl_re_desc *desc) { dma_cookie_complete(&desc->async_tx); - dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); dma_descriptor_unmap(&desc->async_tx); + dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); } static void fsl_re_cleanup_descs(struct fsl_re_chan *re_chan) From 8058e25809f53cadc0438ebb8f920415a0d2ec17 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Mon, 25 Jul 2016 10:34:08 -0700 Subject: [PATCH 043/169] dmaengine: mv_xor: move unmap to before callback Completion callback should happen after dma_descriptor_unmap() has happened. This allow the cache invalidate to happen and ensure that the data accessed by the upper layer is in memory that was from DMA rather than stale data. On some architecture this is done by the hardware, however we should make the code consistent to not cause confusion. 
Signed-off-by: Dave Jiang Signed-off-by: Vinod Koul --- drivers/dma/mv_xor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index f8b5e7424b3a..d550efbc7054 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -206,11 +206,11 @@ mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc, if (desc->async_tx.cookie > 0) { cookie = desc->async_tx.cookie; + dma_descriptor_unmap(&desc->async_tx); /* call the callback (must not sleep or submit new * operations to this channel) */ dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); - dma_descriptor_unmap(&desc->async_tx); } /* run dependent operations */ From 369dbadac151232960fceb83cb14473e8a4e1a36 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Thu, 4 Aug 2016 18:06:13 +0530 Subject: [PATCH 044/169] dmengine: xilinx_dma: convert callback to helper function Move the xilinx driver to new dmaengine callback Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 4e223d094433..8288fe4d17c3 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -755,8 +755,7 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) spin_lock_irqsave(&chan->lock, flags); list_for_each_entry_safe(desc, next, &chan->done_list, node) { - dma_async_tx_callback callback; - void *callback_param; + struct dmaengine_desc_callback cb; if (desc->cyclic) { xilinx_dma_chan_handle_cyclic(chan, desc, &flags); @@ -767,11 +766,10 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) list_del(&desc->node); /* Run the link descriptor callback function */ - callback = desc->async_tx.callback; - callback_param = desc->async_tx.callback_param; - if (callback) { + dmaengine_desc_get_callback(&desc->async_tx, &cb); + if (dmaengine_desc_callback_valid(&cb)) { spin_unlock_irqrestore(&chan->lock, flags); - callback(callback_param); + dmaengine_desc_callback_invoke(&cb, NULL); spin_lock_irqsave(&chan->lock, flags); } From ed9f2c5896baf277959ed91f6b77b03c5de2db0f Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Mon, 25 Jul 2016 10:34:14 -0700 Subject: [PATCH 045/169] dmaengine: ppc4xx/adma: move unmap to before callback Completion callback should happen after dma_descriptor_unmap() has happened. This allow the cache invalidate to happen and ensure that the data accessed by the upper layer is in memory that was from DMA rather than stale data. On some architecture this is done by the hardware, however we should make the code consistent to not cause confusion. 
Signed-off-by: Dave Jiang Signed-off-by: Vinod Koul --- drivers/dma/ppc4xx/adma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 140f3ed429f4..fc71a635edf6 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -1482,11 +1482,11 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions( cookie = desc->async_tx.cookie; desc->async_tx.cookie = 0; + dma_descriptor_unmap(&desc->async_tx); /* call the callback (must not sleep or submit new * operations to this channel) */ dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); - dma_descriptor_unmap(&desc->async_tx); } /* run dependent operations */ From 73fc45e3ce7838e6f47228dd51144c492931e8ad Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Wed, 20 Jul 2016 14:13:09 -0700 Subject: [PATCH 046/169] dmaengine: sh_shdma-base: convert callback to helper function This is in preperation of moving to a callback that provides results to the callback for the transaction. The conversion will maintain current behavior and the driver must convert to new callback mechanism at a later time in order to receive results. Signed-off-by: Dave Jiang Signed-off-by: Vinod Koul --- drivers/dma/sh/shdma-base.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 10fcabad80f3..12fa48e380cf 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -330,10 +330,11 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) bool head_acked = false; dma_cookie_t cookie = 0; dma_async_tx_callback callback = NULL; - void *param = NULL; + struct dmaengine_desc_callback cb; unsigned long flags; LIST_HEAD(cyclic_list); + memset(&cb, 0, sizeof(cb)); spin_lock_irqsave(&schan->chan_lock, flags); list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) { struct dma_async_tx_descriptor *tx = &desc->async_tx; @@ -367,8 +368,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) /* Call callback on the last chunk */ if (desc->mark == DESC_COMPLETED && tx->callback) { desc->mark = DESC_WAITING; + dmaengine_desc_get_callback(tx, &cb); callback = tx->callback; - param = tx->callback_param; dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n", tx->cookie, tx, schan->id); BUG_ON(desc->chunks != 1); @@ -430,8 +431,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) spin_unlock_irqrestore(&schan->chan_lock, flags); - if (callback) - callback(param); + dmaengine_desc_callback_invoke(&cb, NULL); return callback; } @@ -885,9 +885,9 @@ bool shdma_reset(struct shdma_dev *sdev) /* Complete all */ list_for_each_entry(sdesc, &dl, node) { struct dma_async_tx_descriptor *tx = &sdesc->async_tx; + sdesc->mark = DESC_IDLE; - if (tx->callback) - tx->callback(tx->callback_param); + dmaengine_desc_get_callback_invoke(tx, NULL); } spin_lock(&schan->chan_lock); From fd3c69bd19244aa8cbf859561fd1b9f4ebc1d1c3 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Mon, 25 Jul 2016 10:34:19 -0700 Subject: [PATCH 047/169] dmaengine: xgene-dma: move unmap to before callback Completion callback should happen after dma_descriptor_unmap() has happened. This allow the cache invalidate to happen and ensure that the data accessed by the upper layer is in memory that was from DMA rather than stale data. On some architecture this is done by the hardware, however we should make the code consistent to not cause confusion. 
Signed-off-by: Dave Jiang Cc: Rameshwar Prasad Sahu Signed-off-by: Vinod Koul --- drivers/dma/xgene-dma.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index d66ed11baaec..8b693b712d0f 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -606,12 +606,11 @@ static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan, return; dma_cookie_complete(tx); + dma_descriptor_unmap(tx); /* Run the link descriptor callback function */ dmaengine_desc_get_callback_invoke(tx, NULL); - dma_descriptor_unmap(tx); - /* Run any dependencies */ dma_run_dependencies(tx); } From a5dc3fcac4a29b77e8bd476b8b5242e8f449d3a6 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Wed, 20 Jul 2016 11:50:27 +0300 Subject: [PATCH 048/169] dmaengine: omap-dma: Simplify omap_dma_start_sg parameter list We can drop the (sg)idx parameter for the omap_dma_start_sg() function and increment the sgidx inside of the same function. Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul --- drivers/dma/omap-dma.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index d99ca2b511c4..2fdeb02e81fc 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -365,10 +365,9 @@ static void omap_dma_stop(struct omap_chan *c) c->running = false; } -static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d, - unsigned idx) +static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d) { - struct omap_sg *sg = d->sg + idx; + struct omap_sg *sg = d->sg + c->sgidx; unsigned cxsa, cxei, cxfi; if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) { @@ -388,6 +387,7 @@ static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d, omap_dma_chan_write(c, CFN, sg->fn); omap_dma_start(c, d); + c->sgidx++; } static void omap_dma_start_desc(struct omap_chan *c) @@ -433,7 +433,7 @@ static void omap_dma_start_desc(struct omap_chan *c) omap_dma_chan_write(c, CSDP, d->csdp); omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl); - omap_dma_start_sg(c, d, 0); + omap_dma_start_sg(c, d); } static void omap_dma_callback(int ch, u16 status, void *data) @@ -446,8 +446,8 @@ static void omap_dma_callback(int ch, u16 status, void *data) d = c->desc; if (d) { if (!c->cyclic) { - if (++c->sgidx < d->sglen) { - omap_dma_start_sg(c, d, c->sgidx); + if (c->sgidx < d->sglen) { + omap_dma_start_sg(c, d); } else { omap_dma_start_desc(c); vchan_cookie_complete(&d->vd); From b57ebe080cffb323b062fbff1a35ef59de3bd5d1 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Wed, 20 Jul 2016 11:50:28 +0300 Subject: [PATCH 049/169] dmaengine: omap-dma: Simplify omap_dma_callback Flatten the indentation level of the function which gives better view on the cases we handle here. 
Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul --- drivers/dma/omap-dma.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 2fdeb02e81fc..3c445942bd31 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -445,15 +445,13 @@ static void omap_dma_callback(int ch, u16 status, void *data) spin_lock_irqsave(&c->vc.lock, flags); d = c->desc; if (d) { - if (!c->cyclic) { - if (c->sgidx < d->sglen) { - omap_dma_start_sg(c, d); - } else { - omap_dma_start_desc(c); - vchan_cookie_complete(&d->vd); - } - } else { + if (c->cyclic) { vchan_cyclic_callback(&d->vd); + } else if (c->sgidx == d->sglen) { + omap_dma_start_desc(c); + vchan_cookie_complete(&d->vd); + } else { + omap_dma_start_sg(c, d); } } spin_unlock_irqrestore(&c->vc.lock, flags); From 2d1a9a946faebfedd660a1f1c2b90984fff41f91 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Wed, 20 Jul 2016 11:50:29 +0300 Subject: [PATCH 050/169] dmaengine: omap-dma: Dynamically allocate memory for lch_map On OMAP1 platforms we do not have 32 channels available. Allocate the lch_map based on the available channels. This way we are not going to have more visible channels then it is available on the platform. Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul --- drivers/dma/omap-dma.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 3c445942bd31..774306d372c9 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -35,7 +35,7 @@ struct omap_dmadev { unsigned dma_requests; spinlock_t irq_lock; uint32_t irq_enable_mask; - struct omap_chan *lch_map[OMAP_SDMA_CHANNELS]; + struct omap_chan **lch_map; }; struct omap_chan { @@ -1223,16 +1223,24 @@ static int omap_dma_probe(struct platform_device *pdev) spin_lock_init(&od->lock); spin_lock_init(&od->irq_lock); - od->dma_requests = OMAP_SDMA_REQUESTS; - if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, - "dma-requests", - &od->dma_requests)) { + if (!pdev->dev.of_node) { + od->dma_requests = od->plat->dma_attr->lch_count; + if (unlikely(!od->dma_requests)) + od->dma_requests = OMAP_SDMA_REQUESTS; + } else if (of_property_read_u32(pdev->dev.of_node, "dma-requests", + &od->dma_requests)) { dev_info(&pdev->dev, "Missing dma-requests property, using %u.\n", OMAP_SDMA_REQUESTS); + od->dma_requests = OMAP_SDMA_REQUESTS; } - for (i = 0; i < OMAP_SDMA_CHANNELS; i++) { + od->lch_map = devm_kcalloc(&pdev->dev, od->dma_requests, + sizeof(*od->lch_map), GFP_KERNEL); + if (!od->lch_map) + return -ENOMEM; + + for (i = 0; i < od->dma_requests; i++) { rc = omap_dma_chan_init(od); if (rc) { omap_dma_free(od); From 3c9b833f5bfff704b805f31e818d6be8b804955f Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Wed, 20 Jul 2016 11:50:30 +0300 Subject: [PATCH 051/169] dmaengine: omap-dma: Add more debug information when freeing channel Print the same information the driver prints when allocating the channel resources regarding to the sDMA channel. 
Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul --- drivers/dma/omap-dma.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 774306d372c9..e58eda564060 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -568,7 +568,8 @@ static void omap_dma_free_chan_resources(struct dma_chan *chan) vchan_free_chan_resources(&c->vc); omap_free_dma(c->dma_ch); - dev_dbg(od->ddev.dev, "freeing channel for %u\n", c->dma_sig); + dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch, + c->dma_sig); c->dma_sig = 0; } From cb7958dfa9bc4a69f94eaa8bbc9d21a291a2a560 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Wed, 20 Jul 2016 11:50:31 +0300 Subject: [PATCH 052/169] dmaengine: omap-dma: Use pointer to omap_sg in slave_sg setup's loop Instead of accessing the array via index, take the pointer first and use it to set up the omap_sg struct. Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul --- drivers/dma/omap-dma.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index e58eda564060..6e8e28955ca6 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -819,9 +819,11 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( en = burst; frame_bytes = es_bytes[es] * en; for_each_sg(sgl, sgent, sglen, i) { - d->sg[i].addr = sg_dma_address(sgent); - d->sg[i].en = en; - d->sg[i].fn = sg_dma_len(sgent) / frame_bytes; + struct omap_sg *osg = &d->sg[i]; + + osg->addr = sg_dma_address(sgent); + osg->en = en; + osg->fn = sg_dma_len(sgent) / frame_bytes; } d->sglen = sglen; From 1c2e8e6b6429688e6b1096db1a89a60faaa6d8dc Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Wed, 20 Jul 2016 11:50:32 +0300 Subject: [PATCH 053/169] dmaengine: omap-dma: Support for LinkedList transfer of slave_sg sDMA in OMAP3630 or newer SoC have support for LinkedList transfer. When LinkedList or Descriptor load feature is present we can create the descriptors for each and program sDMA to walk through the list of descriptors instead of the current way of sDMA stop, sDMA reconfiguration and sDMA start after each SG transfer. By using LinkedList transfer in sDMA the number of DMA interrupts will decrease dramatically. Booting up the board with filesystem on SD card for example: W/o LinkedList support: 27: 4436 0 WUGEN 13 Level omap-dma-engine Same board/filesystem with this patch: 27: 1027 0 WUGEN 13 Level omap-dma-engine Or copying files from SD card to eMCC: 2.1G /usr/ 232001 W/o LinkedList we see ~761069 DMA interrupts. With LinkedList support it is down to ~269314 DMA interrupts. With the decreased DMA interrupt number the CPU load is dropping significantly as well. 
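The mechanism behind those numbers is that the driver now builds one hardware descriptor per scatterlist entry and chains them through their bus addresses, so the controller walks the whole list on its own instead of being stopped, reprogrammed and restarted per entry. A conceptual sketch of the chaining step, using the type-2 descriptor layout and the 0xfffffffc end-of-list marker introduced below; the helper itself is hypothetical:

static void link_type2_descs(struct omap_type2_desc **descs,
			     dma_addr_t *paddrs, int n)
{
	int i;

	if (n <= 0)
		return;

	/* each descriptor points at the bus address of the next one */
	for (i = 0; i < n - 1; i++)
		descs[i]->next_desc = paddrs[i + 1];

	/* the last entry terminates the list */
	descs[n - 1]->next_desc = 0xfffffffc;
}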
Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul --- drivers/dma/omap-dma.c | 183 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 177 insertions(+), 6 deletions(-) diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 6e8e28955ca6..aef52011d7c0 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -32,6 +33,8 @@ struct omap_dmadev { const struct omap_dma_reg *reg_map; struct omap_system_dma_plat_info *plat; bool legacy; + bool ll123_supported; + struct dma_pool *desc_pool; unsigned dma_requests; spinlock_t irq_lock; uint32_t irq_enable_mask; @@ -55,16 +58,40 @@ struct omap_chan { unsigned sgidx; }; +#define DESC_NXT_SV_REFRESH (0x1 << 24) +#define DESC_NXT_SV_REUSE (0x2 << 24) +#define DESC_NXT_DV_REFRESH (0x1 << 26) +#define DESC_NXT_DV_REUSE (0x2 << 26) +#define DESC_NTYPE_TYPE2 (0x2 << 29) + +/* Type 2 descriptor with Source or Destination address update */ +struct omap_type2_desc { + uint32_t next_desc; + uint32_t en; + uint32_t addr; /* src or dst */ + uint16_t fn; + uint16_t cicr; + uint16_t cdei; + uint16_t csei; + uint32_t cdfi; + uint32_t csfi; +} __packed; + struct omap_sg { dma_addr_t addr; uint32_t en; /* number of elements (24-bit) */ uint32_t fn; /* number of frames (16-bit) */ int32_t fi; /* for double indexing */ int16_t ei; /* for double indexing */ + + /* Linked list */ + struct omap_type2_desc *t2_desc; + dma_addr_t t2_desc_paddr; }; struct omap_desc { struct virt_dma_desc vd; + bool using_ll; enum dma_transfer_direction dir; dma_addr_t dev_addr; @@ -81,6 +108,9 @@ struct omap_desc { }; enum { + CAPS_0_SUPPORT_LL123 = BIT(20), /* Linked List type1/2/3 */ + CAPS_0_SUPPORT_LL4 = BIT(21), /* Linked List type4 */ + CCR_FS = BIT(5), CCR_READ_PRIORITY = BIT(6), CCR_ENABLE = BIT(7), @@ -151,6 +181,19 @@ enum { CICR_SUPER_BLOCK_IE = BIT(14), /* OMAP2+ only */ CLNK_CTRL_ENABLE_LNK = BIT(15), + + CDP_DST_VALID_INC = 0 << 0, + CDP_DST_VALID_RELOAD = 1 << 0, + CDP_DST_VALID_REUSE = 2 << 0, + CDP_SRC_VALID_INC = 0 << 2, + CDP_SRC_VALID_RELOAD = 1 << 2, + CDP_SRC_VALID_REUSE = 2 << 2, + CDP_NTYPE_TYPE1 = 1 << 4, + CDP_NTYPE_TYPE2 = 2 << 4, + CDP_NTYPE_TYPE3 = 3 << 4, + CDP_TMODE_NORMAL = 0 << 8, + CDP_TMODE_LLIST = 1 << 8, + CDP_FAST = BIT(10), }; static const unsigned es_bytes[] = { @@ -180,7 +223,64 @@ static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor static void omap_dma_desc_free(struct virt_dma_desc *vd) { - kfree(container_of(vd, struct omap_desc, vd)); + struct omap_desc *d = to_omap_dma_desc(&vd->tx); + + if (d->using_ll) { + struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device); + int i; + + for (i = 0; i < d->sglen; i++) { + if (d->sg[i].t2_desc) + dma_pool_free(od->desc_pool, d->sg[i].t2_desc, + d->sg[i].t2_desc_paddr); + } + } + + kfree(d); +} + +static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx, + enum dma_transfer_direction dir, bool last) +{ + struct omap_sg *sg = &d->sg[idx]; + struct omap_type2_desc *t2_desc = sg->t2_desc; + + if (idx) + d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr; + if (last) + t2_desc->next_desc = 0xfffffffc; + + t2_desc->en = sg->en; + t2_desc->addr = sg->addr; + t2_desc->fn = sg->fn & 0xffff; + t2_desc->cicr = d->cicr; + if (!last) + t2_desc->cicr &= ~CICR_BLOCK_IE; + + switch (dir) { + case DMA_DEV_TO_MEM: + t2_desc->cdei = sg->ei; + t2_desc->csei = d->ei; + t2_desc->cdfi = sg->fi; + t2_desc->csfi = d->fi; + + t2_desc->en |= DESC_NXT_DV_REFRESH; + t2_desc->en |= 
DESC_NXT_SV_REUSE; + break; + case DMA_MEM_TO_DEV: + t2_desc->cdei = d->ei; + t2_desc->csei = sg->ei; + t2_desc->cdfi = d->fi; + t2_desc->csfi = sg->fi; + + t2_desc->en |= DESC_NXT_SV_REFRESH; + t2_desc->en |= DESC_NXT_DV_REUSE; + break; + default: + return; + } + + t2_desc->en |= DESC_NTYPE_TYPE2; } static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr) @@ -285,6 +385,7 @@ static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c, static void omap_dma_start(struct omap_chan *c, struct omap_desc *d) { struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device); + uint16_t cicr = d->cicr; if (__dma_omap15xx(od->plat->dma_attr)) omap_dma_chan_write(c, CPC, 0); @@ -293,8 +394,27 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d) omap_dma_clear_csr(c); + if (d->using_ll) { + uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST; + + if (d->dir == DMA_DEV_TO_MEM) + cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE); + else + cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD); + omap_dma_chan_write(c, CDP, cdp); + + omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr); + omap_dma_chan_write(c, CCDN, 0); + omap_dma_chan_write(c, CCFN, 0xffff); + omap_dma_chan_write(c, CCEN, 0xffffff); + + cicr &= ~CICR_BLOCK_IE; + } else if (od->ll123_supported) { + omap_dma_chan_write(c, CDP, 0); + } + /* Enable interrupts */ - omap_dma_chan_write(c, CICR, d->cicr); + omap_dma_chan_write(c, CICR, cicr); /* Enable channel */ omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE); @@ -447,7 +567,7 @@ static void omap_dma_callback(int ch, u16 status, void *data) if (d) { if (c->cyclic) { vchan_cyclic_callback(&d->vd); - } else if (c->sgidx == d->sglen) { + } else if (d->using_ll || c->sgidx == d->sglen) { omap_dma_start_desc(c); vchan_cookie_complete(&d->vd); } else { @@ -501,6 +621,7 @@ static int omap_dma_alloc_chan_resources(struct dma_chan *chan) { struct omap_dmadev *od = to_omap_dma_dev(chan->device); struct omap_chan *c = to_omap_dma_chan(chan); + struct device *dev = od->ddev.dev; int ret; if (od->legacy) { @@ -511,8 +632,7 @@ static int omap_dma_alloc_chan_resources(struct dma_chan *chan) &c->dma_ch); } - dev_dbg(od->ddev.dev, "allocating channel %u for %u\n", - c->dma_ch, c->dma_sig); + dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig); if (ret >= 0) { omap_dma_assign(od, c, c->dma_ch); @@ -743,6 +863,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( struct omap_desc *d; dma_addr_t dev_addr; unsigned i, es, en, frame_bytes; + bool ll_failed = false; u32 burst; if (dir == DMA_DEV_TO_MEM) { @@ -818,16 +939,47 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( */ en = burst; frame_bytes = es_bytes[es] * en; + + if (sglen >= 2) + d->using_ll = od->ll123_supported; + for_each_sg(sgl, sgent, sglen, i) { struct omap_sg *osg = &d->sg[i]; osg->addr = sg_dma_address(sgent); osg->en = en; osg->fn = sg_dma_len(sgent) / frame_bytes; + + if (d->using_ll) { + osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC, + &osg->t2_desc_paddr); + if (!osg->t2_desc) { + dev_err(chan->device->dev, + "t2_desc[%d] allocation failed\n", i); + ll_failed = true; + d->using_ll = false; + continue; + } + + omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1)); + } } d->sglen = sglen; + /* Release the dma_pool entries if one allocation failed */ + if (ll_failed) { + for (i = 0; i < d->sglen; i++) { + struct omap_sg *osg = &d->sg[i]; + + if (osg->t2_desc) { + dma_pool_free(od->desc_pool, osg->t2_desc, + osg->t2_desc_paddr); + 
osg->t2_desc = NULL; + } + } + } + return vchan_tx_prep(&c->vc, &d->vd, tx_flags); } @@ -1266,10 +1418,25 @@ static int omap_dma_probe(struct platform_device *pdev) return rc; } + if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123) + od->ll123_supported = true; + od->ddev.filter.map = od->plat->slave_map; od->ddev.filter.mapcnt = od->plat->slavecnt; od->ddev.filter.fn = omap_dma_filter_fn; + if (od->ll123_supported) { + od->desc_pool = dma_pool_create(dev_name(&pdev->dev), + &pdev->dev, + sizeof(struct omap_type2_desc), + 4, 0); + if (!od->desc_pool) { + dev_err(&pdev->dev, + "unable to allocate descriptor pool\n"); + od->ll123_supported = false; + } + } + rc = dma_async_device_register(&od->ddev); if (rc) { pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", @@ -1293,7 +1460,8 @@ static int omap_dma_probe(struct platform_device *pdev) } } - dev_info(&pdev->dev, "OMAP DMA engine driver\n"); + dev_info(&pdev->dev, "OMAP DMA engine driver%s\n", + od->ll123_supported ? " (LinkedList1/2/3 supported)" : ""); return rc; } @@ -1316,6 +1484,9 @@ static int omap_dma_remove(struct platform_device *pdev) omap_dma_glbl_write(od, IRQENABLE_L0, 0); } + if (od->ll123_supported) + dma_pool_destroy(od->desc_pool); + omap_dma_free(od); return 0; From e4b75760faac2b858e0b244ce716bf30a239ae2f Mon Sep 17 00:00:00 2001 From: Martin Kaiser Date: Mon, 8 Aug 2016 22:45:58 +0200 Subject: [PATCH 054/169] dmaengine: imx-sdma: (trivial) fix a typo Signed-off-by: Martin Kaiser Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 03ec76fc22ff..5f55c5a6708b 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -184,7 +184,7 @@ struct sdma_mode_count { u32 count : 16; /* size of the buffer pointed by this BD */ u32 status : 8; /* E,R,I,C,W,D status bits stored here */ - u32 command : 8; /* command mostlky used for channel 0 */ + u32 command : 8; /* command mostly used for channel 0 */ }; /* From 76d7b84bfa43f514544477d2282f9ac9796a2594 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 27 Jul 2016 14:32:58 -0700 Subject: [PATCH 055/169] dmaengine: device must have at least one channel The DMA device can't be registered if it doesn't have any channels registered at all. Moreover, it leads to memory leak and is reported by kmemleak as (on 3.10 kernel, and same shall happen on mainline): unreferenced object 0xffffffc09e597240 (size 64): comm "swapper/0", pid 1, jiffies 4294877736 (age 7060.280s) hex dump (first 32 bytes): 00 00 00 00 c0 ff ff ff 30 00 00 ff 00 00 00 ff ........0....... 00 00 00 ff 00 00 00 ff 00 00 00 ff 00 00 00 ff ................ backtrace: [] create_object+0x148/0x2a0 [] kmemleak_alloc+0x80/0xbc [] kmem_cache_alloc_trace+0x120/0x1ac [] dma_async_device_register+0x160/0x46c [] foo_probe+0x1a0/0x264 [] platform_drv_probe+0x14/0x20 [] driver_probe_device+0x160/0x374 [] __driver_attach+0x60/0x90 [] bus_for_each_dev+0x7c/0xb0 [] driver_attach+0x1c/0x28 [] bus_add_driver+0x124/0x248 [] driver_register+0x90/0x110 [] platform_driver_register+0x58/0x64 [] foo_driver_init+0x10/0x1c [] do_one_initcall+0xac/0x148 [] kernel_init_freeable+0x1a0/0x258 Return -ENODEV from dma_async_device_register() on such a case. 
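From a driver author's point of view the new rule is simply that device->channels must be populated before registration. A heavily simplified, hypothetical probe fragment to illustrate the ordering; every other mandatory dma_device field and callback is omitted here:

struct my_dma_dev {			/* hypothetical driver context */
	struct dma_device ddev;
	struct dma_chan chan;
};

static int my_dma_probe(struct platform_device *pdev)
{
	struct my_dma_dev *d;

	d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	INIT_LIST_HEAD(&d->ddev.channels);

	/* add at least one channel before registering ... */
	d->chan.device = &d->ddev;
	list_add_tail(&d->chan.device_node, &d->ddev.channels);

	/* ... otherwise registration now fails with -ENODEV instead of leaking */
	return dma_async_device_register(&d->ddev);
}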
Signed-off-by: Viresh Kumar Signed-off-by: Vinod Koul --- drivers/dma/dmaengine.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 8c9f45fd55fc..6b535262ac5d 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -997,6 +997,13 @@ int dma_async_device_register(struct dma_device *device) } chan->client_count = 0; } + + if (!chancnt) { + dev_err(device->dev, "%s: device has no channels!\n", __func__); + rc = -ENODEV; + goto err_out; + } + device->chancnt = chancnt; mutex_lock(&dma_list_mutex); From 24fec75017ae2c9ecc5db71feb999f73d57ba04f Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Thu, 11 Aug 2016 11:42:20 +0100 Subject: [PATCH 056/169] dmaengine: tegra210-adma: Update driver to use of_pm_clk_add_clk Commit 498b5fdd40dd ("PM / clk: Add support for adding a specific clock from device-tree") add a new helper function for adding a clock from device-tree to a device. Update the ADMA driver to use this new function to simplify the driver. Signed-off-by: Jon Hunter Acked-by: Laxman Dewangan Signed-off-by: Vinod Koul --- drivers/dma/tegra210-adma.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index c4b121c4559d..b10cbaa82ff5 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -670,7 +670,6 @@ static int tegra_adma_probe(struct platform_device *pdev) const struct tegra_adma_chip_data *cdata; struct tegra_adma *tdma; struct resource *res; - struct clk *clk; int ret, i; cdata = of_device_get_match_data(&pdev->dev); @@ -697,18 +696,9 @@ static int tegra_adma_probe(struct platform_device *pdev) if (ret) return ret; - clk = clk_get(&pdev->dev, "d_audio"); - if (IS_ERR(clk)) { - dev_err(&pdev->dev, "ADMA clock not found\n"); - ret = PTR_ERR(clk); + ret = of_pm_clk_add_clk(&pdev->dev, "d_audio"); + if (ret) goto clk_destroy; - } - - ret = pm_clk_add_clk(&pdev->dev, clk); - if (ret) { - clk_put(clk); - goto clk_destroy; - } pm_runtime_enable(&pdev->dev); From b7d2648ac3d4da3ae27f65f14958d2130cdf30ac Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Wed, 10 Aug 2016 13:05:05 -0300 Subject: [PATCH 057/169] dmaengine: imx-sdma: Add i.MX7 support Allow i.MX7 to work with the imx-sdma driver. Signed-off-by: Fabio Estevam Tested-by: Stefan Agner Signed-off-by: Vinod Koul --- .../devicetree/bindings/dma/fsl-imx-sdma.txt | 1 + drivers/dma/imx-sdma.c | 26 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt index 175f0e44ed85..3c9a57a8443b 100644 --- a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt +++ b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt @@ -8,6 +8,7 @@ Required properties: "fsl,imx51-sdma" "fsl,imx53-sdma" "fsl,imx6q-sdma" + "fsl,imx7d-sdma" The -to variants should be preferred since they allow to determine the correct ROM script addresses needed for the driver to work without additional firmware. 
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 5f55c5a6708b..6eade3828aca 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -479,6 +479,24 @@ static struct sdma_driver_data sdma_imx6q = { .script_addrs = &sdma_script_imx6q, }; +static struct sdma_script_start_addrs sdma_script_imx7d = { + .ap_2_ap_addr = 644, + .uart_2_mcu_addr = 819, + .mcu_2_app_addr = 749, + .uartsh_2_mcu_addr = 1034, + .mcu_2_shp_addr = 962, + .app_2_mcu_addr = 685, + .shp_2_mcu_addr = 893, + .spdif_2_mcu_addr = 1102, + .mcu_2_spdif_addr = 1136, +}; + +static struct sdma_driver_data sdma_imx7d = { + .chnenbl0 = SDMA_CHNENBL0_IMX35, + .num_events = 48, + .script_addrs = &sdma_script_imx7d, +}; + static const struct platform_device_id sdma_devtypes[] = { { .name = "imx25-sdma", @@ -498,6 +516,9 @@ static const struct platform_device_id sdma_devtypes[] = { }, { .name = "imx6q-sdma", .driver_data = (unsigned long)&sdma_imx6q, + }, { + .name = "imx7d-sdma", + .driver_data = (unsigned long)&sdma_imx7d, }, { /* sentinel */ } @@ -511,6 +532,7 @@ static const struct of_device_id sdma_dt_ids[] = { { .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, }, { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, }, { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, }, + { .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sdma_dt_ids); @@ -1375,6 +1397,7 @@ static void sdma_issue_pending(struct dma_chan *chan) #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 41 +#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 42 static void sdma_add_scripts(struct sdma_engine *sdma, const struct sdma_script_start_addrs *addr) @@ -1424,6 +1447,9 @@ static void sdma_load_firmware(const struct firmware *fw, void *context) case 3: sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3; break; + case 4: + sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4; + break; default: dev_err(sdma->dev, "unknown firmware version\n"); goto err_firmware; From 6c28a90fb30cd3fe504e7dafe0b60b733f240f8c Mon Sep 17 00:00:00 2001 From: Andy Green Date: Mon, 29 Aug 2016 10:30:47 -0700 Subject: [PATCH 058/169] k3dma: Fix hisi burst clipping Max burst len is a 4-bit field, but at the moment it's clipped with a 5-bit constant... reduce it to that which can be expressed Cc: Zhangfei Gao Cc: Krzysztof Kozlowski Cc: Maxime Ripard Cc: Vinod Koul Cc: Dan Williams Cc: Mark Brown Cc: Andy Green Acked-by: Zhangfei Gao Signed-off-by: Andy Green [jstultz: Forward ported to mainline] Signed-off-by: John Stultz Signed-off-by: Vinod Koul --- drivers/dma/k3dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 39de8980128c..3d514692bdc6 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -551,7 +551,7 @@ static int k3_dma_config(struct dma_chan *chan, c->ccfg |= (val << 12) | (val << 16); if ((maxburst == 0) || (maxburst > 16)) - val = 16; + val = 15; else val = maxburst - 1; c->ccfg |= (val << 20) | (val << 24); From aceaaa17e795b963880d71a03ab1ca9f4f597185 Mon Sep 17 00:00:00 2001 From: Andy Green Date: Mon, 29 Aug 2016 10:30:48 -0700 Subject: [PATCH 059/169] k3dma: Fix dma err offsets The offsets for ERR1 and ERR2 are wrong actually. That's why you can never clear an error. 
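The reason a wrong offset is fatal here is the acknowledge path: the interrupt handler writes the error bits it has collected back to the RAW status registers, and if those offsets point at the wrong addresses the write lands elsewhere and the error stays asserted. A sketch of that acknowledge step with the corrected offsets; the helper is hypothetical, as in the driver these writes sit inline in the interrupt handler:

static void k3_dma_ack_errors(void __iomem *base, u32 err1, u32 err2)
{
	/* writing the handled bits back to the RAW registers clears them */
	if (err1)
		writel_relaxed(err1, base + INT_ERR1_RAW);	/* 0x610 after the fix, was 0x608 */
	if (err2)
		writel_relaxed(err2, base + INT_ERR2_RAW);	/* 0x618 after the fix, was 0x610 */
}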
Cc: Zhangfei Gao Cc: Krzysztof Kozlowski Cc: Maxime Ripard Cc: Vinod Koul Cc: Dan Williams Cc: Mark Brown Cc: Andy Green Acked-by: Zhangfei Gao Signed-off-by: Andy Green [jstultz: Forward ported to mainline] Signed-off-by: John Stultz Signed-off-by: Vinod Koul --- drivers/dma/k3dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 3d514692bdc6..7dc7816d2a0c 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -34,8 +34,8 @@ #define INT_ERR1_MASK 0x20 #define INT_ERR2_MASK 0x24 #define INT_TC1_RAW 0x600 -#define INT_ERR1_RAW 0x608 -#define INT_ERR2_RAW 0x610 +#define INT_ERR1_RAW 0x610 +#define INT_ERR2_RAW 0x618 #define CH_PRI 0x688 #define CH_STAT 0x690 #define CX_CUR_CNT 0x704 From 0173c895ed83f4654e7d6535088973725e76f304 Mon Sep 17 00:00:00 2001 From: Andy Green Date: Mon, 29 Aug 2016 10:30:49 -0700 Subject: [PATCH 060/169] k3dma: Fix "nobody cared" message seen on any error As it was before, as soon as the DMAC IP felt there was an error he would return IRQ_NONE since no actual transfer had completed. After spinning on that for 100K interrupts, Linux yanks the IRQ with a "nobody cared" error. This patch lets it handle the interrupt and keep the IRQ alive. Cc: Zhangfei Gao Cc: Krzysztof Kozlowski Cc: Maxime Ripard Cc: Vinod Koul Cc: Dan Williams Cc: Mark Brown Cc: Andy Green Acked-by: Zhangfei Gao Signed-off-by: Andy Green [jstultz: Forward ported to mainline] Signed-off-by: John Stultz Signed-off-by: Vinod Koul --- drivers/dma/k3dma.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 7dc7816d2a0c..f46b9b86fc9b 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -221,11 +221,13 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id) writel_relaxed(err1, d->base + INT_ERR1_RAW); writel_relaxed(err2, d->base + INT_ERR2_RAW); - if (irq_chan) { + if (irq_chan) tasklet_schedule(&d->task); + + if (irq_chan || err1 || err2) return IRQ_HANDLED; - } else - return IRQ_NONE; + + return IRQ_NONE; } static int k3_dma_start_txd(struct k3_dma_chan *c) From b77f262ae351d467c22b056f6d13afeeab7ea69a Mon Sep 17 00:00:00 2001 From: John Stultz Date: Mon, 29 Aug 2016 10:30:50 -0700 Subject: [PATCH 061/169] k3dma: Fix occasional DMA ERR issue by using proper dma api After lots of debugging on an occasional DMA ERR issue, I realized that the desc structures which we point the dma hardware are being allocated out of regular memory. This means when we fill the desc structures, that data doesn't always get flushed out to memory by the time we start the dma transfer, resulting in the dma engine getting some null values, resulting in a DMA ERR on the first irq. Thus, this patch adopts mechanism similar to the zx296702_dma of allocating the desc structures from a dma pool, so the memory caching rules are properly set to avoid this issue. 
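Put differently, anything the DMA engine itself dereferences (the hardware link items, as opposed to the driver's bookkeeping structs) has to come from DMA-coherent, properly aligned memory rather than ordinary kmalloc'ed cacheable memory. A minimal sketch of the pool lifecycle the patch adopts, with illustrative names, size and alignment; the field names mirror the driver's but the structs here are hypothetical:

#include <linux/dmapool.h>

struct my_dma_dev {			/* hypothetical */
	struct dma_pool *pool;
};

struct my_desc {			/* hypothetical */
	void *desc_hw;			/* CPU view of the hardware link items */
	dma_addr_t desc_hw_lli;		/* bus address handed to the engine */
};

/* probe: one managed pool for the hardware link items, 32-byte aligned */
static int my_pool_init(struct device *dev, struct my_dma_dev *d)
{
	d->pool = dmam_pool_create("my-dma-lli", dev, 4 * PAGE_SIZE, 32, 0);
	return d->pool ? 0 : -ENOMEM;
}

/* per transfer: the engine is given the bus address, the CPU fills the virtual one */
static int my_alloc_lli(struct my_dma_dev *d, struct my_desc *ds)
{
	ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
	return ds->desc_hw ? 0 : -ENOMEM;
}

/* descriptor free path */
static void my_free_lli(struct my_dma_dev *d, struct my_desc *ds)
{
	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
}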
Cc: Zhangfei Gao Cc: Krzysztof Kozlowski Cc: Maxime Ripard Cc: Vinod Koul Cc: Dan Williams Cc: Mark Brown Cc: Andy Green Acked-by: Zhangfei Gao Signed-off-by: John Stutlz Signed-off-by: Vinod Koul --- drivers/dma/k3dma.c | 53 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 45 insertions(+), 8 deletions(-) diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index f46b9b86fc9b..9d96c956c25f 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -8,6 +8,8 @@ */ #include #include +#include +#include #include #include #include @@ -25,6 +27,7 @@ #define DRIVER_NAME "k3-dma" #define DMA_MAX_SIZE 0x1ffc +#define LLI_BLOCK_SIZE (4 * PAGE_SIZE) #define INT_STAT 0x00 #define INT_TC1 0x04 @@ -68,7 +71,7 @@ struct k3_dma_desc_sw { dma_addr_t desc_hw_lli; size_t desc_num; size_t size; - struct k3_desc_hw desc_hw[0]; + struct k3_desc_hw *desc_hw; }; struct k3_dma_phy; @@ -100,6 +103,7 @@ struct k3_dma_dev { struct k3_dma_phy *phy; struct k3_dma_chan *chans; struct clk *clk; + struct dma_pool *pool; u32 dma_channels; u32 dma_requests; unsigned int irq; @@ -414,6 +418,35 @@ static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst, ds->desc_hw[num].config = ccfg; } +static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num, + struct dma_chan *chan) +{ + struct k3_dma_chan *c = to_k3_chan(chan); + struct k3_dma_desc_sw *ds; + struct k3_dma_dev *d = to_k3_dma(chan->device); + int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw); + + if (num > lli_limit) { + dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n", + &c->vc, num, lli_limit); + return NULL; + } + + ds = kzalloc(sizeof(*ds), GFP_NOWAIT); + if (!ds) + return NULL; + + ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli); + if (!ds->desc_hw) { + dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc); + kfree(ds); + return NULL; + } + memset(ds->desc_hw, 0, sizeof(struct k3_desc_hw) * num); + ds->desc_num = num; + return ds; +} + static struct dma_async_tx_descriptor *k3_dma_prep_memcpy( struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, size_t len, unsigned long flags) @@ -427,13 +460,12 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy( return NULL; num = DIV_ROUND_UP(len, DMA_MAX_SIZE); - ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC); + + ds = k3_dma_alloc_desc_resource(num, chan); if (!ds) return NULL; - ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]); ds->size = len; - ds->desc_num = num; num = 0; if (!c->ccfg) { @@ -482,12 +514,9 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg( num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1; } - ds = kzalloc(sizeof(*ds) + num * sizeof(ds->desc_hw[0]), GFP_ATOMIC); + ds = k3_dma_alloc_desc_resource(num, chan); if (!ds) return NULL; - - ds->desc_hw_lli = __virt_to_phys((unsigned long)&ds->desc_hw[0]); - ds->desc_num = num; num = 0; for_each_sg(sgl, sg, sglen, i) { @@ -645,7 +674,9 @@ static void k3_dma_free_desc(struct virt_dma_desc *vd) { struct k3_dma_desc_sw *ds = container_of(vd, struct k3_dma_desc_sw, vd); + struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device); + dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli); kfree(ds); } @@ -708,6 +739,12 @@ static int k3_dma_probe(struct platform_device *op) d->irq = irq; + /* A DMA memory pool for LLIs, align on 32-byte boundary */ + d->pool = dmam_pool_create(DRIVER_NAME, &op->dev, + LLI_BLOCK_SIZE, 32, 0); + if (!d->pool) + return -ENOMEM; + /* init phy channel */ d->phy = devm_kzalloc(&op->dev, d->dma_channels * 
sizeof(struct k3_dma_phy), GFP_KERNEL); From 36387a2b1f62b5c087c5fe6f0f7b23b94f722ad7 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Mon, 29 Aug 2016 10:30:51 -0700 Subject: [PATCH 062/169] k3dma: Fix memory handling in preparation for cyclic mode With cyclic mode, the shared virt-dma logic doesn't actually manage the descriptor state, nor the calling of the descriptor free callback. This results in leaking a desc structure every time we start an audio transfer. Thus we must manage it ourselves. The k3dma driver already keeps track of the active and finished descriptors via ds_run and ds_done pointers, so cleanup how we handle those two values, so when we tear down everything in terminate_all, call free_desc on the ds_run and ds_done pointers if they are not null. NOTE: HiKey doesn't use the non-cyclic dma modes, so I'm not been able to test those modes. But with this patch we no longer leak the desc structures. Cc: Zhangfei Gao Cc: Krzysztof Kozlowski Cc: Maxime Ripard Cc: Vinod Koul Cc: Dan Williams Cc: Mark Brown Cc: Andy Green Acked-by: Zhangfei Gao Signed-off-by: John Stultz Signed-off-by: Vinod Koul --- drivers/dma/k3dma.c | 38 ++++++++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 9d96c956c25f..8108fa119d39 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -212,7 +212,9 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id) spin_lock_irqsave(&c->vc.lock, flags); vchan_cookie_complete(&p->ds_run->vd); + WARN_ON_ONCE(p->ds_done); p->ds_done = p->ds_run; + p->ds_run = NULL; spin_unlock_irqrestore(&c->vc.lock, flags); } irq_chan |= BIT(i); @@ -253,14 +255,14 @@ static int k3_dma_start_txd(struct k3_dma_chan *c) * so vc->desc_issued only contains desc pending */ list_del(&ds->vd.node); + + WARN_ON_ONCE(c->phy->ds_run); + WARN_ON_ONCE(c->phy->ds_done); c->phy->ds_run = ds; - c->phy->ds_done = NULL; /* start dma */ k3_dma_set_desc(c->phy, &ds->desc_hw[0]); return 0; } - c->phy->ds_done = NULL; - c->phy->ds_run = NULL; return -EAGAIN; } @@ -594,6 +596,16 @@ static int k3_dma_config(struct dma_chan *chan, return 0; } +static void k3_dma_free_desc(struct virt_dma_desc *vd) +{ + struct k3_dma_desc_sw *ds = + container_of(vd, struct k3_dma_desc_sw, vd); + struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device); + + dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli); + kfree(ds); +} + static int k3_dma_terminate_all(struct dma_chan *chan) { struct k3_dma_chan *c = to_k3_chan(chan); @@ -617,7 +629,15 @@ static int k3_dma_terminate_all(struct dma_chan *chan) k3_dma_terminate_chan(p, d); c->phy = NULL; p->vchan = NULL; - p->ds_run = p->ds_done = NULL; + if (p->ds_run) { + k3_dma_free_desc(&p->ds_run->vd); + p->ds_run = NULL; + } + if (p->ds_done) { + k3_dma_free_desc(&p->ds_done->vd); + p->ds_done = NULL; + } + } spin_unlock_irqrestore(&c->vc.lock, flags); vchan_dma_desc_free_list(&c->vc, &head); @@ -670,16 +690,6 @@ static int k3_dma_transfer_resume(struct dma_chan *chan) return 0; } -static void k3_dma_free_desc(struct virt_dma_desc *vd) -{ - struct k3_dma_desc_sw *ds = - container_of(vd, struct k3_dma_desc_sw, vd); - struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device); - - dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli); - kfree(ds); -} - static const struct of_device_id k3_pdma_dt_ids[] = { { .compatible = "hisilicon,k3-dma-1.0", }, {} From a7e08fa6cc783cae797a06114d18dec73fac08b3 Mon Sep 17 00:00:00 2001 From: Andy Green Date: Mon, 29 Aug 2016 10:30:52 -0700 Subject: 
[PATCH 063/169] k3dma: Add cyclic mode for audio Currently the k3dma driver doesn't offer the cyclic mode necessary for handling audio. This patch adds it. Cc: Zhangfei Gao Cc: Krzysztof Kozlowski Cc: Maxime Ripard Cc: Vinod Koul Cc: Dan Williams Cc: Mark Brown Cc: Andy Green Acked-by: Zhangfei Gao Signed-off-by: Andy Green [jstultz: Forward ported to mainline, removed a few bits of logic that didn't seem to have much effect] Signed-off-by: John Stultz Signed-off-by: Vinod Koul --- drivers/dma/k3dma.c | 114 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 103 insertions(+), 11 deletions(-) diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 8108fa119d39..814e6e04e15c 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Linaro Ltd. + * Copyright (c) 2013 - 2015 Linaro Ltd. * Copyright (c) 2013 Hisilicon Limited. * * This program is free software; you can redistribute it and/or modify @@ -27,23 +27,28 @@ #define DRIVER_NAME "k3-dma" #define DMA_MAX_SIZE 0x1ffc +#define DMA_CYCLIC_MAX_PERIOD 0x1000 #define LLI_BLOCK_SIZE (4 * PAGE_SIZE) #define INT_STAT 0x00 #define INT_TC1 0x04 +#define INT_TC2 0x08 #define INT_ERR1 0x0c #define INT_ERR2 0x10 #define INT_TC1_MASK 0x18 +#define INT_TC2_MASK 0x1c #define INT_ERR1_MASK 0x20 #define INT_ERR2_MASK 0x24 #define INT_TC1_RAW 0x600 +#define INT_TC2_RAW 0x608 #define INT_ERR1_RAW 0x610 #define INT_ERR2_RAW 0x618 #define CH_PRI 0x688 #define CH_STAT 0x690 #define CX_CUR_CNT 0x704 #define CX_LLI 0x800 -#define CX_CNT 0x810 +#define CX_CNT1 0x80c +#define CX_CNT0 0x810 #define CX_SRC 0x814 #define CX_DST 0x818 #define CX_CFG 0x81c @@ -52,6 +57,7 @@ #define CX_LLI_CHAIN_EN 0x2 #define CX_CFG_EN 0x1 +#define CX_CFG_NODEIRQ BIT(1) #define CX_CFG_MEM2PER (0x1 << 2) #define CX_CFG_PER2MEM (0x2 << 2) #define CX_CFG_SRCINCR (0x1 << 31) @@ -84,6 +90,7 @@ struct k3_dma_chan { enum dma_transfer_direction dir; dma_addr_t dev_addr; enum dma_status status; + bool cyclic; }; struct k3_dma_phy { @@ -139,6 +146,7 @@ static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d) val = 0x1 << phy->idx; writel_relaxed(val, d->base + INT_TC1_RAW); + writel_relaxed(val, d->base + INT_TC2_RAW); writel_relaxed(val, d->base + INT_ERR1_RAW); writel_relaxed(val, d->base + INT_ERR2_RAW); } @@ -146,7 +154,7 @@ static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d) static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw) { writel_relaxed(hw->lli, phy->base + CX_LLI); - writel_relaxed(hw->count, phy->base + CX_CNT); + writel_relaxed(hw->count, phy->base + CX_CNT0); writel_relaxed(hw->saddr, phy->base + CX_SRC); writel_relaxed(hw->daddr, phy->base + CX_DST); writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG); @@ -180,11 +188,13 @@ static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on) /* unmask irq */ writel_relaxed(0xffff, d->base + INT_TC1_MASK); + writel_relaxed(0xffff, d->base + INT_TC2_MASK); writel_relaxed(0xffff, d->base + INT_ERR1_MASK); writel_relaxed(0xffff, d->base + INT_ERR2_MASK); } else { /* mask irq */ writel_relaxed(0x0, d->base + INT_TC1_MASK); + writel_relaxed(0x0, d->base + INT_TC2_MASK); writel_relaxed(0x0, d->base + INT_ERR1_MASK); writel_relaxed(0x0, d->base + INT_ERR2_MASK); } @@ -197,19 +207,20 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id) struct k3_dma_chan *c; u32 stat = readl_relaxed(d->base + INT_STAT); u32 tc1 = readl_relaxed(d->base + INT_TC1); + u32 tc2 = readl_relaxed(d->base + INT_TC2); u32 err1 = 
readl_relaxed(d->base + INT_ERR1); u32 err2 = readl_relaxed(d->base + INT_ERR2); u32 i, irq_chan = 0; while (stat) { i = __ffs(stat); - stat &= (stat - 1); - if (likely(tc1 & BIT(i))) { + stat &= ~BIT(i); + if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) { + unsigned long flags; + p = &d->phy[i]; c = p->vchan; - if (c) { - unsigned long flags; - + if (c && (tc1 & BIT(i))) { spin_lock_irqsave(&c->vc.lock, flags); vchan_cookie_complete(&p->ds_run->vd); WARN_ON_ONCE(p->ds_done); @@ -217,6 +228,12 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id) p->ds_run = NULL; spin_unlock_irqrestore(&c->vc.lock, flags); } + if (c && (tc2 & BIT(i))) { + spin_lock_irqsave(&c->vc.lock, flags); + if (p->ds_run != NULL) + vchan_cyclic_callback(&p->ds_run->vd); + spin_unlock_irqrestore(&c->vc.lock, flags); + } irq_chan |= BIT(i); } if (unlikely((err1 & BIT(i)) || (err2 & BIT(i)))) @@ -224,6 +241,7 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id) } writel_relaxed(irq_chan, d->base + INT_TC1_RAW); + writel_relaxed(irq_chan, d->base + INT_TC2_RAW); writel_relaxed(err1, d->base + INT_ERR1_RAW); writel_relaxed(err2, d->base + INT_ERR2_RAW); @@ -359,7 +377,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan, * its total size. */ vd = vchan_find_desc(&c->vc, cookie); - if (vd) { + if (vd && !c->cyclic) { bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size; } else if ((!p) || (!p->ds_run)) { bytes = 0; @@ -369,7 +387,8 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan, bytes = k3_dma_get_curr_cnt(d, p); clli = k3_dma_get_curr_lli(p); - index = (clli - ds->desc_hw_lli) / sizeof(struct k3_desc_hw); + index = ((clli - ds->desc_hw_lli) / + sizeof(struct k3_desc_hw)) + 1; for (; index < ds->desc_num; index++) { bytes += ds->desc_hw[index].count; /* end of lli */ @@ -410,9 +429,10 @@ static void k3_dma_issue_pending(struct dma_chan *chan) static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst, dma_addr_t src, size_t len, u32 num, u32 ccfg) { - if ((num + 1) < ds->desc_num) + if (num != ds->desc_num - 1) ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) * sizeof(struct k3_desc_hw); + ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN; ds->desc_hw[num].count = len; ds->desc_hw[num].saddr = src; @@ -467,6 +487,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy( if (!ds) return NULL; + c->cyclic = 0; ds->size = len; num = 0; @@ -510,6 +531,8 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg( if (sgl == NULL) return NULL; + c->cyclic = 0; + for_each_sg(sgl, sg, sglen, i) { avail = sg_dma_len(sg); if (avail > DMA_MAX_SIZE) @@ -549,6 +572,73 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg( return vchan_tx_prep(&c->vc, &ds->vd, flags); } +static struct dma_async_tx_descriptor * +k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, + size_t buf_len, size_t period_len, + enum dma_transfer_direction dir, + unsigned long flags) +{ + struct k3_dma_chan *c = to_k3_chan(chan); + struct k3_dma_desc_sw *ds; + size_t len, avail, total = 0; + dma_addr_t addr, src = 0, dst = 0; + int num = 1, since = 0; + size_t modulo = DMA_CYCLIC_MAX_PERIOD; + u32 en_tc2 = 0; + + dev_dbg(chan->device->dev, "%s: buf %p, dst %p, buf len %d, period_len = %d, dir %d\n", + __func__, (void *)buf_addr, (void *)to_k3_chan(chan)->dev_addr, + (int)buf_len, (int)period_len, (int)dir); + + avail = buf_len; + if (avail > modulo) + num += DIV_ROUND_UP(avail, modulo) - 1; + + ds = k3_dma_alloc_desc_resource(num, chan); + if (!ds) + return NULL; + + 
c->cyclic = 1; + addr = buf_addr; + avail = buf_len; + total = avail; + num = 0; + + if (period_len < modulo) + modulo = period_len; + + do { + len = min_t(size_t, avail, modulo); + + if (dir == DMA_MEM_TO_DEV) { + src = addr; + dst = c->dev_addr; + } else if (dir == DMA_DEV_TO_MEM) { + src = c->dev_addr; + dst = addr; + } + since += len; + if (since >= period_len) { + /* descriptor asks for TC2 interrupt on completion */ + en_tc2 = CX_CFG_NODEIRQ; + since -= period_len; + } else + en_tc2 = 0; + + k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2); + + addr += len; + avail -= len; + } while (avail); + + /* "Cyclic" == end of link points back to start of link */ + ds->desc_hw[num - 1].lli |= ds->desc_hw_lli; + + ds->size = total; + + return vchan_tx_prep(&c->vc, &ds->vd, flags); +} + static int k3_dma_config(struct dma_chan *chan, struct dma_slave_config *cfg) { @@ -771,11 +861,13 @@ static int k3_dma_probe(struct platform_device *op) INIT_LIST_HEAD(&d->slave.channels); dma_cap_set(DMA_SLAVE, d->slave.cap_mask); dma_cap_set(DMA_MEMCPY, d->slave.cap_mask); + dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); d->slave.dev = &op->dev; d->slave.device_free_chan_resources = k3_dma_free_chan_resources; d->slave.device_tx_status = k3_dma_tx_status; d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy; d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg; + d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic; d->slave.device_issue_pending = k3_dma_issue_pending; d->slave.device_config = k3_dma_config; d->slave.device_pause = k3_dma_transfer_pause; From e39a2329cfb09072bbab3c1310efc9ff6b7c3aa9 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Mon, 29 Aug 2016 10:30:53 -0700 Subject: [PATCH 064/169] Kconfig: Allow k3dma driver to be selected for more then HISI3xx platforms This allows the k3dma driver to be selected on HiKey via the ARCH_HISI dependency. Cc: Zhangfei Gao Cc: Jingoo Han Cc: Krzysztof Kozlowski Cc: Maxime Ripard Cc: Vinod Koul Cc: Dan Williams Cc: Mark Brown Cc: Andy Green Acked-by: Zhangfei Gao Signed-off-by: John Stultz Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 739f797b40d9..fe2dbb811e19 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -279,7 +279,7 @@ config INTEL_MIC_X100_DMA config K3_DMA tristate "Hisilicon K3 DMA support" - depends on ARCH_HI3xxx + depends on ARCH_HI3xxx || ARCH_HISI || COMPILE_TEST select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help From 670fc2a87013c3733868094c3ea115250398f2ea Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Fri, 19 Aug 2016 15:59:39 -0700 Subject: [PATCH 065/169] dmaengine: cppi41: Prepare to add PM runtime support Let's just move code from cppi41_dma_issue_pending() to push_desc_queue() as that's the only call to push_desc_queue(). We want to do this for PM runtime as we need to call push_desc_queue() also for pending queued transfers from PM runtime resume. No functional changes, just moves code around. 
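After the move, issue_pending() reduces to a thin wrapper around push_desc_queue(), as the diff below shows; reproduced here in isolation because it is precisely this shape that lets runtime resume call push_desc_queue() for queued transfers in the follow-up patch:

static void cppi41_dma_issue_pending(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);

	/* all hardware-facing work now lives in push_desc_queue() */
	push_desc_queue(c);
}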
Signed-off-by: Tony Lindgren Signed-off-by: Vinod Koul --- drivers/dma/cppi41.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index 4b2317426c8e..cf0415e5687b 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -386,21 +386,6 @@ static void push_desc_queue(struct cppi41_channel *c) u32 desc_phys; u32 reg; - desc_phys = lower_32_bits(c->desc_phys); - desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); - WARN_ON(cdd->chan_busy[desc_num]); - cdd->chan_busy[desc_num] = c; - - reg = (sizeof(struct cppi41_desc) - 24) / 4; - reg |= desc_phys; - cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); -} - -static void cppi41_dma_issue_pending(struct dma_chan *chan) -{ - struct cppi41_channel *c = to_cpp41_chan(chan); - u32 reg; - c->residue = 0; reg = GCR_CHAN_ENABLE; @@ -418,6 +403,21 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan) * before starting the dma engine. */ __iowmb(); + + desc_phys = lower_32_bits(c->desc_phys); + desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc); + WARN_ON(cdd->chan_busy[desc_num]); + cdd->chan_busy[desc_num] = c; + + reg = (sizeof(struct cppi41_desc) - 24) / 4; + reg |= desc_phys; + cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); +} + +static void cppi41_dma_issue_pending(struct dma_chan *chan) +{ + struct cppi41_channel *c = to_cpp41_chan(chan); + push_desc_queue(c); } From fdea2d09b997ba4c86e7a707a5fac87c305f2131 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Wed, 31 Aug 2016 07:19:59 -0700 Subject: [PATCH 066/169] dmaengine: cppi41: Add basic PM runtime support Let's keep the device enabled between cppi41_dma_issue_pending() and dmaengine_desc_get_callback_invoke() and rely on the PM runtime autoidle timeout elsewhere. As the PM runtime is for whole device, not for each channel, we need to queue pending transfers if the device is PM runtime suspended. Then we start the pending transfers in PM runtime resume. 
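A condensed sketch of the reference pairing this introduces (push_desc_queue() and pending_desc() are the driver's helpers; the two wrapper functions and the trimmed error handling here are illustrative only):

/* on issue: take a reference, then either start now or park the channel
 * on the pending list for cppi41_runtime_resume() to replay */
static void issue_with_pm(struct cppi41_channel *c, struct device *dev)
{
	pm_runtime_get(dev);	/* async; may report -EINPROGRESS, see later fix */
	if (pm_runtime_active(dev))
		push_desc_queue(c);	/* device is powered, start immediately */
	else
		pending_desc(c);	/* replayed from runtime resume */
}

/* on completion: drop the reference taken above; the autosuspend delay
 * keeps the device powered across back-to-back transfers */
static void complete_with_pm(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}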
Signed-off-by: Tony Lindgren Signed-off-by: Vinod Koul --- drivers/dma/cppi41.c | 104 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 99 insertions(+), 5 deletions(-) diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index cf0415e5687b..499d8f48e030 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -108,6 +108,8 @@ struct cppi41_channel { unsigned td_queued:1; unsigned td_seen:1; unsigned td_desc_seen:1; + + struct list_head node; /* Node for pending list */ }; struct cppi41_desc { @@ -146,6 +148,9 @@ struct cppi41_dd { const struct chan_queues *queues_tx; struct chan_queues td_queue; + struct list_head pending; /* Pending queued transfers */ + spinlock_t lock; /* Lock for pending list */ + /* context for suspend/resume */ unsigned int dma_tdfdq; }; @@ -332,6 +337,10 @@ static irqreturn_t cppi41_irq(int irq, void *data) c->residue = pd_trans_len(c->desc->pd6) - len; dma_cookie_complete(&c->txd); c->txd.callback(c->txd.callback_param); + + /* Paired with cppi41_dma_issue_pending */ + pm_runtime_mark_last_busy(cdd->ddev.dev); + pm_runtime_put_autosuspend(cdd->ddev.dev); } } return IRQ_HANDLED; @@ -349,6 +358,12 @@ static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx) static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan) { struct cppi41_channel *c = to_cpp41_chan(chan); + struct cppi41_dd *cdd = c->cdd; + int error; + + error = pm_runtime_get_sync(cdd->ddev.dev); + if (error < 0) + return error; dma_cookie_init(chan); dma_async_tx_descriptor_init(&c->txd, chan); @@ -357,11 +372,26 @@ static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan) if (!c->is_tx) cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0); + pm_runtime_mark_last_busy(cdd->ddev.dev); + pm_runtime_put_autosuspend(cdd->ddev.dev); + return 0; } static void cppi41_dma_free_chan_resources(struct dma_chan *chan) { + struct cppi41_channel *c = to_cpp41_chan(chan); + struct cppi41_dd *cdd = c->cdd; + int error; + + error = pm_runtime_get_sync(cdd->ddev.dev); + if (error < 0) + return; + + WARN_ON(!list_empty(&cdd->pending)); + + pm_runtime_mark_last_busy(cdd->ddev.dev); + pm_runtime_put_autosuspend(cdd->ddev.dev); } static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan, @@ -414,11 +444,35 @@ static void push_desc_queue(struct cppi41_channel *c) cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num)); } +static void pending_desc(struct cppi41_channel *c) +{ + struct cppi41_dd *cdd = c->cdd; + unsigned long flags; + + spin_lock_irqsave(&cdd->lock, flags); + list_add_tail(&c->node, &cdd->pending); + spin_unlock_irqrestore(&cdd->lock, flags); +} + static void cppi41_dma_issue_pending(struct dma_chan *chan) { struct cppi41_channel *c = to_cpp41_chan(chan); + struct cppi41_dd *cdd = c->cdd; + int error; - push_desc_queue(c); + /* PM runtime paired with dmaengine_desc_get_callback_invoke */ + error = pm_runtime_get(cdd->ddev.dev); + if (error < 0) { + dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n", + error); + + return; + } + + if (likely(pm_runtime_active(cdd->ddev.dev))) + push_desc_queue(c); + else + pending_desc(c); } static u32 get_host_pd0(u32 length) @@ -940,12 +994,18 @@ static int cppi41_dma_probe(struct platform_device *pdev) cdd->ctrl_mem = of_iomap(dev->of_node, 1); cdd->sched_mem = of_iomap(dev->of_node, 2); cdd->qmgr_mem = of_iomap(dev->of_node, 3); + spin_lock_init(&cdd->lock); + INIT_LIST_HEAD(&cdd->pending); + + platform_set_drvdata(pdev, cdd); if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem || !cdd->qmgr_mem) return -ENXIO; 
pm_runtime_enable(dev); + pm_runtime_set_autosuspend_delay(dev, 100); + pm_runtime_use_autosuspend(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) goto err_get_sync; @@ -985,7 +1045,9 @@ static int cppi41_dma_probe(struct platform_device *pdev) if (ret) goto err_of; - platform_set_drvdata(pdev, cdd); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + return 0; err_of: dma_async_device_unregister(&cdd->ddev); @@ -996,7 +1058,8 @@ err_irq: err_chans: deinit_cppi41(dev, cdd); err_init_cppi: - pm_runtime_put(dev); + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_put_sync(dev); err_get_sync: pm_runtime_disable(dev); iounmap(cdd->usbss_mem); @@ -1021,7 +1084,8 @@ static int cppi41_dma_remove(struct platform_device *pdev) iounmap(cdd->ctrl_mem); iounmap(cdd->sched_mem); iounmap(cdd->qmgr_mem); - pm_runtime_put(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } @@ -1062,9 +1126,39 @@ static int cppi41_resume(struct device *dev) return 0; } + +static int cppi41_runtime_suspend(struct device *dev) +{ + struct cppi41_dd *cdd = dev_get_drvdata(dev); + + WARN_ON(!list_empty(&cdd->pending)); + + return 0; +} + +static int cppi41_runtime_resume(struct device *dev) +{ + struct cppi41_dd *cdd = dev_get_drvdata(dev); + struct cppi41_channel *c, *_c; + unsigned long flags; + + spin_lock_irqsave(&cdd->lock, flags); + list_for_each_entry_safe(c, _c, &cdd->pending, node) { + push_desc_queue(c); + list_del(&c->node); + } + spin_unlock_irqrestore(&cdd->lock, flags); + + return 0; +} #endif -static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume); +static const struct dev_pm_ops cppi41_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(cppi41_suspend, cppi41_resume) + SET_RUNTIME_PM_OPS(cppi41_runtime_suspend, + cppi41_runtime_resume, + NULL) +}; static struct platform_driver cpp41_dma_driver = { .probe = cppi41_dma_probe, From 8a31f8b5db65b860fd0d358dc27f6daf26074406 Mon Sep 17 00:00:00 2001 From: Sinan Kaya Date: Wed, 31 Aug 2016 11:10:27 -0400 Subject: [PATCH 067/169] dmaengine: qcom_hidma: release the descriptor before the callback There is a race condition between data transfer callback and descriptor free code. The callback routine may decide to clear the resources even though the descriptor has not yet been freed. Instead of calling the callback first and then releasing the memory, this code is changing the order to return the descriptor back to the free pool and then call the user provided callback. 
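The reordering, condensed (dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke() are the helpers added earlier in this series; locking as in the driver):

struct dmaengine_desc_callback cb;

/* 1. snapshot the client's callback while the descriptor is still ours */
dmaengine_desc_get_callback(desc, &cb);

/* 2. hand the descriptor back to the free pool first ... */
spin_lock_irqsave(&mchan->lock, irqflags);
list_move(&mdesc->node, &mchan->free);
spin_unlock_irqrestore(&mchan->lock, irqflags);

/* 3. ... and only then notify the client, so a callback that reuses or
 * tears down the channel cannot race the descriptor release */
if (llstat == DMA_COMPLETE)
	dmaengine_desc_callback_invoke(&cb, NULL);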
Signed-off-by: Sinan Kaya Signed-off-by: Vinod Koul --- drivers/dma/qcom/hidma.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 1197fbf8f30e..b8493bafdb3f 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -111,6 +111,7 @@ static void hidma_process_completed(struct hidma_chan *mchan) struct dma_async_tx_descriptor *desc; dma_cookie_t last_cookie; struct hidma_desc *mdesc; + struct hidma_desc *next; unsigned long irqflags; struct list_head list; @@ -122,8 +123,9 @@ static void hidma_process_completed(struct hidma_chan *mchan) spin_unlock_irqrestore(&mchan->lock, irqflags); /* Execute callbacks and run dependencies */ - list_for_each_entry(mdesc, &list, node) { + list_for_each_entry_safe(mdesc, next, &list, node) { enum dma_status llstat; + struct dmaengine_desc_callback cb; desc = &mdesc->desc; @@ -132,18 +134,18 @@ static void hidma_process_completed(struct hidma_chan *mchan) spin_unlock_irqrestore(&mchan->lock, irqflags); llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch); - if (llstat == DMA_COMPLETE) - dmaengine_desc_get_callback_invoke(desc, NULL); + dmaengine_desc_get_callback(desc, &cb); last_cookie = desc->cookie; dma_run_dependencies(desc); + + spin_lock_irqsave(&mchan->lock, irqflags); + list_move(&mdesc->node, &mchan->free); + spin_unlock_irqrestore(&mchan->lock, irqflags); + + if (llstat == DMA_COMPLETE) + dmaengine_desc_callback_invoke(&cb, NULL); } - - /* Free descriptors */ - spin_lock_irqsave(&mchan->lock, irqflags); - list_splice_tail_init(&list, &mchan->free); - spin_unlock_irqrestore(&mchan->lock, irqflags); - } /* From 55c370e5198e8cf28b1529299e9c1bfe237c9c1e Mon Sep 17 00:00:00 2001 From: Sinan Kaya Date: Wed, 31 Aug 2016 11:10:28 -0400 Subject: [PATCH 068/169] dmaengine: qcom_hidma: report transfer errors with new interface Pass the DMA errors to the client by passing a result argument. The HW only supports a generic error when something goes wrong. That's why, using DMA_TRANS_ABORTED all the time. Signed-off-by: Sinan Kaya Signed-off-by: Vinod Koul --- drivers/dma/qcom/hidma.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index b8493bafdb3f..ea24863794b9 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -126,6 +126,7 @@ static void hidma_process_completed(struct hidma_chan *mchan) list_for_each_entry_safe(mdesc, next, &list, node) { enum dma_status llstat; struct dmaengine_desc_callback cb; + struct dmaengine_result result; desc = &mdesc->desc; @@ -141,10 +142,15 @@ static void hidma_process_completed(struct hidma_chan *mchan) spin_lock_irqsave(&mchan->lock, irqflags); list_move(&mdesc->node, &mchan->free); - spin_unlock_irqrestore(&mchan->lock, irqflags); if (llstat == DMA_COMPLETE) - dmaengine_desc_callback_invoke(&cb, NULL); + result.result = DMA_TRANS_NOERROR; + else + result.result = DMA_TRANS_ABORTED; + + spin_unlock_irqrestore(&mchan->lock, irqflags); + + dmaengine_desc_callback_invoke(&cb, &result); } } From 793ae66c7dcc7e6655029f6613221a111b15b58e Mon Sep 17 00:00:00 2001 From: Sinan Kaya Date: Wed, 31 Aug 2016 11:10:29 -0400 Subject: [PATCH 069/169] dmaengine: qcom_hidma: add error reporting for tx_status The HIDMA driver is capable of error detection. However, the error was not being passed back to the client when tx_status API is called. Changing the error handling behavior to follow this oder. 1. dmaengine asserts error interrupt 2. 
Driver receives and mark's the txn as error 3. Driver completes the txn and intimates the client. No further submissions. Drop the locks before calling callback, as subsequent processing by client maybe in callback thread. 4. Client invokes status and you can return error 5. On error, client calls terminate_all. You can reset channel, free all descriptors in the active, pending and completed lists 6. Client prepares new txn and so on. As part of this work, got rid of the reset in the interrupt handler when an error happens and the HW is put into disabled state. The only way to recover is for the client to terminate the channel. Signed-off-by: Sinan Kaya Signed-off-by: Vinod Koul --- drivers/dma/qcom/hidma.c | 30 +++++++++++++++++++++++++----- drivers/dma/qcom/hidma.h | 2 +- drivers/dma/qcom/hidma_ll.c | 32 +++++++------------------------- 3 files changed, 33 insertions(+), 31 deletions(-) diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index ea24863794b9..e244e10a94b5 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -129,6 +129,7 @@ static void hidma_process_completed(struct hidma_chan *mchan) struct dmaengine_result result; desc = &mdesc->desc; + last_cookie = desc->cookie; spin_lock_irqsave(&mchan->lock, irqflags); dma_cookie_complete(desc); @@ -137,15 +138,15 @@ static void hidma_process_completed(struct hidma_chan *mchan) llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch); dmaengine_desc_get_callback(desc, &cb); - last_cookie = desc->cookie; dma_run_dependencies(desc); spin_lock_irqsave(&mchan->lock, irqflags); list_move(&mdesc->node, &mchan->free); - if (llstat == DMA_COMPLETE) + if (llstat == DMA_COMPLETE) { + mchan->last_success = last_cookie; result.result = DMA_TRANS_NOERROR; - else + } else result.result = DMA_TRANS_ABORTED; spin_unlock_irqrestore(&mchan->lock, irqflags); @@ -246,6 +247,19 @@ static void hidma_issue_pending(struct dma_chan *dmach) hidma_ll_start(dmadev->lldev); } +static inline bool hidma_txn_is_success(dma_cookie_t cookie, + dma_cookie_t last_success, dma_cookie_t last_used) +{ + if (last_success <= last_used) { + if ((cookie <= last_success) || (cookie > last_used)) + return true; + } else { + if ((cookie <= last_success) && (cookie > last_used)) + return true; + } + return false; +} + static enum dma_status hidma_tx_status(struct dma_chan *dmach, dma_cookie_t cookie, struct dma_tx_state *txstate) @@ -254,8 +268,13 @@ static enum dma_status hidma_tx_status(struct dma_chan *dmach, enum dma_status ret; ret = dma_cookie_status(dmach, cookie, txstate); - if (ret == DMA_COMPLETE) - return ret; + if (ret == DMA_COMPLETE) { + bool is_success; + + is_success = hidma_txn_is_success(cookie, mchan->last_success, + dmach->cookie); + return is_success ? 
ret : DMA_ERROR; + } if (mchan->paused && (ret == DMA_IN_PROGRESS)) { unsigned long flags; @@ -406,6 +425,7 @@ static int hidma_terminate_channel(struct dma_chan *chan) hidma_process_completed(mchan); spin_lock_irqsave(&mchan->lock, irqflags); + mchan->last_success = 0; list_splice_init(&mchan->active, &list); list_splice_init(&mchan->prepared, &list); list_splice_init(&mchan->completed, &list); diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h index db413a5efc4e..e52e20716303 100644 --- a/drivers/dma/qcom/hidma.h +++ b/drivers/dma/qcom/hidma.h @@ -72,7 +72,6 @@ struct hidma_lldev { u32 tre_write_offset; /* TRE write location */ struct tasklet_struct task; /* task delivering notifications */ - struct tasklet_struct rst_task; /* task to reset HW */ DECLARE_KFIFO_PTR(handoff_fifo, struct hidma_tre *); /* pending TREs FIFO */ }; @@ -89,6 +88,7 @@ struct hidma_chan { bool allocated; char dbg_name[16]; u32 dma_sig; + dma_cookie_t last_success; /* * active descriptor on this channel diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c index ad20dfb64c71..3224f24c577b 100644 --- a/drivers/dma/qcom/hidma_ll.c +++ b/drivers/dma/qcom/hidma_ll.c @@ -380,27 +380,6 @@ static int hidma_ll_reset(struct hidma_lldev *lldev) return 0; } -/* - * Abort all transactions and perform a reset. - */ -static void hidma_ll_abort(unsigned long arg) -{ - struct hidma_lldev *lldev = (struct hidma_lldev *)arg; - u8 err_code = HIDMA_EVRE_STATUS_ERROR; - u8 err_info = 0xFF; - int rc; - - hidma_cleanup_pending_tre(lldev, err_info, err_code); - - /* reset the channel for recovery */ - rc = hidma_ll_setup(lldev); - if (rc) { - dev_err(lldev->dev, "channel reinitialize failed after error\n"); - return; - } - writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); -} - /* * The interrupt handler for HIDMA will try to consume as many pending * EVRE from the event queue as possible. Each EVRE has an associated @@ -454,13 +433,18 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg) while (cause) { if (cause & HIDMA_ERR_INT_MASK) { - dev_err(lldev->dev, "error 0x%x, resetting...\n", + dev_err(lldev->dev, "error 0x%x, disabling...\n", cause); /* Clear out pending interrupts */ writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); - tasklet_schedule(&lldev->rst_task); + /* No further submissions. */ + hidma_ll_disable(lldev); + + /* Driver completes the txn and intimates the client.*/ + hidma_cleanup_pending_tre(lldev, 0xFF, + HIDMA_EVRE_STATUS_ERROR); goto out; } @@ -808,7 +792,6 @@ struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres, return NULL; spin_lock_init(&lldev->lock); - tasklet_init(&lldev->rst_task, hidma_ll_abort, (unsigned long)lldev); tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev); lldev->initialized = 1; writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG); @@ -831,7 +814,6 @@ int hidma_ll_uninit(struct hidma_lldev *lldev) required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres; tasklet_kill(&lldev->task); - tasklet_kill(&lldev->rst_task); memset(lldev->trepool, 0, required_bytes); lldev->trepool = NULL; lldev->pending_tre_count = 0; From e9405ef08ca8d9e702f4a1b58b4fa992a7c9f137 Mon Sep 17 00:00:00 2001 From: Sinan Kaya Date: Thu, 1 Sep 2016 10:02:55 -0400 Subject: [PATCH 070/169] dmaengine: dmatest: exclude compare and fill time during perf report Dmatest is currently including compare and fill time into the calculated performance numbers. 
This does not reflect the HW capability and the results vary based on the CPU speed instead of the HW speed. Signed-off-by: Sinan Kaya Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 1245db5438e1..738fbd1dd57a 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -426,7 +426,9 @@ static int dmatest_func(void *data) int src_cnt; int dst_cnt; int i; - ktime_t ktime; + ktime_t ktime, start, diff; + ktime_t filltime = ktime_set(0, 0); + ktime_t comparetime = ktime_set(0, 0); s64 runtime = 0; unsigned long long total_len = 0; @@ -531,6 +533,7 @@ static int dmatest_func(void *data) src_off = 0; dst_off = 0; } else { + start = ktime_get(); src_off = dmatest_random() % (params->buf_size - len + 1); dst_off = dmatest_random() % (params->buf_size - len + 1); @@ -541,6 +544,9 @@ static int dmatest_func(void *data) params->buf_size); dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size); + + diff = ktime_sub(ktime_get(), start); + filltime = ktime_add(filltime, diff); } um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt, @@ -683,6 +689,7 @@ static int dmatest_func(void *data) continue; } + start = ktime_get(); pr_debug("%s: verifying source buffer...\n", current->comm); error_count = dmatest_verify(thread->srcs, 0, src_off, 0, PATTERN_SRC, true); @@ -703,6 +710,9 @@ static int dmatest_func(void *data) params->buf_size, dst_off + len, PATTERN_DST, false); + diff = ktime_sub(ktime_get(), start); + comparetime = ktime_add(comparetime, diff); + if (error_count) { result("data error", total_tests, src_off, dst_off, len, error_count); @@ -712,7 +722,10 @@ static int dmatest_func(void *data) dst_off, len, 0); } } - runtime = ktime_us_delta(ktime_get(), ktime); + ktime = ktime_sub(ktime_get(), ktime); + ktime = ktime_sub(ktime, comparetime); + ktime = ktime_sub(ktime, filltime); + runtime = ktime_to_us(ktime); ret = 0; err_dstbuf: From 2d9e31b9412cfcb432c1815fef0f72bc0a45542b Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 2 Sep 2016 23:41:10 +0200 Subject: [PATCH 071/169] dmaengine: moxart: remove NO_IRQ The use of NO_IRQ is incorrect here and should never have been there, as irq_of_parse_and_map() returns '0' on failure, not NO_IRQ. Signed-off-by: Arnd Bergmann Signed-off-by: Vinod Koul --- drivers/dma/moxart-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c index a6e642792e5a..e1a5c2242f6f 100644 --- a/drivers/dma/moxart-dma.c +++ b/drivers/dma/moxart-dma.c @@ -579,7 +579,7 @@ static int moxart_probe(struct platform_device *pdev) return -ENOMEM; irq = irq_of_parse_and_map(node, 0); - if (irq == NO_IRQ) { + if (!irq) { dev_err(dev, "no IRQ resource\n"); return -EINVAL; } From 028e84a1de7cba6a3e4cf1a22094b76a4b92cefb Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sat, 3 Sep 2016 01:01:06 +0200 Subject: [PATCH 072/169] dmaengine: mxs: remove NO_IRQ check The mxs_chan->chan_irq variable is guaranteed to never be NO_IRQ, as it gets assigned the result of platform_get_irq() that returns either a valid positive interrupt number, or a negative failure code that leads to the channel not being used. This removes the redundant check, eliminating one more instance of NO_IRQ. 
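For reference, the idiom these NO_IRQ cleanups converge on, shown in one place (illustrative fragment; np and pdev stand for the usual device-tree node and platform device): the two lookup functions have different failure conventions, and neither of them is NO_IRQ.

int irq;

/* irq_of_parse_and_map() returns 0 on failure, never NO_IRQ */
irq = irq_of_parse_and_map(np, 0);
if (!irq)
	return -EINVAL;

/* platform_get_irq() returns a negative errno on failure */
irq = platform_get_irq(pdev, 0);
if (irq < 0)
	return irq;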
Signed-off-by: Arnd Bergmann Signed-off-by: Vinod Koul --- drivers/dma/mxs-dma.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 60de35251da5..1e3572bb7012 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -429,12 +429,10 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) goto err_alloc; } - if (mxs_chan->chan_irq != NO_IRQ) { - ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, - 0, "mxs-dma", mxs_dma); - if (ret) - goto err_irq; - } + ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, + 0, "mxs-dma", mxs_dma); + if (ret) + goto err_irq; ret = clk_prepare_enable(mxs_dma->clk); if (ret) From 524c6e04f826cea8a34a27136d8f5925df9213ed Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sat, 3 Sep 2016 01:17:20 +0200 Subject: [PATCH 073/169] dmaengine: sirf: fix irq number error check irq_of_parse_and_map() returns 0 on error, no NO_IRQ, so the failure condition can never be met. This changes the comparison to check for zero instead. Signed-off-by: Arnd Bergmann Signed-off-by: Vinod Koul --- drivers/dma/sirf-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index d8bc3f2a71db..f5abe29e748f 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -869,7 +869,7 @@ static int sirfsoc_dma_probe(struct platform_device *op) } sdma->irq = irq_of_parse_and_map(dn, 0); - if (sdma->irq == NO_IRQ) { + if (!sdma->irq) { dev_err(dev, "Error mapping IRQ!\n"); return -EINVAL; } From 86c7e6836479c4045a9a81ed5ea76c51d719f9c1 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sat, 3 Sep 2016 01:22:02 +0200 Subject: [PATCH 074/169] dmaengine: ipu: remove bogus NO_IRQ reference A workaround for a warning introduced a use of the NO_IRQ macro that should have been gone for a long time. It is clear from the code that the value cannot actually be used, but apparently there was a configuration at some point that caused a warning, so instead of just reverting that patch, this rearranges the code in a way that the warning cannot reappear. 
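The rearranged loop body, condensed from the diff that follows: the unmapped-source case is decided while the lock is still held and bails out with continue, so irq is only ever read from a valid map and no NO_IRQ placeholder is needed.

raw_spin_lock(&bank_lock);
map = src2map(32 * i + line);
if (!map) {
	raw_spin_unlock(&bank_lock);
	pr_err("IPU: Interrupt on unmapped source %u bank %d\n", line, i);
	continue;
}
irq = map->irq;			/* read under the lock, always valid here */
raw_spin_unlock(&bank_lock);
generic_handle_irq(irq);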
Signed-off-by: Arnd Bergmann Fixes: 6ef41cf6f721 ("dmaengine :ipu: change ipu_irq_handler() to remove compile warning") Signed-off-by: Vinod Koul --- drivers/dma/ipu/ipu_irq.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c index 2bf37e68ad0f..dd184b50e5b4 100644 --- a/drivers/dma/ipu/ipu_irq.c +++ b/drivers/dma/ipu/ipu_irq.c @@ -286,22 +286,21 @@ static void ipu_irq_handler(struct irq_desc *desc) raw_spin_unlock(&bank_lock); while ((line = ffs(status))) { struct ipu_irq_map *map; - unsigned int irq = NO_IRQ; + unsigned int irq; line--; status &= ~(1UL << line); raw_spin_lock(&bank_lock); map = src2map(32 * i + line); - if (map) - irq = map->irq; - raw_spin_unlock(&bank_lock); - if (!map) { + raw_spin_unlock(&bank_lock); pr_err("IPU: Interrupt on unmapped source %u bank %d\n", line, i); continue; } + irq = map->irq; + raw_spin_unlock(&bank_lock); generic_handle_irq(irq); } } From 5f03c39978e3437398d4777215c5818e62118b2c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 6 Sep 2016 15:17:49 +0200 Subject: [PATCH 075/169] dmaengine: k3dma: use correct format string for debug output The newly added k3_dma_prep_dma_cyclic function has some debug output that uses incorrect typecasts, some of which cause a warning like: drivers/dma/k3dma.c: In function 'k3_dma_prep_dma_cyclic': drivers/dma/k3dma.c:589:671: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast] In general, we have to print 'dma_addr_t' values using special '%pad' format to get the correct behavior on kernels that have a 64-bit dma_addr_t type but 32-bit pointers. Similarly, printing size_t values should be done using the %z modifier to get the correct behavior on 64-bit kernels. Signed-off-by: Arnd Bergmann Fixes: a7e08fa6cc78 ("k3dma: Add cyclic mode for audio") Signed-off-by: Vinod Koul --- drivers/dma/k3dma.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 814e6e04e15c..aabcb7934b05 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -586,9 +586,9 @@ k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t modulo = DMA_CYCLIC_MAX_PERIOD; u32 en_tc2 = 0; - dev_dbg(chan->device->dev, "%s: buf %p, dst %p, buf len %d, period_len = %d, dir %d\n", - __func__, (void *)buf_addr, (void *)to_k3_chan(chan)->dev_addr, - (int)buf_len, (int)period_len, (int)dir); + dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n", + __func__, &buf_addr, &to_k3_chan(chan)->dev_addr, + buf_len, period_len, (int)dir); avail = buf_len; if (avail > modulo) From 522ef6144fe46ec2a74fa8778a73f2bd2cf0f9bb Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 6 Sep 2016 15:20:05 +0200 Subject: [PATCH 076/169] dmaengine: cppi41: mark PM functions as __maybe_unused When CONFIG_PM_SLEEP is disabled, we get a build error in the cppi41 dmaengine driver, since the runtime-pm functions are hidden within the wrong #ifdef: drivers/dma/cppi41.c:1158:21: error: 'cppi41_runtime_suspend' undeclared here (not in a function) This removes the #ifdef and instead uses __maybe_unused annotations that cannot have this problem. 
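A minimal sketch of the pattern (the foo_* names are placeholders): the callbacks stay outside any #ifdef, __maybe_unused silences the unused-function warning when PM is configured out, and the SET_*_PM_OPS macros drop the references for free in that case.

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* quiesce the device for system sleep */
	return 0;
}

static int __maybe_unused foo_runtime_suspend(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(foo_suspend, NULL)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, NULL, NULL)
};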
Signed-off-by: Arnd Bergmann Fixes: fdea2d09b997 ("dmaengine: cppi41: Add basic PM runtime support") Signed-off-by: Vinod Koul --- drivers/dma/cppi41.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index 499d8f48e030..2f1994ec245f 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -1090,8 +1090,7 @@ static int cppi41_dma_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int cppi41_suspend(struct device *dev) +static int __maybe_unused cppi41_suspend(struct device *dev) { struct cppi41_dd *cdd = dev_get_drvdata(dev); @@ -1102,7 +1101,7 @@ static int cppi41_suspend(struct device *dev) return 0; } -static int cppi41_resume(struct device *dev) +static int __maybe_unused cppi41_resume(struct device *dev) { struct cppi41_dd *cdd = dev_get_drvdata(dev); struct cppi41_channel *c; @@ -1127,7 +1126,7 @@ static int cppi41_resume(struct device *dev) return 0; } -static int cppi41_runtime_suspend(struct device *dev) +static int __maybe_unused cppi41_runtime_suspend(struct device *dev) { struct cppi41_dd *cdd = dev_get_drvdata(dev); @@ -1136,7 +1135,7 @@ static int cppi41_runtime_suspend(struct device *dev) return 0; } -static int cppi41_runtime_resume(struct device *dev) +static int __maybe_unused cppi41_runtime_resume(struct device *dev) { struct cppi41_dd *cdd = dev_get_drvdata(dev); struct cppi41_channel *c, *_c; @@ -1151,7 +1150,6 @@ static int cppi41_runtime_resume(struct device *dev) return 0; } -#endif static const struct dev_pm_ops cppi41_pm_ops = { SET_LATE_SYSTEM_SLEEP_PM_OPS(cppi41_suspend, cppi41_resume) From c8a2c191f56dfd1bc132781ef47a46e0c6a4d18d Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Wed, 7 Sep 2016 18:24:28 -0700 Subject: [PATCH 077/169] dmaengine: dmatest: Apply copy_align to DMA_SG as well The DMA_SG is still a type of memory copy operation that should conform the hardware restriction. So this patch just applies the copy_align to DMA_SG as well. Signed-off-by: Nicolin Chen Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 738fbd1dd57a..53e972819ccd 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -505,7 +505,7 @@ static int dmatest_func(void *data) total_tests++; /* honor alignment restrictions */ - if (thread->type == DMA_MEMCPY) + if (thread->type == DMA_MEMCPY || thread->type == DMA_SG) align = dev->copy_align; else if (thread->type == DMA_XOR) align = dev->xor_align; From d64e9a2c750930272492952c16f3f2c95311a6c9 Mon Sep 17 00:00:00 2001 From: Stephen Barber Date: Thu, 18 Aug 2016 17:59:59 -0700 Subject: [PATCH 078/169] dmaengine: pl330: fix residual for non-running BUSY descriptors Only one descriptor in the work list should be running at any given time, but it's possible to have an enqueued BUSY descriptor that has not yet transferred any data, or for a BUSY descriptor to linger briefly before transitioning to DONE. These cases should be handled to keep residual calculations consistent even with the non-running BUSY descriptors in the work list. 
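The bookkeeping described above, condensed (field and helper names are the driver's; the surrounding locking is tightened by the next patch in this series):

list_for_each_entry(desc, &pch->work_list, node) {
	if (desc->status == DONE)
		transferred = desc->bytes_requested;
	else if (desc == running)
		transferred = pl330_get_current_xferred_count(pch, desc);
	else if (desc->status == BUSY)
		/* just enqueued, or finished but not yet marked DONE */
		transferred = (desc == last_enq) ? 0 : desc->bytes_requested;
	else
		transferred = 0;

	residual += desc->bytes_requested - transferred;
}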
Signed-off-by: Stephen Barber Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 4fc3ffbd5ca0..31e9c49e5604 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2274,7 +2274,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, { enum dma_status ret; unsigned long flags; - struct dma_pl330_desc *desc, *running = NULL; + struct dma_pl330_desc *desc, *running = NULL, *last_enq = NULL; struct dma_pl330_chan *pch = to_pchan(chan); unsigned int transferred, residual = 0; @@ -2291,6 +2291,8 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, if (pch->thread->req_running != -1) running = pch->thread->req[pch->thread->req_running].desc; + last_enq = pch->thread->req[pch->thread->lstenq].desc; + /* Check in pending list */ list_for_each_entry(desc, &pch->work_list, node) { if (desc->status == DONE) @@ -2298,6 +2300,15 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, else if (running && desc == running) transferred = pl330_get_current_xferred_count(pch, desc); + else if (desc->status == BUSY) + /* + * Busy but not running means either just enqueued, + * or finished and not yet marked done + */ + if (desc == last_enq) + transferred = 0; + else + transferred = desc->bytes_requested; else transferred = 0; residual += desc->bytes_requested - transferred; From a40235a2278a315261ee007fc433ec1cfb31666f Mon Sep 17 00:00:00 2001 From: Hsin-Yu Chao Date: Tue, 23 Aug 2016 17:16:55 +0800 Subject: [PATCH 079/169] dmaengine: pl330: Acquire dmac's spinlock in pl330_tx_status There is a racing when accessing dmac thread in pl330_tx_status that the pl330_update is handling active request at the same time and changing the status of descriptors. This could cause an invalid transferred count from BUSY descriptor added up to the residual number. Fix the bug by using the dmac's spinlock in pl330_tx_status to protect thread resources from changing. Note that the nested order of holding dmac's and dma_chan's spinlock is consistent with the rest of the driver: dma_chan first and then dmac, so it is safe from deadlock scenario. Signed-off-by: Hsin-Yu Chao Reviewed-by: Guenter Roeck Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 31e9c49e5604..c4d61528072f 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2287,6 +2287,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, goto out; spin_lock_irqsave(&pch->lock, flags); + spin_lock(&pch->thread->dmac->lock); if (pch->thread->req_running != -1) running = pch->thread->req[pch->thread->req_running].desc; @@ -2329,6 +2330,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, if (desc->last) residual = 0; } + spin_unlock(&pch->thread->dmac->lock); spin_unlock_irqrestore(&pch->lock, flags); out: From aa570be6de67f3772cc850a7bfbe659214aa9ee4 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Sat, 10 Sep 2016 19:56:04 +1000 Subject: [PATCH 080/169] dmaengine: NO_IRQ removal from powerpc-only drivers We'd like to eventually remove NO_IRQ on powerpc, so remove usages of it from powerpc-only drivers. 
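Looking back at the pl330 locking fix above, the nesting it establishes is worth spelling out, since the residue walk depends on it (condensed; the comment is illustrative): the channel lock is taken first and the dmac lock second, matching the order used elsewhere in the driver, so pl330_update() cannot change descriptor state mid-walk and no ABBA deadlock is introduced.

spin_lock_irqsave(&pch->lock, flags);
spin_lock(&pch->thread->dmac->lock);

/* safe to read req_running / lstenq and walk pch->work_list here */

spin_unlock(&pch->thread->dmac->lock);
spin_unlock_irqrestore(&pch->lock, flags);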
Signed-off-by: Michael Ellerman Signed-off-by: Vinod Koul --- drivers/dma/bestcomm/bestcomm.c | 4 ++-- drivers/dma/fsl_raid.c | 2 +- drivers/dma/fsldma.c | 12 ++++++------ drivers/dma/mpc512x_dma.c | 4 ++-- drivers/dma/ppc4xx/adma.c | 4 ++-- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c index 7ce843723003..7a67b8345092 100644 --- a/drivers/dma/bestcomm/bestcomm.c +++ b/drivers/dma/bestcomm/bestcomm.c @@ -82,7 +82,7 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size) /* Get IRQ of that task */ tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum); - if (tsk->irq == NO_IRQ) + if (!tsk->irq) goto error; /* Init the BDs, if needed */ @@ -104,7 +104,7 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size) error: if (tsk) { - if (tsk->irq != NO_IRQ) + if (tsk->irq) irq_dispose_mapping(tsk->irq); bcom_sram_free(tsk->bd); kfree(tsk->cookie); diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c index aad167eaaee8..496ff8e7d7f9 100644 --- a/drivers/dma/fsl_raid.c +++ b/drivers/dma/fsl_raid.c @@ -670,7 +670,7 @@ static int fsl_re_chan_probe(struct platform_device *ofdev, /* read irq property from dts */ chan->irq = irq_of_parse_and_map(np, 0); - if (chan->irq == NO_IRQ) { + if (!chan->irq) { dev_err(dev, "No IRQ defined for JR %d\n", q); ret = -ENODEV; goto err_free; diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 911b7177eb50..4b7a66d357d2 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1153,7 +1153,7 @@ static void fsldma_free_irqs(struct fsldma_device *fdev) struct fsldma_chan *chan; int i; - if (fdev->irq != NO_IRQ) { + if (fdev->irq) { dev_dbg(fdev->dev, "free per-controller IRQ\n"); free_irq(fdev->irq, fdev); return; @@ -1161,7 +1161,7 @@ static void fsldma_free_irqs(struct fsldma_device *fdev) for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { chan = fdev->chan[i]; - if (chan && chan->irq != NO_IRQ) { + if (chan && chan->irq) { chan_dbg(chan, "free per-channel IRQ\n"); free_irq(chan->irq, chan); } @@ -1175,7 +1175,7 @@ static int fsldma_request_irqs(struct fsldma_device *fdev) int i; /* if we have a per-controller IRQ, use that */ - if (fdev->irq != NO_IRQ) { + if (fdev->irq) { dev_dbg(fdev->dev, "request per-controller IRQ\n"); ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED, "fsldma-controller", fdev); @@ -1188,7 +1188,7 @@ static int fsldma_request_irqs(struct fsldma_device *fdev) if (!chan) continue; - if (chan->irq == NO_IRQ) { + if (!chan->irq) { chan_err(chan, "interrupts property missing in device tree\n"); ret = -ENODEV; goto out_unwind; @@ -1211,7 +1211,7 @@ out_unwind: if (!chan) continue; - if (chan->irq == NO_IRQ) + if (!chan->irq) continue; free_irq(chan->irq, chan); @@ -1311,7 +1311,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, list_add_tail(&chan->common.device_node, &fdev->common.channels); dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible, - chan->irq != NO_IRQ ? chan->irq : fdev->irq); + chan->irq ? 
chan->irq : fdev->irq); return 0; diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index fa86592c7ae1..9dd99ba18fce 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -926,7 +926,7 @@ static int mpc_dma_probe(struct platform_device *op) } mdma->irq = irq_of_parse_and_map(dn, 0); - if (mdma->irq == NO_IRQ) { + if (!mdma->irq) { dev_err(dev, "Error mapping IRQ!\n"); retval = -EINVAL; goto err; @@ -935,7 +935,7 @@ static int mpc_dma_probe(struct platform_device *op) if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) { mdma->is_mpc8308 = 1; mdma->irq2 = irq_of_parse_and_map(dn, 1); - if (mdma->irq2 == NO_IRQ) { + if (!mdma->irq2) { dev_err(dev, "Error mapping IRQ!\n"); retval = -EINVAL; goto err_dispose1; diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index da3688b94bdc..66bd96724b2f 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -3891,7 +3891,7 @@ static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev, np = ofdev->dev.of_node; if (adev->id != PPC440SPE_XOR_ID) { adev->err_irq = irq_of_parse_and_map(np, 1); - if (adev->err_irq == NO_IRQ) { + if (!adev->err_irq) { dev_warn(adev->dev, "no err irq resource?\n"); *initcode = PPC_ADMA_INIT_IRQ2; adev->err_irq = -ENXIO; @@ -3902,7 +3902,7 @@ static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev, } adev->irq = irq_of_parse_and_map(np, 0); - if (adev->irq == NO_IRQ) { + if (!adev->irq) { dev_err(adev->dev, "no irq resource\n"); *initcode = PPC_ADMA_INIT_IRQ1; ret = -ENXIO; From d4c77c0532e2eceaccdeeec493b2e39666c97406 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Tue, 13 Sep 2016 10:58:43 +0300 Subject: [PATCH 081/169] dmaengine: omap-dma: Correct type2 descriptor's member types The type of CDEI, CSEI, CDFI and CSFI is signed. This did not caused issue so far as we only use unsigned values. Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul --- drivers/dma/omap-dma.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index aef52011d7c0..1b7f5f32bf74 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -71,10 +71,10 @@ struct omap_type2_desc { uint32_t addr; /* src or dst */ uint16_t fn; uint16_t cicr; - uint16_t cdei; - uint16_t csei; - uint32_t cdfi; - uint32_t csfi; + int16_t cdei; + int16_t csei; + int32_t cdfi; + int32_t csfi; } __packed; struct omap_sg { From 360af35b08da9def3be8b67398f4e0f90c292e37 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 13 Sep 2016 03:08:17 +0900 Subject: [PATCH 082/169] dmaengine: cleanup with list_first_entry_or_null() The combo of list_empty() check and return list_first_entry() can be replaced with list_first_entry_or_null(). 
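The whole cleanup in one line, using one of the converted ste_dma40 helpers as it reads after the change:

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	/* NULL when the queue is empty, otherwise its first entry */
	return list_first_entry_or_null(&d40c->queue, struct d40_desc, node);
}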
Signed-off-by: Masahiro Yamada Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/coh901318.c | 22 ++++------------------ drivers/dma/ep93xx_dma.c | 6 ++---- drivers/dma/ste_dma40.c | 36 +++++------------------------------- drivers/dma/virt-dma.h | 6 ++---- 4 files changed, 13 insertions(+), 57 deletions(-) diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index e4acd63e42aa..a373ecacfaba 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1553,15 +1553,8 @@ coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc) static struct coh901318_desc * coh901318_first_active_get(struct coh901318_chan *cohc) { - struct coh901318_desc *d; - - if (list_empty(&cohc->active)) - return NULL; - - d = list_first_entry(&cohc->active, - struct coh901318_desc, - node); - return d; + return list_first_entry_or_null(&cohc->active, struct coh901318_desc, + node); } static void @@ -1579,15 +1572,8 @@ coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc) static struct coh901318_desc * coh901318_first_queued(struct coh901318_chan *cohc) { - struct coh901318_desc *d; - - if (list_empty(&cohc->queue)) - return NULL; - - d = list_first_entry(&cohc->queue, - struct coh901318_desc, - node); - return d; + return list_first_entry_or_null(&cohc->queue, struct coh901318_desc, + node); } static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli) diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 21f08cc3352b..2ffaca25267e 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -262,10 +262,8 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac, static struct ep93xx_dma_desc * ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac) { - if (list_empty(&edmac->active)) - return NULL; - - return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node); + return list_first_entry_or_null(&edmac->active, + struct ep93xx_dma_desc, node); } /** diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 8b18e44a02d5..e43d2bbfd122 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -941,15 +941,7 @@ static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) { - struct d40_desc *d; - - if (list_empty(&d40c->active)) - return NULL; - - d = list_first_entry(&d40c->active, - struct d40_desc, - node); - return d; + return list_first_entry_or_null(&d40c->active, struct d40_desc, node); } /* remove desc from current queue and add it to the pending_queue */ @@ -962,36 +954,18 @@ static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) static struct d40_desc *d40_first_pending(struct d40_chan *d40c) { - struct d40_desc *d; - - if (list_empty(&d40c->pending_queue)) - return NULL; - - d = list_first_entry(&d40c->pending_queue, - struct d40_desc, - node); - return d; + return list_first_entry_or_null(&d40c->pending_queue, struct d40_desc, + node); } static struct d40_desc *d40_first_queued(struct d40_chan *d40c) { - struct d40_desc *d; - - if (list_empty(&d40c->queue)) - return NULL; - - d = list_first_entry(&d40c->queue, - struct d40_desc, - node); - return d; + return list_first_entry_or_null(&d40c->queue, struct d40_desc, node); } static struct d40_desc *d40_first_done(struct d40_chan *d40c) { - if (list_empty(&d40c->done)) - return NULL; - - return list_first_entry(&d40c->done, struct d40_desc, node); + return list_first_entry_or_null(&d40c->done, struct 
d40_desc, node); } static int d40_psize_2_burst_size(bool is_log, int psize) diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index d9731ca5e262..a030ae7b1df2 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -123,10 +123,8 @@ static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) */ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) { - if (list_empty(&vc->desc_issued)) - return NULL; - - return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node); + return list_first_entry_or_null(&vc->desc_issued, + struct virt_dma_desc, node); } /** From f2f6f828fc79509d7582d5f338ecf0795250d8b5 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 13 Sep 2016 10:22:43 -0700 Subject: [PATCH 083/169] dmaengine: cppi41: Ignore EINPROGRESS for PM runtime We can occasionally get -EINPROGRESS for pm_runtime_get. In that case we can just continue as we're queueing transfers anyways when pm_runtime_active is not set. Fixes: fdea2d09b997 ("dmaengine: cppi41: Add basic PM runtime support") Signed-off-by: Tony Lindgren Signed-off-by: Vinod Koul --- drivers/dma/cppi41.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index 2f1994ec245f..97f4d6c1b6b9 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -462,7 +462,7 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan) /* PM runtime paired with dmaengine_desc_get_callback_invoke */ error = pm_runtime_get(cdd->ddev.dev); - if (error < 0) { + if ((error != -EINPROGRESS) && error < 0) { dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n", error); From 434cec62a6d73b8c8080cd992bc97a564fdd5a5a Mon Sep 17 00:00:00 2001 From: Stefan Roese Date: Thu, 15 Sep 2016 07:37:30 +0200 Subject: [PATCH 084/169] bus: mvebu-mbus: Provide stub function for mvebu_mbus_get_io_win_info() This patch provides a stub function for mvebu_mbus_get_io_win_info(), which will be used for all non-Orion (ARM32 MVEBU) platforms for compile test coverage. On such platforms this function will return an error so that drivers might detect a potential problem. Signed-off-by: Stefan Roese Acked-by: Gregory CLEMENT Cc: Thomas Petazzoni Cc: Marcin Wojtas Cc: Arnd Bergmann Cc: Andrew Lunn Cc: Vinod Koul Signed-off-by: Vinod Koul --- include/linux/mbus.h | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/include/linux/mbus.h b/include/linux/mbus.h index d610232762e3..2931aa43dab1 100644 --- a/include/linux/mbus.h +++ b/include/linux/mbus.h @@ -11,6 +11,8 @@ #ifndef __LINUX_MBUS_H #define __LINUX_MBUS_H +#include + struct resource; struct mbus_dram_target_info @@ -55,6 +57,8 @@ struct mbus_dram_target_info #ifdef CONFIG_PLAT_ORION extern const struct mbus_dram_target_info *mv_mbus_dram_info(void); extern const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(void); +int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target, + u8 *attr); #else static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void) { @@ -64,14 +68,24 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(vo { return NULL; } +static inline int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, + u8 *target, u8 *attr) +{ + /* + * On all ARM32 MVEBU platforms with MBus support, this stub + * function will not get called. The real function from the + * MBus driver is called instead. 
ARM64 MVEBU platforms like + * the Armada 3700 could use the mv_xor device driver which calls + * into this function + */ + return -EINVAL; +} #endif int mvebu_mbus_save_cpu_target(u32 __iomem *store_addr); void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); void mvebu_mbus_get_pcie_io_aperture(struct resource *res); int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr); -int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target, - u8 *attr); int mvebu_mbus_add_window_remap_by_id(unsigned int target, unsigned int attribute, phys_addr_t base, size_t size, From 77ff7a706f014a56d38f07acf220f381a8fe0fd8 Mon Sep 17 00:00:00 2001 From: Stefan Roese Date: Thu, 15 Sep 2016 07:37:31 +0200 Subject: [PATCH 085/169] dmaengine: mv_xor: Add support for IO (PCIe) src/dst areas To enable the access to a specific area, the MVEBU XOR controllers needs to have this area enabled / mapped via an address window. Right now, only the DRAM memory area is enabled via such memory windows. So using this driver to DMA to / from a e.g. PCIe memory region is currently not supported. This patch now adds support for such PCIe / IO regions by checking if the src / dst address is located in an IO memory area in contrast to being located in DRAM. This is done by using the newly introduced MBus function mvebu_mbus_get_io_win_info(). If the src / dst address is located in such an IO area, a new address window is created in the XOR DMA controller. Enabling the controller to access this area. Signed-off-by: Stefan Roese Cc: Gregory CLEMENT Cc: Thomas Petazzoni Cc: Marcin Wojtas Cc: Arnd Bergmann Cc: Andrew Lunn Cc: Vinod Koul Signed-off-by: Vinod Koul --- drivers/dma/mv_xor.c | 95 +++++++++++++++++++++++++++++++++++++++++++- drivers/dma/mv_xor.h | 7 ++++ 2 files changed, 101 insertions(+), 1 deletion(-) diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index f4c9f98ec35e..ff4a094cd582 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -470,12 +470,90 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) return mv_chan->slots_allocated ? : -ENOMEM; } +/* + * Check if source or destination is an PCIe/IO address (non-SDRAM) and add + * a new MBus window if necessary. Use a cache for these check so that + * the MMIO mapped registers don't have to be accessed for this check + * to speed up this process. + */ +static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr) +{ + struct mv_xor_device *xordev = mv_chan->xordev; + void __iomem *base = mv_chan->mmr_high_base; + u32 win_enable; + u32 size; + u8 target, attr; + int ret; + int i; + + /* Nothing needs to get done for the Armada 3700 */ + if (xordev->xor_type == XOR_ARMADA_37XX) + return 0; + + /* + * Loop over the cached windows to check, if the requested area + * is already mapped. If this the case, nothing needs to be done + * and we can return. + */ + for (i = 0; i < WINDOW_COUNT; i++) { + if (addr >= xordev->win_start[i] && + addr <= xordev->win_end[i]) { + /* Window is already mapped */ + return 0; + } + } + + /* + * The window is not mapped, so we need to create the new mapping + */ + + /* If no IO window is found that addr has to be located in SDRAM */ + ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr); + if (ret < 0) + return 0; + + /* + * Mask the base addr 'addr' according to 'size' read back from the + * MBus window. Otherwise we might end up with an address located + * somewhere in the middle of this area here. 
+ */ + size -= 1; + addr &= ~size; + + /* + * Reading one of both enabled register is enough, as they are always + * programmed to the identical values + */ + win_enable = readl(base + WINDOW_BAR_ENABLE(0)); + + /* Set 'i' to the first free window to write the new values to */ + i = ffs(~win_enable) - 1; + if (i >= WINDOW_COUNT) + return -ENOMEM; + + writel((addr & 0xffff0000) | (attr << 8) | target, + base + WINDOW_BASE(i)); + writel(size & 0xffff0000, base + WINDOW_SIZE(i)); + + /* Fill the caching variables for later use */ + xordev->win_start[i] = addr; + xordev->win_end[i] = addr + size; + + win_enable |= (1 << i); + win_enable |= 3 << (16 + (2 * i)); + writel(win_enable, base + WINDOW_BAR_ENABLE(0)); + writel(win_enable, base + WINDOW_BAR_ENABLE(1)); + + return 0; +} + static struct dma_async_tx_descriptor * mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags) { struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); struct mv_xor_desc_slot *sw_desc; + int ret; if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) return NULL; @@ -486,6 +564,11 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, "%s src_cnt: %d len: %zu dest %pad flags: %ld\n", __func__, src_cnt, len, &dest, flags); + /* Check if a new window needs to get added for 'dest' */ + ret = mv_xor_add_io_win(mv_chan, dest); + if (ret) + return NULL; + sw_desc = mv_chan_alloc_slot(mv_chan); if (sw_desc) { sw_desc->type = DMA_XOR; @@ -493,8 +576,13 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, mv_desc_init(sw_desc, dest, len, flags); if (mv_chan->op_in_desc == XOR_MODE_IN_DESC) mv_desc_set_mode(sw_desc); - while (src_cnt--) + while (src_cnt--) { + /* Check if a new window needs to get added for 'src' */ + ret = mv_xor_add_io_win(mv_chan, src[src_cnt]); + if (ret) + return NULL; mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]); + } } dev_dbg(mv_chan_to_devp(mv_chan), @@ -959,6 +1047,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev, mv_chan->op_in_desc = XOR_MODE_IN_DESC; dma_dev = &mv_chan->dmadev; + mv_chan->xordev = xordev; /* * These source and destination dummy buffers are used to implement @@ -1086,6 +1175,10 @@ mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, dram->mbus_dram_target_id, base + WINDOW_BASE(i)); writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); + /* Fill the caching variables for later use */ + xordev->win_start[i] = cs->base; + xordev->win_end[i] = cs->base + cs->size - 1; + win_enable |= (1 << i); win_enable |= 3 << (16 + (2 * i)); } diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index bf56e082e7cd..88eeab222a23 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h @@ -80,12 +80,17 @@ #define WINDOW_BAR_ENABLE(chan) (0x40 + ((chan) << 2)) #define WINDOW_OVERRIDE_CTRL(chan) (0xA0 + ((chan) << 2)) +#define WINDOW_COUNT 8 + struct mv_xor_device { void __iomem *xor_base; void __iomem *xor_high_base; struct clk *clk; struct mv_xor_chan *channels[MV_XOR_MAX_CHANNELS]; int xor_type; + + u32 win_start[WINDOW_COUNT]; + u32 win_end[WINDOW_COUNT]; }; /** @@ -127,6 +132,8 @@ struct mv_xor_chan { char dummy_dst[MV_XOR_MIN_BYTE_COUNT]; dma_addr_t dummy_src_addr, dummy_dst_addr; u32 saved_config_reg, saved_int_mask_reg; + + struct mv_xor_device *xordev; }; /** From 71d0bc65ba089e8e769cddad66dae8cb4c49a0d4 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 23 Aug 2016 16:09:40 +0300 Subject: [PATCH 086/169] dmaengine: hsu: refactor hsu_dma_do_irq() to 
return int Since we have nice macro IRQ_RETVAL() we would use it to convert a flag of handled interrupt from int to irqreturn_t. The rationale of doing this is: a) hence we implicitly mark hsu_dma_do_irq() as an auxiliary function that can't be used as interrupt handler directly, and b) to be in align with serial driver which is using serial8250_handle_irq() that returns plain int by design. Signed-off-by: Andy Shevchenko Acked-by: Greg Kroah-Hartman Signed-off-by: Vinod Koul --- drivers/dma/hsu/hsu.c | 9 ++++----- drivers/dma/hsu/pci.c | 6 +++--- drivers/tty/serial/8250/8250_mid.c | 8 ++++---- include/linux/dma/hsu.h | 9 ++++----- 4 files changed, 15 insertions(+), 17 deletions(-) diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c index c5f21efd6090..29d04ca71d52 100644 --- a/drivers/dma/hsu/hsu.c +++ b/drivers/dma/hsu/hsu.c @@ -200,10 +200,9 @@ EXPORT_SYMBOL_GPL(hsu_dma_get_status); * is not a normal timeout interrupt, ie. hsu_dma_get_status() returned 0. * * Return: - * IRQ_NONE for invalid channel number, IRQ_HANDLED otherwise. + * 0 for invalid channel number, 1 otherwise. */ -irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, - u32 status) +int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status) { struct hsu_dma_chan *hsuc; struct hsu_dma_desc *desc; @@ -211,7 +210,7 @@ irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, /* Sanity check */ if (nr >= chip->hsu->nr_channels) - return IRQ_NONE; + return 0; hsuc = &chip->hsu->chan[nr]; @@ -230,7 +229,7 @@ irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, } spin_unlock_irqrestore(&hsuc->vchan.lock, flags); - return IRQ_HANDLED; + return 1; } EXPORT_SYMBOL_GPL(hsu_dma_do_irq); diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c index 9916058531d9..b51639f045ed 100644 --- a/drivers/dma/hsu/pci.c +++ b/drivers/dma/hsu/pci.c @@ -29,7 +29,7 @@ static irqreturn_t hsu_pci_irq(int irq, void *dev) u32 dmaisr; u32 status; unsigned short i; - irqreturn_t ret = IRQ_NONE; + int ret = 0; int err; dmaisr = readl(chip->regs + HSU_PCI_DMAISR); @@ -37,14 +37,14 @@ static irqreturn_t hsu_pci_irq(int irq, void *dev) if (dmaisr & 0x1) { err = hsu_dma_get_status(chip, i, &status); if (err > 0) - ret |= IRQ_HANDLED; + ret |= 1; else if (err == 0) ret |= hsu_dma_do_irq(chip, i, status); } dmaisr >>= 1; } - return ret; + return IRQ_RETVAL(ret); } static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c index 339de9cd0866..121a7f2d4697 100644 --- a/drivers/tty/serial/8250/8250_mid.c +++ b/drivers/tty/serial/8250/8250_mid.c @@ -99,27 +99,27 @@ static int dnv_handle_irq(struct uart_port *p) struct uart_8250_port *up = up_to_u8250p(p); unsigned int fisr = serial_port_in(p, INTEL_MID_UART_DNV_FISR); u32 status; - int ret = IRQ_NONE; + int ret = 0; int err; if (fisr & BIT(2)) { err = hsu_dma_get_status(&mid->dma_chip, 1, &status); if (err > 0) { serial8250_rx_dma_flush(up); - ret |= IRQ_HANDLED; + ret |= 1; } else if (err == 0) ret |= hsu_dma_do_irq(&mid->dma_chip, 1, status); } if (fisr & BIT(1)) { err = hsu_dma_get_status(&mid->dma_chip, 0, &status); if (err > 0) - ret |= IRQ_HANDLED; + ret |= 1; else if (err == 0) ret |= hsu_dma_do_irq(&mid->dma_chip, 0, status); } if (fisr & BIT(0)) ret |= serial8250_handle_irq(p, serial_port_in(p, UART_IIR)); - return ret; + return IRQ_RETVAL(ret); } #define DNV_DMA_CHAN_OFFSET 0x80 diff --git a/include/linux/dma/hsu.h 
b/include/linux/dma/hsu.h index aaff68efba5d..197eec63e501 100644 --- a/include/linux/dma/hsu.h +++ b/include/linux/dma/hsu.h @@ -41,8 +41,7 @@ struct hsu_dma_chip { /* Export to the internal users */ int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr, u32 *status); -irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, - u32 status); +int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status); /* Export to the platform drivers */ int hsu_dma_probe(struct hsu_dma_chip *chip); @@ -53,10 +52,10 @@ static inline int hsu_dma_get_status(struct hsu_dma_chip *chip, { return 0; } -static inline irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, - unsigned short nr, u32 status) +static inline int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, + u32 status) { - return IRQ_NONE; + return 0; } static inline int hsu_dma_probe(struct hsu_dma_chip *chip) { return -ENODEV; } static inline int hsu_dma_remove(struct hsu_dma_chip *chip) { return 0; } From d86467249d639f3915ee6379aef5e7c1bdd9f74b Mon Sep 17 00:00:00 2001 From: Eugeniy Paltsev Date: Wed, 14 Sep 2016 20:40:38 +0300 Subject: [PATCH 087/169] dmaengine: dmatest: Restore "memcpy" as default mode Commit 0d4cb44da6ca0e8 ("dmaengine: dmatest: Add support for scatter-gather DMA mode") changes default "dmatest" behavior by changing default mode from "memcpy" to "scatter-gather". Now "memcpy" gets back as default mode. Signed-off-by: Eugeniy Paltsev Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 53e972819ccd..cf76fc6149e5 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -56,10 +56,10 @@ module_param(sg_buffers, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(sg_buffers, "Number of scatter gather buffers (default: 1)"); -static unsigned int dmatest = 1; +static unsigned int dmatest; module_param(dmatest, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dmatest, - "dmatest 0-memcpy 1-slave_sg (default: 1)"); + "dmatest 0-memcpy 1-slave_sg (default: 0)"); static unsigned int xor_sources = 3; module_param(xor_sources, uint, S_IRUGO | S_IWUSR); From ba409b31b3d37b52dda4eefcde04f5837c7ee4aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20S=C3=B6derlund?= Date: Wed, 10 Aug 2016 13:22:14 +0200 Subject: [PATCH 088/169] dma-mapping: add {map,unmap}_resource to dma_map_ops MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add methods to handle mapping of device resources from a physical address. This is needed for example to be able to map MMIO FIFO registers to a IOMMU. 
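For illustration, a slave DMA driver whose controller sits behind an IOMMU could use the dma_map_resource()/dma_unmap_resource() helpers built on top of these ops later in this series to make a peripheral FIFO register usable as a DMA address. The function names, sizes and the DMA_BIDIRECTIONAL direction below are hypothetical and purely illustrative (the rcar-dmac conversion later in the series shows a real direction policy); this is a sketch, not code from any driver here:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_map_fifo(struct device *dmac_dev, phys_addr_t fifo_phys,
                            size_t fifo_size, dma_addr_t *fifo_dma)
{
        *fifo_dma = dma_map_resource(dmac_dev, fifo_phys, fifo_size,
                                     DMA_BIDIRECTIONAL, 0);
        if (dma_mapping_error(dmac_dev, *fifo_dma))
                return -EIO;

        /* *fifo_dma is what gets programmed into the DMA controller */
        return 0;
}

static void example_unmap_fifo(struct device *dmac_dev, dma_addr_t fifo_dma,
                               size_t fifo_size)
{
        dma_unmap_resource(dmac_dev, fifo_dma, fifo_size,
                           DMA_BIDIRECTIONAL, 0);
}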
Signed-off-by: Niklas Söderlund Reviewed-by: Laurent Pinchart Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- include/linux/dma-mapping.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 66533e18276c..673b7be85f7a 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -95,6 +95,12 @@ struct dma_map_ops { struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs); + dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr, + size_t size, enum dma_data_direction dir, + unsigned long attrs); + void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + unsigned long attrs); void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir); From 0e74b34dfc3318bf4c7e51349d453d49fb8e9e16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20S=C3=B6derlund?= Date: Wed, 10 Aug 2016 13:22:15 +0200 Subject: [PATCH 089/169] dma-debug: add support for resource mappings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A MMIO mapped resource can not be represented by a struct page so a new debug type is needed to handle this. This patch add such type and functionality to add/remove entries and how to translate them to a physical address. Signed-off-by: Niklas Söderlund Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- include/linux/dma-debug.h | 19 ++++++++++++++ lib/dma-debug.c | 52 +++++++++++++++++++++++++++++++++++++-- 2 files changed, 69 insertions(+), 2 deletions(-) diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h index fe8cb610deac..c7d844f09c3a 100644 --- a/include/linux/dma-debug.h +++ b/include/linux/dma-debug.h @@ -56,6 +56,13 @@ extern void debug_dma_alloc_coherent(struct device *dev, size_t size, extern void debug_dma_free_coherent(struct device *dev, size_t size, void *virt, dma_addr_t addr); +extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr, + size_t size, int direction, + dma_addr_t dma_addr); + +extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr, + size_t size, int direction); + extern void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, int direction); @@ -141,6 +148,18 @@ static inline void debug_dma_free_coherent(struct device *dev, size_t size, { } +static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr, + size_t size, int direction, + dma_addr_t dma_addr) +{ +} + +static inline void debug_dma_unmap_resource(struct device *dev, + dma_addr_t dma_addr, size_t size, + int direction) +{ +} + static inline void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, int direction) diff --git a/lib/dma-debug.c b/lib/dma-debug.c index fcfa1939ac41..2ba086b0d3e7 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -43,6 +43,7 @@ enum { dma_debug_page, dma_debug_sg, dma_debug_coherent, + dma_debug_resource, }; enum map_err_types { @@ -150,8 +151,9 @@ static const char *const maperr2str[] = { [MAP_ERR_CHECKED] = "dma map error checked", }; -static const char *type2name[4] = { "single", "page", - "scather-gather", "coherent" }; +static const char *type2name[5] = { "single", "page", + "scather-gather", "coherent", + "resource" }; static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", "DMA_FROM_DEVICE", "DMA_NONE" }; @@ -399,6 +401,9 @@ static void 
hash_bucket_del(struct dma_debug_entry *entry) static unsigned long long phys_addr(struct dma_debug_entry *entry) { + if (entry->type == dma_debug_resource) + return __pfn_to_phys(entry->pfn) + entry->offset; + return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset; } @@ -1495,6 +1500,49 @@ void debug_dma_free_coherent(struct device *dev, size_t size, } EXPORT_SYMBOL(debug_dma_free_coherent); +void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size, + int direction, dma_addr_t dma_addr) +{ + struct dma_debug_entry *entry; + + if (unlikely(dma_debug_disabled())) + return; + + entry = dma_entry_alloc(); + if (!entry) + return; + + entry->type = dma_debug_resource; + entry->dev = dev; + entry->pfn = __phys_to_pfn(addr); + entry->offset = offset_in_page(addr); + entry->size = size; + entry->dev_addr = dma_addr; + entry->direction = direction; + entry->map_err_type = MAP_ERR_NOT_CHECKED; + + add_dma_entry(entry); +} +EXPORT_SYMBOL(debug_dma_map_resource); + +void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr, + size_t size, int direction) +{ + struct dma_debug_entry ref = { + .type = dma_debug_resource, + .dev = dev, + .dev_addr = dma_addr, + .size = size, + .direction = direction, + }; + + if (unlikely(dma_debug_disabled())) + return; + + check_unmap(&ref); +} +EXPORT_SYMBOL(debug_dma_unmap_resource); + void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, int direction) { From 6f3d87968f9c8b529bc81eff5a1f45e92553493d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20S=C3=B6derlund?= Date: Wed, 10 Aug 2016 13:22:16 +0200 Subject: [PATCH 090/169] dma-mapping: add dma_{map,unmap}_resource MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Map/Unmap a device MMIO resource from a physical address. If no dma_map_ops method is available the operation is a no-op. Signed-off-by: Niklas Söderlund Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- Documentation/DMA-API.txt | 22 +++++++++++++++++----- include/linux/dma-mapping.h | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 5 deletions(-) diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt index 1d26eeb6b5f6..6b20128fab8a 100644 --- a/Documentation/DMA-API.txt +++ b/Documentation/DMA-API.txt @@ -277,14 +277,26 @@ and parameters are provided to do partial page mapping, it is recommended that you never use these unless you really know what the cache width is. +dma_addr_t +dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size, + enum dma_data_direction dir, unsigned long attrs) + +void +dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir, unsigned long attrs) + +API for mapping and unmapping for MMIO resources. All the notes and +warnings for the other mapping APIs apply here. The API should only be +used to map device MMIO resources, mapping of RAM is not permitted. + int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -In some circumstances dma_map_single() and dma_map_page() will fail to create -a mapping. A driver can check for these errors by testing the returned -DMA address with dma_mapping_error(). A non-zero return value means the mapping -could not be created and the driver should take appropriate action (e.g. -reduce current DMA mapping usage or delay and try again later). +In some circumstances dma_map_single(), dma_map_page() and dma_map_resource() +will fail to create a mapping. 
A driver can check for these errors by testing +the returned DMA address with dma_mapping_error(). A non-zero return value +means the mapping could not be created and the driver should take appropriate +action (e.g. reduce current DMA mapping usage or delay and try again later). int dma_map_sg(struct device *dev, struct scatterlist *sg, diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 673b7be85f7a..6e00c7fdbbd3 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -264,6 +264,42 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, debug_dma_unmap_page(dev, addr, size, dir, false); } +static inline dma_addr_t dma_map_resource(struct device *dev, + phys_addr_t phys_addr, + size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + unsigned long pfn = __phys_to_pfn(phys_addr); + dma_addr_t addr; + + BUG_ON(!valid_dma_direction(dir)); + + /* Don't allow RAM to be mapped */ + BUG_ON(pfn_valid(pfn)); + + addr = phys_addr; + if (ops->map_resource) + addr = ops->map_resource(dev, phys_addr, size, dir, attrs); + + debug_dma_map_resource(dev, phys_addr, size, dir, addr); + + return addr; +} + +static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_resource) + ops->unmap_resource(dev, addr, size, dir, attrs); + debug_dma_unmap_resource(dev, addr, size, dir); +} + static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) From 24ed5d2c07e36ebaf0e8ce483a41b8e44d7499d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20S=C3=B6derlund?= Date: Wed, 10 Aug 2016 13:22:17 +0200 Subject: [PATCH 091/169] arm: dma-mapping: add {map,unmap}_resource for iommu ops MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add methods to map/unmap device resources addresses for dma_map_ops that are IOMMU aware. This is needed to map a device MMIO register from a physical address. 
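To make the page-alignment handling in these new hooks concrete, here is a worked example with hypothetical numbers (4 KiB pages, a 4-byte FIFO register at physical address 0xe6e20018):

        addr   = 0xe6e20018 & PAGE_MASK   = 0xe6e20000
        offset = 0xe6e20018 & ~PAGE_MASK  = 0x18
        len    = PAGE_ALIGN(4 + 0x18)     = 0x1000

One page of IOVA space is allocated and mapped with IOMMU_MMIO permission, and the caller gets back the allocated IOVA plus 0x18, so the sub-page offset of the register survives into the returned DMA handle.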
Signed-off-by: Niklas Söderlund Reviewed-by: Laurent Pinchart Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- arch/arm/mm/dma-mapping.c | 63 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index c6834c0cfd1c..746eb29c6f0c 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -2014,6 +2014,63 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, __free_iova(mapping, iova, len); } +/** + * arm_iommu_map_resource - map a device resource for DMA + * @dev: valid struct device pointer + * @phys_addr: physical address of resource + * @size: size of resource to map + * @dir: DMA transfer direction + */ +static dma_addr_t arm_iommu_map_resource(struct device *dev, + phys_addr_t phys_addr, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); + dma_addr_t dma_addr; + int ret, prot; + phys_addr_t addr = phys_addr & PAGE_MASK; + unsigned int offset = phys_addr & ~PAGE_MASK; + size_t len = PAGE_ALIGN(size + offset); + + dma_addr = __alloc_iova(mapping, len); + if (dma_addr == DMA_ERROR_CODE) + return dma_addr; + + prot = __dma_direction_to_prot(dir) | IOMMU_MMIO; + + ret = iommu_map(mapping->domain, dma_addr, addr, len, prot); + if (ret < 0) + goto fail; + + return dma_addr + offset; +fail: + __free_iova(mapping, dma_addr, len); + return DMA_ERROR_CODE; +} + +/** + * arm_iommu_unmap_resource - unmap a device DMA resource + * @dev: valid struct device pointer + * @dma_handle: DMA address to resource + * @size: size of resource to map + * @dir: DMA transfer direction + */ +static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); + dma_addr_t iova = dma_handle & PAGE_MASK; + unsigned int offset = dma_handle & ~PAGE_MASK; + size_t len = PAGE_ALIGN(size + offset); + + if (!iova) + return; + + iommu_unmap(mapping->domain, iova, len); + __free_iova(mapping, iova, len); +} + static void arm_iommu_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { @@ -2057,6 +2114,9 @@ struct dma_map_ops iommu_ops = { .unmap_sg = arm_iommu_unmap_sg, .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu, .sync_sg_for_device = arm_iommu_sync_sg_for_device, + + .map_resource = arm_iommu_map_resource, + .unmap_resource = arm_iommu_unmap_resource, }; struct dma_map_ops iommu_coherent_ops = { @@ -2070,6 +2130,9 @@ struct dma_map_ops iommu_coherent_ops = { .map_sg = arm_coherent_iommu_map_sg, .unmap_sg = arm_coherent_iommu_unmap_sg, + + .map_resource = arm_iommu_map_resource, + .unmap_resource = arm_iommu_unmap_resource, }; /** From c5ed08e988a35b4b7f8c54dfd9edfb592991fae6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20S=C3=B6derlund?= Date: Wed, 10 Aug 2016 13:22:18 +0200 Subject: [PATCH 092/169] dmaengine: rcar-dmac: group slave configuration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Group slave address and transfer size in own structs for source and destination. This is in preparation for hooking up the dma-mapping API to the slave addresses. 
Signed-off-by: Niklas Söderlund Reviewed-by: Laurent Pinchart Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- drivers/dma/sh/rcar-dmac.c | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 0dd953884d1d..cf983a9df3f1 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -117,15 +117,23 @@ struct rcar_dmac_desc_page { ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \ sizeof(struct rcar_dmac_xfer_chunk)) +/* + * struct rcar_dmac_chan_slave - Slave configuration + * @slave_addr: slave memory address + * @xfer_size: size (in bytes) of hardware transfers + */ +struct rcar_dmac_chan_slave { + phys_addr_t slave_addr; + unsigned int xfer_size; +}; + /* * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel * @chan: base DMA channel object * @iomem: channel I/O memory base * @index: index of this channel in the controller - * @src_xfer_size: size (in bytes) of hardware transfers on the source side - * @dst_xfer_size: size (in bytes) of hardware transfers on the destination side - * @src_slave_addr: slave source memory address - * @dst_slave_addr: slave destination memory address + * @src: slave memory address and size on the source side + * @dst: slave memory address and size on the destination side * @mid_rid: hardware MID/RID for the DMA client using this channel * @lock: protects the channel CHCR register and the desc members * @desc.free: list of free descriptors @@ -142,10 +150,8 @@ struct rcar_dmac_chan { void __iomem *iomem; unsigned int index; - unsigned int src_xfer_size; - unsigned int dst_xfer_size; - dma_addr_t src_slave_addr; - dma_addr_t dst_slave_addr; + struct rcar_dmac_chan_slave src; + struct rcar_dmac_chan_slave dst; int mid_rid; spinlock_t lock; @@ -793,13 +799,13 @@ static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan, case DMA_DEV_TO_MEM: chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED | RCAR_DMACHCR_RS_DMARS; - xfer_size = chan->src_xfer_size; + xfer_size = chan->src.xfer_size; break; case DMA_MEM_TO_DEV: chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC | RCAR_DMACHCR_RS_DMARS; - xfer_size = chan->dst_xfer_size; + xfer_size = chan->dst.xfer_size; break; case DMA_MEM_TO_MEM: @@ -1040,7 +1046,7 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, } dev_addr = dir == DMA_DEV_TO_MEM - ? rchan->src_slave_addr : rchan->dst_slave_addr; + ? rchan->src.slave_addr : rchan->dst.slave_addr; return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr, dir, flags, false); } @@ -1095,7 +1101,7 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, } dev_addr = dir == DMA_DEV_TO_MEM - ? rchan->src_slave_addr : rchan->dst_slave_addr; + ? rchan->src.slave_addr : rchan->dst.slave_addr; desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr, dir, flags, true); @@ -1112,10 +1118,10 @@ static int rcar_dmac_device_config(struct dma_chan *chan, * We could lock this, but you shouldn't be configuring the * channel, while using it... 
*/ - rchan->src_slave_addr = cfg->src_addr; - rchan->dst_slave_addr = cfg->dst_addr; - rchan->src_xfer_size = cfg->src_addr_width; - rchan->dst_xfer_size = cfg->dst_addr_width; + rchan->src.slave_addr = cfg->src_addr; + rchan->dst.slave_addr = cfg->dst_addr; + rchan->src.xfer_size = cfg->src_addr_width; + rchan->dst.xfer_size = cfg->dst_addr_width; return 0; } From 9f878603dbdb7db357fc7d6f60b55a0b9fd6e2f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20S=C3=B6derlund?= Date: Wed, 10 Aug 2016 13:22:19 +0200 Subject: [PATCH 093/169] dmaengine: rcar-dmac: add iommu support for slave transfers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Enable slave transfers to a device behind a IPMMU by mapping the slave addresses using the dma-mapping API. Signed-off-by: Niklas Söderlund Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- drivers/dma/sh/rcar-dmac.c | 82 ++++++++++++++++++++++++++++++++++---- 1 file changed, 74 insertions(+), 8 deletions(-) diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index cf983a9df3f1..22a5e406a589 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -127,6 +127,18 @@ struct rcar_dmac_chan_slave { unsigned int xfer_size; }; +/* + * struct rcar_dmac_chan_map - Map of slave device phys to dma address + * @addr: slave dma address + * @dir: direction of mapping + * @slave: slave configuration that is mapped + */ +struct rcar_dmac_chan_map { + dma_addr_t addr; + enum dma_data_direction dir; + struct rcar_dmac_chan_slave slave; +}; + /* * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel * @chan: base DMA channel object @@ -152,6 +164,7 @@ struct rcar_dmac_chan { struct rcar_dmac_chan_slave src; struct rcar_dmac_chan_slave dst; + struct rcar_dmac_chan_map map; int mid_rid; spinlock_t lock; @@ -1029,13 +1042,65 @@ rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, DMA_MEM_TO_MEM, flags, false); } +static int rcar_dmac_map_slave_addr(struct dma_chan *chan, + enum dma_transfer_direction dir) +{ + struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); + struct rcar_dmac_chan_map *map = &rchan->map; + phys_addr_t dev_addr; + size_t dev_size; + enum dma_data_direction dev_dir; + + if (dir == DMA_DEV_TO_MEM) { + dev_addr = rchan->src.slave_addr; + dev_size = rchan->src.xfer_size; + dev_dir = DMA_TO_DEVICE; + } else { + dev_addr = rchan->dst.slave_addr; + dev_size = rchan->dst.xfer_size; + dev_dir = DMA_FROM_DEVICE; + } + + /* Reuse current map if possible. */ + if (dev_addr == map->slave.slave_addr && + dev_size == map->slave.xfer_size && + dev_dir == map->dir) + return 0; + + /* Remove old mapping if present. */ + if (map->slave.xfer_size) + dma_unmap_resource(chan->device->dev, map->addr, + map->slave.xfer_size, map->dir, 0); + map->slave.xfer_size = 0; + + /* Create new slave address map. */ + map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size, + dev_dir, 0); + + if (dma_mapping_error(chan->device->dev, map->addr)) { + dev_err(chan->device->dev, + "chan%u: failed to map %zx@%pap", rchan->index, + dev_size, &dev_addr); + return -EIO; + } + + dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n", + rchan->index, dev_size, &dev_addr, &map->addr, + dev_dir == DMA_TO_DEVICE ? 
"DMA_TO_DEVICE" : "DMA_FROM_DEVICE"); + + map->slave.slave_addr = dev_addr; + map->slave.xfer_size = dev_size; + map->dir = dev_dir; + + return 0; +} + static struct dma_async_tx_descriptor * rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction dir, unsigned long flags, void *context) { struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); - dma_addr_t dev_addr; /* Someone calling slave DMA on a generic channel? */ if (rchan->mid_rid < 0 || !sg_len) { @@ -1045,9 +1110,10 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, return NULL; } - dev_addr = dir == DMA_DEV_TO_MEM - ? rchan->src.slave_addr : rchan->dst.slave_addr; - return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr, + if (rcar_dmac_map_slave_addr(chan, dir)) + return NULL; + + return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, dir, flags, false); } @@ -1061,7 +1127,6 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); struct dma_async_tx_descriptor *desc; struct scatterlist *sgl; - dma_addr_t dev_addr; unsigned int sg_len; unsigned int i; @@ -1073,6 +1138,9 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, return NULL; } + if (rcar_dmac_map_slave_addr(chan, dir)) + return NULL; + sg_len = buf_len / period_len; if (sg_len > RCAR_DMAC_MAX_SG_LEN) { dev_err(chan->device->dev, @@ -1100,9 +1168,7 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, sg_dma_len(&sgl[i]) = period_len; } - dev_addr = dir == DMA_DEV_TO_MEM - ? rchan->src.slave_addr : rchan->dst.slave_addr; - desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr, + desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, dir, flags, true); kfree(sgl); From f3086ff62b3f73caf69421aa5599b13ad8892681 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Tue, 20 Sep 2016 09:02:43 +0530 Subject: [PATCH 094/169] dmaengine: jz4740: remove unused arch header The defines in asm/mach-jz4740/dma.h are not used by driver so remove it Acked-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/dma-jz4740.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index 9689b36c005a..d50273fed715 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c @@ -21,8 +21,6 @@ #include #include -#include - #include "virt-dma.h" #define JZ_DMA_NR_CHANS 6 From d78d6c073a43dd9c6050c1df9921f238eb25ee5d Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 2 Sep 2016 15:25:11 +0530 Subject: [PATCH 095/169] dmaengine: jz4740: enable COMPILE_TEST To get more coverage, enable COMPILE_TEST for this driver. Acked-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 739f797b40d9..51ebc3181cf2 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -114,7 +114,7 @@ config DMA_BCM2835 config DMA_JZ4740 tristate "JZ4740 DMA support" - depends on MACH_JZ4740 + depends on MACH_JZ4740 || COMPILE_TEST select DMA_ENGINE select DMA_VIRTUAL_CHANNELS From a952b28788b5356780447ba23e89ea7365b19a1f Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 2 Sep 2016 15:27:20 +0530 Subject: [PATCH 096/169] dmaengine: jz4780: enable COMPILE_TEST To get more coverage, enable COMPILE_TEST for this driver. 
Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 51ebc3181cf2..56d4c3d0c160 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -120,7 +120,7 @@ config DMA_JZ4740 config DMA_JZ4780 tristate "JZ4780 DMA support" - depends on MACH_JZ4780 + depends on MACH_JZ4780 || COMPILE_TEST select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help From 4f5db8c8f8ee965436de24256bbd7a01778d4cdb Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 2 Sep 2016 15:27:55 +0530 Subject: [PATCH 097/169] dmaengine: jz4780: make jz4780_dma_prep_dma_memcpy static Sparse complains: drivers/dma/dma-jz4780.c:399:32: warning: symbol 'jz4780_dma_prep_dma_memcpy' was not declared. Should it be static? So make this static Signed-off-by: Vinod Koul --- drivers/dma/dma-jz4780.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index dade7c47ff18..c36c14c3f32c 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -396,7 +396,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic( return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags); } -struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy( +static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy( struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { From 3fd386625679bd2adb94d2a3d25dd2fdd38b52e3 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Tue, 13 Sep 2016 22:18:32 +0530 Subject: [PATCH 098/169] dmaengine: coh901318: use correct print specifiers This driver when compiled on 64 bits gave warnings: drivers/dma/coh901318.c: In function 'coh901318_list_print': warning: format '%x' expects argument of type 'unsigned int', but argument 7 has type 'dma_addr_t' [-Wformat=] warning: format '%x' expects argument of type 'unsigned int', but argument 8 has type 'dma_addr_t' [-Wformat=] warning: format '%x' expects argument of type 'unsigned int', but argument 9 has type 'dma_addr_t' [-Wformat=] drivers/dma/coh901318.c: In function 'coh901318_prep_memcpy': warning: format '%x' expects argument of type 'unsigned int', but argument 6 has type 'dma_addr_t' [-Wformat=] warning: format '%x' expects argument of type 'unsigned int', but argument 7 has type 'dma_addr_t' [-Wformat=] warning: format '%d' expects argument of type 'int', but argument 8 has type 'size_t' [-Wformat=] We should use %pad to print 'dma_addr_t' values and %zu to print size_t values Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/coh901318.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index e4acd63e42aa..c0cd1d8e5b2e 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1319,10 +1319,10 @@ static void coh901318_list_print(struct coh901318_chan *cohc, int i = 0; while (l) { - dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x" - ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n", - i, l, l->control, l->src_addr, l->dst_addr, - l->link_addr, l->virt_link_addr); + dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%pad" + ", dst 0x%pad, link 0x%pad virt_link_addr 0x%p\n", + i, l, l->control, &l->src_addr, &l->dst_addr, + &l->link_addr, l->virt_link_addr); i++; l = l->virt_link_addr; } @@ -2247,8 +2247,8 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, spin_lock_irqsave(&cohc->lock, flg); 
dev_vdbg(COHC_2_DEV(cohc), - "[%s] channel %d src 0x%x dest 0x%x size %d\n", - __func__, cohc->id, src, dest, size); + "[%s] channel %d src 0x%pad dest 0x%pad size %zu\n", + __func__, cohc->id, &src, &dest, size); if (flags & DMA_PREP_INTERRUPT) /* Trigger interrupt after last lli */ From 66a1a51270a2ed057b41cda234cc3fe2ff7c64bf Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Tue, 13 Sep 2016 22:25:07 +0530 Subject: [PATCH 099/169] dmaengine: coh901318: return ssize_t for coh901318_debugfs_read The file_operations.read expects return as ssize_t, so update coh901318_debugfs_read to return ssize_t to fix the warning: drivers/dma/coh901318.c:1369:2: warning: initialization from incompatible pointer type [enabled by default] .read = coh901318_debugfs_read, Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/coh901318.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index c0cd1d8e5b2e..57619adf79b4 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1335,7 +1335,7 @@ static void coh901318_list_print(struct coh901318_chan *cohc, static struct coh901318_base *debugfs_dma_base; static struct dentry *dma_dentry; -static int coh901318_debugfs_read(struct file *file, char __user *buf, +static ssize_t coh901318_debugfs_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos) { u64 started_channels = debugfs_dma_base->pm.started_channels; From c021d8351f1dfa296347210ee7befd571727f0a2 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Tue, 13 Sep 2016 22:27:53 +0530 Subject: [PATCH 100/169] dmaengine: coh901318: fix pointer cast warnings On some systems, pointer can be large than unsigned int, triggering warning pointer-to-int-cast on conversion. drivers/dma/coh901318.c: In function 'coh901318_filter_id': drivers/dma/coh901318.c:1769:23: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast] unsigned int ch_nr = (unsigned int) chan_id; Also, converting an iomem pointer for print leads to warn on some system which can be avoided with %p specfier drivers/dma/coh901318.c: In function 'coh901318_probe': drivers/dma/coh901318.c:2748:3: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast] (u32) base->virtbase); Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/coh901318.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 57619adf79b4..66d3507c97e4 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1766,7 +1766,7 @@ static int coh901318_resume(struct dma_chan *chan) bool coh901318_filter_id(struct dma_chan *chan, void *chan_id) { - unsigned int ch_nr = (unsigned int) chan_id; + unsigned long ch_nr = (unsigned long) chan_id; if (ch_nr == to_coh901318_chan(chan)->id) return true; @@ -2744,8 +2744,8 @@ static int __init coh901318_probe(struct platform_device *pdev) goto err_register_of_dma; platform_set_drvdata(pdev, base); - dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", - (u32) base->virtbase); + dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%p\n", + base->virtbase); return err; From d943df870572e4774abfb16ef20c9ec28edbd94c Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Tue, 13 Sep 2016 22:55:01 +0530 Subject: [PATCH 101/169] dmaengine: coh901318: use NULL for pointer initialization Sparse complains: drivers/dma/coh901318_lli.c:78:31: warning: Using plain integer as NULL pointer 
drivers/dma/coh901318_lli.c:91:39: warning: Using plain integer as NULL pointer Use NULL for pointer initialization rather than plain integer Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/coh901318_lli.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c index 702112d547c8..d612b2e5abc4 100644 --- a/drivers/dma/coh901318_lli.c +++ b/drivers/dma/coh901318_lli.c @@ -75,7 +75,7 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len) lli = head; lli->phy_this = phy; lli->link_addr = 0x00000000; - lli->virt_link_addr = 0x00000000U; + lli->virt_link_addr = NULL; for (i = 1; i < len; i++) { lli_prev = lli; @@ -88,7 +88,7 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len) DEBUGFS_POOL_COUNTER_ADD(pool, 1); lli->phy_this = phy; lli->link_addr = 0x00000000; - lli->virt_link_addr = 0x00000000U; + lli->virt_link_addr = NULL; lli_prev->link_addr = phy; lli_prev->virt_link_addr = lli; From 6e450376e5877f2671db552a7693e9325e31a1cb Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 2 Sep 2016 15:29:49 +0530 Subject: [PATCH 102/169] dmaengine: coh901318: enable COMPILE_TEST To get more coverage, enable COMPILE_TEST for this driver. Acked-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 56d4c3d0c160..29919ae00381 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -102,7 +102,7 @@ config AXI_DMAC config COH901318 bool "ST-Ericsson COH901318 DMA support" select DMA_ENGINE - depends on ARCH_U300 + depends on ARCH_U300 || COMPILE_TEST help Enable support for ST-Ericsson COH 901 318 DMA. From f92e934d57518ddb5ea9fbdac06c19091aa1316e Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 14 Sep 2016 15:15:43 +0530 Subject: [PATCH 103/169] dmaengine: sa11x0: use correct print specifiers for dma_addr_t This driver when compiled on 64 bits gave warnings: drivers/dma/sa11x0-dma.c:466:27: warning: format '%x' expects argument of type 'unsigned int', but argument 4 has type 'dma_addr_t' [-Wformat=] drivers/dma/sa11x0-dma.c:554:31: warning: format '%x' expects argument of type 'unsigned int', but argument 5 has type 'dma_addr_t' [-Wformat=] drivers/dma/sa11x0-dma.c:696:34: warning: format '%x' expects argument of type 'unsigned int', but argument 5 has type 'dma_addr_t' [-Wformat=] We should use %pad to print 'dma_addr_t' values. 
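The subtlety worth remembering is that %pad consumes a pointer to the dma_addr_t rather than the value itself. A minimal sketch with hypothetical names (not code from this driver):

#include <linux/device.h>
#include <linux/scatterlist.h>

static void example_debug_print(struct device *dev, struct scatterlist *sg)
{
        dma_addr_t addr = sg_dma_address(sg);
        size_t len = sg_dma_len(sg);

        /*
         * "0x%x", addr or "%d", len warns wherever dma_addr_t or size_t
         * is wider than int; %pad (fed a *pointer* to the dma_addr_t)
         * and %zu are correct on both 32-bit and 64-bit builds.
         */
        dev_dbg(dev, "mapped %zu bytes at %pad\n", len, &addr);
}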
Acked-by: Russell King Signed-off-by: Vinod Koul --- drivers/dma/sa11x0-dma.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 43db255050d2..137fad5859f6 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -463,7 +463,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, dma_addr_t addr = sa11x0_dma_pos(p); unsigned i; - dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); + dev_vdbg(d->slave.dev, "tx_status: addr:%pad\n", &addr); for (i = 0; i < txd->sglen; i++) { dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n", @@ -551,8 +551,8 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( if (len > DMA_MAX_SIZE) j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1; if (addr & DMA_ALIGN) { - dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n", - &c->vc, addr); + dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %pad\n", + &c->vc, &addr); return NULL; } } @@ -693,8 +693,8 @@ static int sa11x0_dma_device_config(struct dma_chan *chan, if (maxburst == 8) ddar |= DDAR_BS; - dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", - &c->vc, addr, width, maxburst); + dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %pad width %u burst %u\n", + &c->vc, &addr, width, maxburst); c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6; From 872b4af48f878e361739bae7d3f300c0a35ecfd0 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 14 Sep 2016 15:22:49 +0530 Subject: [PATCH 104/169] dmaengine: sa11x0: use correct print specifiers for u32 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This driver when compiled on 64 bits gave warnings: drivers/dma/sa11x0-dma.c:494:2: warning: format ‘%zx’ expects argument of type ‘size_t’, but argument 4 has type ‘u32’ [-Wformat=] We should use %x to print 'u32' values. Acked-by: Russell King Signed-off-by: Vinod Koul --- drivers/dma/sa11x0-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 137fad5859f6..4ebc00f90f36 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -491,7 +491,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, } spin_unlock_irqrestore(&c->vc.lock, flags); - dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue); + dev_vdbg(d->slave.dev, "tx_status: bytes 0x%x\n", state->residue); return ret; } From 762ff31dd4ec9d97e38bf3c1aa3c4bbd8e3e87ce Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 14 Sep 2016 15:22:49 +0530 Subject: [PATCH 105/169] dmaengine: sa11x0: use correct print specifiers for size_t MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This driver when compiled on 64 bits gave warnings: drivers/dma/sa11x0-dma.c:602:2: warning: format ‘%u’ expects argument of type ‘unsigned int’, but argument 6 has type ‘size_t’ [-Wformat=] We should use %zu to print 'size_t' values. 
Acked-by: Russell King Signed-off-by: Vinod Koul --- drivers/dma/sa11x0-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 4ebc00f90f36..1adeb3265085 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -599,7 +599,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( txd->size = size; txd->sglen = j; - dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n", + dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n", &c->vc, &txd->vd, txd->size, txd->sglen); return vchan_tx_prep(&c->vc, &txd->vd, flags); From 6947c3f2870281961225b070c22ae5a382e36b6b Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 2 Sep 2016 15:31:42 +0530 Subject: [PATCH 106/169] dmaengine: sa11x0: enable COMPILE_TEST To get more coverage, enable COMPILE_TEST for this driver. Acked-by: Russell King Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 29919ae00381..1092a4dc0af1 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -137,7 +137,7 @@ config DMA_OMAP config DMA_SA11X0 tristate "SA-11x0 DMA support" - depends on ARCH_SA1100 + depends on ARCH_SA1100 || COMPILE_TEST select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help From 567df5e9770f7b5203010fd8a5e7c34b387b90a3 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 14 Sep 2016 15:39:56 +0530 Subject: [PATCH 107/169] dmaengine: ep93xx: use correct print specifiers for size_t This driver warns: drivers/dma/ep93xx_dma.c: In function 'ep93xx_dma_prep_slave_sg': drivers/dma/ep93xx_dma.c:1054:6: warning: format '%d' expects argument of type 'int', but argument 3 has type 'size_t' [-Wformat=] drivers/dma/ep93xx_dma.c: In function 'ep93xx_dma_prep_dma_cyclic': drivers/dma/ep93xx_dma.c:1129:5: warning: format '%d' expects argument of type 'int', but argument 3 has type 'size_t' [-Wformat=] We should use %zu to print 'size_t' values. Signed-off-by: Vinod Koul --- drivers/dma/ep93xx_dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 21f08cc3352b..28945cb2e62f 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -1050,7 +1050,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, size_t sg_len = sg_dma_len(sg); if (sg_len > DMA_MAX_CHAN_BYTES) { - dev_warn(chan2dev(edmac), "too big transfer size %d\n", + dev_warn(chan2dev(edmac), "too big transfer size %zu\n", sg_len); goto fail; } @@ -1125,7 +1125,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, } if (period_len > DMA_MAX_CHAN_BYTES) { - dev_warn(chan2dev(edmac), "too big period length %d\n", + dev_warn(chan2dev(edmac), "too big period length %zu\n", period_len); return NULL; } From 8f913bffb4d35718914c1ecc8c1f52c216925752 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 14 Sep 2016 15:39:56 +0530 Subject: [PATCH 108/169] dmaengine: ep93xx: don't use variables defined in global scope The driver uses same variable name in local and global context in a function, rename the local one for better readability. 
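In sketch form, the issue is an inner declaration shadowing the function parameter of the same name; the names below are hypothetical and the real change is in the diff that follows:

#include <linux/errno.h>
#include <linux/scatterlist.h>

static int example_prep(struct scatterlist *sgl, unsigned int sg_len)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, sg_len, i) {
                /*
                 * Before the fix this local was also called "sg_len",
                 * hiding the parameter above; renaming it keeps both
                 * readable and unambiguous.
                 */
                size_t len = sg_dma_len(sg);

                if (len > 65536)        /* illustrative limit */
                        return -EINVAL;
        }

        return 0;
}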
Signed-off-by: Vinod Koul --- drivers/dma/ep93xx_dma.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 28945cb2e62f..9833c087e63c 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -1047,11 +1047,11 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, first = NULL; for_each_sg(sgl, sg, sg_len, i) { - size_t sg_len = sg_dma_len(sg); + size_t len = sg_dma_len(sg); - if (sg_len > DMA_MAX_CHAN_BYTES) { + if (len > DMA_MAX_CHAN_BYTES) { dev_warn(chan2dev(edmac), "too big transfer size %zu\n", - sg_len); + len); goto fail; } @@ -1068,7 +1068,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, desc->src_addr = edmac->runtime_addr; desc->dst_addr = sg_dma_address(sg); } - desc->size = sg_len; + desc->size = len; if (!first) first = desc; From 49ad6d7dd65b5f2ec3ed847eb64bb916cd29431e Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 2 Sep 2016 15:38:43 +0530 Subject: [PATCH 109/169] dmaengine: ep93xx: enable COMPILE_TEST To get more coverage, enable COMPILE_TEST for this driver. Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 1092a4dc0af1..3ee0b3c41b1a 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -167,7 +167,7 @@ config DMA_SUN6I config EP93XX_DMA bool "Cirrus Logic EP93xx DMA support" - depends on ARCH_EP93XX + depends on ARCH_EP93XX || COMPILE_TEST select DMA_ENGINE help Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller. From 3e13b386963b337de98a0ff92d2eefc5dfef6edd Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 14 Sep 2016 15:53:08 +0530 Subject: [PATCH 110/169] dmaengine: mmp_tdma: use correct print specifiers for size_t This driver warns: drivers/dma/mmp_tdma.c: In function 'mmp_tdma_prep_dma_cyclic': drivers/dma/mmp_tdma.c:437:5: warning: format '%d' expects argument of type 'int', but argument 3 has type 'size_t' [-Wformat=] We should use %zu to print 'size_t' values. Acked-by: Zhangfei Gao Signed-off-by: Vinod Koul --- drivers/dma/mmp_tdma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index b3441f57a364..d7422b1bf406 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -433,7 +433,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( if (period_len > TDMA_MAX_XFER_BYTES) { dev_err(tdmac->dev, - "maximum period size exceeded: %d > %d\n", + "maximum period size exceeded: %zu > %d\n", period_len, TDMA_MAX_XFER_BYTES); goto err_out; } From cd3a792a77bc4e72e917efcf9dfc7f33cbb2fda8 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 2 Sep 2016 15:55:56 +0530 Subject: [PATCH 111/169] dmaengine: mmp_pdma: enable COMPILE_TEST To get more coverage, enable COMPILE_TEST for this driver. Acked-by: Zhangfei Gao Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 3ee0b3c41b1a..7674a44f72b0 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -297,7 +297,7 @@ config LPC18XX_DMAMUX config MMP_PDMA bool "MMP PDMA support" - depends on (ARCH_MMP || ARCH_PXA) + depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST select DMA_ENGINE help Support the MMP PDMA engine for PXA and MMP platform. 
From 93d05f1ec644c97535159649de7f8e6731841336 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 2 Sep 2016 15:57:09 +0530 Subject: [PATCH 112/169] dmaengine: mmp_tdma: enable COMPILE_TEST To get more coverage, enable COMPILE_TEST for this driver. While at it, to fix build on other archs, select MMP_SRAM only for ARCH_MMP and also fix the platform header Suggested-by: Arnd Bergmann Acked-by: Zhangfei Gao Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 4 ++-- include/linux/platform_data/dma-mmp_tdma.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 7674a44f72b0..faf702321350 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -304,9 +304,9 @@ config MMP_PDMA config MMP_TDMA bool "MMP Two-Channel DMA support" - depends on ARCH_MMP + depends on ARCH_MMP || COMPILE_TEST select DMA_ENGINE - select MMP_SRAM + select MMP_SRAM if ARCH_MMP help Support the MMP Two-Channel DMA engine. This engine used for MMP Audio DMA and pxa910 SQU. diff --git a/include/linux/platform_data/dma-mmp_tdma.h b/include/linux/platform_data/dma-mmp_tdma.h index 0c72886030ef..422d4504dbac 100644 --- a/include/linux/platform_data/dma-mmp_tdma.h +++ b/include/linux/platform_data/dma-mmp_tdma.h @@ -28,7 +28,7 @@ struct sram_platdata { int granularity; }; -#ifdef CONFIG_ARM +#ifdef CONFIG_MMP_SRAM extern struct gen_pool *sram_get_gpool(char *pool_name); #else static inline struct gen_pool *sram_get_gpool(char *pool_name) From 4fbf3717a467c6d9bcd8fdce03fdd5074b14a03f Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 2 Sep 2016 15:57:51 +0530 Subject: [PATCH 113/169] dmaengine: stm32-dma: enable COMPILE_TEST To get more coverage, enable COMPILE_TEST for this driver. Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index faf702321350..a86578fdbfc3 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -439,9 +439,8 @@ config STE_DMA40 config STM32_DMA bool "STMicroelectronics STM32 DMA support" - depends on ARCH_STM32 + depends on ARCH_STM32 || COMPILE_TEST select DMA_ENGINE - select DMA_OF select DMA_VIRTUAL_CHANNELS help Enable support for the on-chip DMA controller on STMicroelectronics From e97adb49b415b3ff487e9fd0d759e843baf6f69e Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 2 Sep 2016 15:59:10 +0530 Subject: [PATCH 114/169] dmaengine: stm32-dma: make stm32_dma_set_config static Sparse complains: drivers/dma/stm32-dma.c:957:6: warning: symbol 'stm32_dma_set_config' was not declared. Should it be static? SO make stm32_dma_set_config static. 
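A short sketch of what that sparse warning asks for, using an invented symbol name: a function with no prototype in any header and no callers outside its own file should be given internal linkage, which is what the patch below does for stm32_dma_set_config().

  /* no declaration in a header, no external users: make it static */
  static void foo_apply_config(void)
  {
          /* driver-local setup only */
  }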
Signed-off-by: Vinod Koul --- drivers/dma/stm32-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 047476a1383d..307547f4848d 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -954,7 +954,7 @@ static void stm32_dma_desc_free(struct virt_dma_desc *vdesc) kfree(container_of(vdesc, struct stm32_dma_desc, vdesc)); } -void stm32_dma_set_config(struct stm32_dma_chan *chan, +static void stm32_dma_set_config(struct stm32_dma_chan *chan, struct stm32_dma_cfg *cfg) { stm32_dma_clear_reg(&chan->chan_reg); From abdad50d1d184e0c663b57015d9bbc11701207e7 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 14 Sep 2016 16:15:20 +0530 Subject: [PATCH 115/169] dmaengine: s3c24xx: use correct print specifiers for size_t This driver warns: drivers/dma/s3c24xx-dma.c: In function 's3c24xx_dma_prep_memcpy': drivers/dma/s3c24xx-dma.c:826:2: warning: format '%d' expects argument of type 'int', but argument 4 has type 'size_t' [-Wformat=] drivers/dma/s3c24xx-dma.c:830:3: warning: format '%d' expects argument of type 'int', but argument 3 has type 'size_t' [-Wformat=] We should use %zu to print 'size_t' values. Reviewed-by: Krzysztof Kozlowski Signed-off-by: Vinod Koul --- drivers/dma/s3c24xx-dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index ce67075589f5..b4a7041c4f81 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -823,11 +823,11 @@ static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy( struct s3c24xx_sg *dsg; int src_mod, dest_mod; - dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %d bytes from %s\n", + dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n", len, s3cchan->name); if ((len & S3C24XX_DCON_TC_MASK) != len) { - dev_err(&s3cdma->pdev->dev, "memcpy size %d to large\n", len); + dev_err(&s3cdma->pdev->dev, "memcpy size %zu to large\n", len); return NULL; } From 9d0c6f2506c5a1ce0fb66e202c639688ec8c757f Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 14 Sep 2016 16:21:32 +0530 Subject: [PATCH 116/169] dmaengine: s3c24xx: fix pointer cast warnings On some systems, pointer can be large than unsigned int, triggering warning pointer-to-int-cast on conversion. drivers/dma/s3c24xx-dma.c: In function 's3c24xx_dma_filter': drivers/dma/s3c24xx-dma.c:1421:24: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast] Use a long value for type conversion. Suggested-by: Arnd Bergmann Reviewed-by: Krzysztof Kozlowski Signed-off-by: Vinod Koul --- drivers/dma/s3c24xx-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index b4a7041c4f81..ae1937a32e71 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -1418,7 +1418,7 @@ bool s3c24xx_dma_filter(struct dma_chan *chan, void *param) s3cchan = to_s3c24xx_dma_chan(chan); - return s3cchan->id == (int)param; + return s3cchan->id == (uintptr_t)param; } EXPORT_SYMBOL(s3c24xx_dma_filter); From 1609db6f088eed4a0821641cb6d17ae0e79c779a Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 2 Sep 2016 16:00:41 +0530 Subject: [PATCH 117/169] dmaengine: s3c24xx: enable COMPILE_TEST To get more coverage, enable COMPILE_TEST for this driver. 
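The pointer-cast fix above generalizes: when a void * cookie really carries a small integer, convert it through uintptr_t, which is defined to be wide enough for a pointer, before narrowing. A sketch with invented names:

  #include <stdint.h>

  static int filter_matches(int chan_id, void *param)
  {
          /* a direct (int)param cast truncates on LP64 targets and triggers
           * -Wpointer-to-int-cast; going through uintptr_t is well defined */
          return chan_id == (int)(uintptr_t)param;
  }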
Reviewed-by: Krzysztof Kozlowski Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index a86578fdbfc3..1bf84f246868 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -450,7 +450,7 @@ config STM32_DMA config S3C24XX_DMAC bool "Samsung S3C24XX DMA support" - depends on ARCH_S3C24XX + depends on ARCH_S3C24XX || COMPILE_TEST select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help From 4aa258af6532437910aebb3e8a6d47ffda8c1070 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 2 Sep 2016 16:07:05 +0530 Subject: [PATCH 118/169] dmaengine: timb-dma: enable COMPILE_TEST To get more coverage, enable COMPILE_TEST for this driver. Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 1bf84f246868..b6116c82531f 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -496,7 +496,7 @@ config TEGRA210_ADMA config TIMB_DMA tristate "Timberdale FPGA DMA support" - depends on MFD_TIMBERDALE + depends on MFD_TIMBERDALE || COMPILE_TEST select DMA_ENGINE help Enable support for the Timberdale FPGA DMA engine. From 854d4bd25b05ce1d9583b9b072c10070059d7944 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 2 Sep 2016 16:10:07 +0530 Subject: [PATCH 119/169] dmaengine: zxdma: enable COMPILE_TEST To get more coverage, enable COMPILE_TEST for this driver. Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index b6116c82531f..674e6859086b 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -560,7 +560,7 @@ config XILINX_ZYNQMP_DMA config ZX_DMA tristate "ZTE ZX296702 DMA support" - depends on ARCH_ZX + depends on ARCH_ZX || COMPILE_TEST select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help From f43d9fbe0ba8a247dea8ab9d348e2b53c07efad3 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Tue, 20 Sep 2016 10:32:47 +0530 Subject: [PATCH 120/169] dmaengine: sun4i: fix kconfig unmet direct dependencies We have unmet direct dependencies as DMA_SUN4I selects DMA_OF so remove the selection warning: (DMA_SUN4I && MOXART_DMA && STM32_DMA) selects DMA_OF which has unmet direct dependencies (DMADEVICES && OF) Suggested-by: Arnd Bergmann Acked-by: Maxime Ripard Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 674e6859086b..fe8ed06331b3 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -150,7 +150,6 @@ config DMA_SUN4I depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I) select DMA_ENGINE - select DMA_OF select DMA_VIRTUAL_CHANNELS help Enable support for the DMA controller present in the sun4i, From 4dfc97918474a8cb034d3b5d4541b46c98e23474 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Tue, 20 Sep 2016 10:32:47 +0530 Subject: [PATCH 121/169] dmaengine: moxart: fix kconfig unmet direct dependencies We have unmet direct dependencies as DMA_SUN4I selects DMA_OF so remove the selection warning: (DMA_SUN4I && MOXART_DMA && STM32_DMA) selects DMA_OF which has unmet direct dependencies (DMADEVICES && OF) Suggested-by: Arnd Bergmann Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index fe8ed06331b3..b27a263df2e9 100644 --- a/drivers/dma/Kconfig +++ 
b/drivers/dma/Kconfig @@ -315,7 +315,6 @@ config MOXART_DMA tristate "MOXART DMA support" depends on ARCH_MOXART select DMA_ENGINE - select DMA_OF select DMA_VIRTUAL_CHANNELS help Enable support for the MOXA ART SoC DMA controller. From cf80ecf7a277e7908264f822c721f93403cf518f Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Fri, 16 Sep 2016 17:56:07 +0200 Subject: [PATCH 122/169] ste_dma40: Use kmalloc_array() in d40_lcla_allocate() * A multiplication for the size determination of a memory allocation indicated that an array data structure should be processed. Thus use the corresponding function "kmalloc_array". This issue was detected by using the Coccinelle software. * Replace the specification of a data type by a pointer dereference to make the corresponding size determination a bit safer according to the Linux coding style convention. Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 8b18e44a02d5..156199d168bf 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3411,9 +3411,9 @@ static int __init d40_lcla_allocate(struct d40_base *base) * To full fill this hardware requirement without wasting 256 kb * we allocate pages until we get an aligned one. */ - page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS, - GFP_KERNEL); - + page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS, + sizeof(*page_list), + GFP_KERNEL); if (!page_list) { ret = -ENOMEM; goto failure; From 2c7f2f20da9d3fe13ed08d1661ebc12d4ce0cab9 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 08:21:30 +0200 Subject: [PATCH 123/169] ste_dma40: Return directly after a failed kmalloc_array() Return directly after a memory allocation failed in this function at the beginning. Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 156199d168bf..b4dd5910ffb1 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3414,10 +3414,8 @@ static int __init d40_lcla_allocate(struct d40_base *base) page_list = kmalloc_array(MAX_LCLA_ALLOC_ATTEMPTS, sizeof(*page_list), GFP_KERNEL); - if (!page_list) { - ret = -ENOMEM; - goto failure; - } + if (!page_list) + return -ENOMEM; /* Calculating how many pages that are required */ base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE; From aae32ec6e39a513f2049b2d37825957971d4f7f1 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 08:23:37 +0200 Subject: [PATCH 124/169] ste_dma40: Rename a jump label in d40_lcla_allocate() Adjust jump labels according to the current Linux coding style convention. 
Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index b4dd5910ffb1..6f198a31e619 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3431,7 +3431,7 @@ static int __init d40_lcla_allocate(struct d40_base *base) for (j = 0; j < i; j++) free_pages(page_list[j], base->lcla_pool.pages); - goto failure; + goto free_page_list; } if ((virt_to_phys((void *)page_list[i]) & @@ -3458,7 +3458,7 @@ static int __init d40_lcla_allocate(struct d40_base *base) GFP_KERNEL); if (!base->lcla_pool.base_unaligned) { ret = -ENOMEM; - goto failure; + goto free_page_list; } base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned, @@ -3471,12 +3471,12 @@ static int __init d40_lcla_allocate(struct d40_base *base) if (dma_mapping_error(base->dev, pool->dma_addr)) { pool->dma_addr = 0; ret = -ENOMEM; - goto failure; + goto free_page_list; } writel(virt_to_phys(base->lcla_pool.base), base->virtbase + D40_DREG_LCLA); -failure: + free_page_list: kfree(page_list); return ret; } From abac5bac829cc9d8cf178344b4f34b2264927672 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 08:24:46 +0200 Subject: [PATCH 125/169] ste_dma40: Move an assignment in d40_lcla_allocate() Move one assignment for the local variable "ret" so that its setting will only be performed after corresponding data processing succeeded by this function. Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 6f198a31e619..47acb61cb15a 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3404,7 +3404,7 @@ static int __init d40_lcla_allocate(struct d40_base *base) struct d40_lcla_pool *pool = &base->lcla_pool; unsigned long *page_list; int i, j; - int ret = 0; + int ret; /* * This is somewhat ugly. We need 8192 bytes that are 18 bit aligned, @@ -3476,6 +3476,7 @@ static int __init d40_lcla_allocate(struct d40_base *base) writel(virt_to_phys(base->lcla_pool.base), base->virtbase + D40_DREG_LCLA); + ret = 0; free_page_list: kfree(page_list); return ret; From 71660223f50036e67a2a66cf55815fa665639d3a Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 08:28:05 +0200 Subject: [PATCH 126/169] ste_dma40: Improve a size determination in d40_of_probe() Replace the specification of a data structure by a pointer dereference as the parameter for the operator "sizeof" to make the corresponding size determination a bit safer according to the Linux coding style convention. 
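The same idiom in isolation, with a made-up structure and helper: sizing the allocation from the pointer rather than from the type name keeps the call correct even if the structure is later renamed or the pointer's type changes.

  #include <linux/device.h>
  #include <linux/slab.h>

  struct foo_pdata {
          int num_channels;
  };

  static struct foo_pdata *foo_alloc_pdata(struct device *dev)
  {
          /* sizeof(*pdata) always matches whatever type pdata points to */
          struct foo_pdata *pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);

          return pdata;   /* NULL on allocation failure */
  }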
Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 47acb61cb15a..4a651b23e577 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3489,9 +3489,7 @@ static int __init d40_of_probe(struct platform_device *pdev, int num_phy = 0, num_memcpy = 0, num_disabled = 0; const __be32 *list; - pdata = devm_kzalloc(&pdev->dev, - sizeof(struct stedma40_platform_data), - GFP_KERNEL); + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; From e349d4b7317818cbb21096ead26420c80819ddd4 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 09:56:32 +0200 Subject: [PATCH 127/169] ste_dma40: Replace four kzalloc() calls by kcalloc() in d40_hw_detect_init() * The script "checkpatch.pl" can point information out like the following. WARNING: Prefer kcalloc over kzalloc with multiply Thus fix the affected source code places. * Replace the specification of data types by pointer dereferences to make the corresponding size determination a bit safer according to the Linux coding style convention. Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 4a651b23e577..a426abd55dee 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3283,19 +3283,20 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a); } - base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), + base->phy_res = kcalloc(num_phy_chans, + sizeof(*base->phy_res), GFP_KERNEL); if (!base->phy_res) goto failure; - base->lookup_phy_chans = kzalloc(num_phy_chans * - sizeof(struct d40_chan *), + base->lookup_phy_chans = kcalloc(num_phy_chans, + sizeof(*base->lookup_phy_chans), GFP_KERNEL); if (!base->lookup_phy_chans) goto failure; - base->lookup_log_chans = kzalloc(num_log_chans * - sizeof(struct d40_chan *), + base->lookup_log_chans = kcalloc(num_log_chans, + sizeof(*base->lookup_log_chans), GFP_KERNEL); if (!base->lookup_log_chans) goto failure; @@ -3306,9 +3307,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) if (!base->reg_val_backup_chan) goto failure; - base->lcla_pool.alloc_map = - kzalloc(num_phy_chans * sizeof(struct d40_desc *) - * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL); + base->lcla_pool.alloc_map = kcalloc(num_phy_chans + * D40_LCLA_LINK_PER_EVENT_GRP, + sizeof(*base->lcla_pool.alloc_map), + GFP_KERNEL); if (!base->lcla_pool.alloc_map) goto failure; From 28c01058b28527be2a81e8ba2a53437910defbf3 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 11:44:55 +0200 Subject: [PATCH 128/169] ste_dma40: Use kmalloc_array() in d40_hw_detect_init() A multiplication for the size determination of a memory allocation indicated that an array data structure should be processed. Thus use the corresponding function "kmalloc_array". This issue was detected also by using the Coccinelle software. 
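For reference, a self-contained sketch of the helpers these conversions move to; the structure and function are invented. Both kcalloc() and kmalloc_array() take the element count and element size as separate arguments, so an overflowing count * size returns NULL instead of silently allocating a short buffer; kcalloc() additionally zeroes the memory.

  #include <linux/slab.h>
  #include <linux/types.h>

  struct entry {
          u32 reg;
          u32 val;
  };

  static struct entry *alloc_table(size_t n)
  {
          /* zeroed and overflow-checked; kmalloc_array() is the non-zeroing
           * variant for callers that overwrite every element anyway */
          struct entry *tbl = kcalloc(n, sizeof(*tbl), GFP_KERNEL);

          return tbl;     /* NULL if n * sizeof(*tbl) overflows or memory is short */
  }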
Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index a426abd55dee..4892c23b6c7b 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3301,9 +3301,9 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) if (!base->lookup_log_chans) goto failure; - base->reg_val_backup_chan = kmalloc(base->num_phy_chans * - sizeof(d40_backup_regs_chan), - GFP_KERNEL); + base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans, + sizeof(d40_backup_regs_chan), + GFP_KERNEL); if (!base->reg_val_backup_chan) goto failure; From f4534adbcfbb38a21691ca8dfdc8750689d8bcc9 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 14:10:47 +0200 Subject: [PATCH 129/169] ste_dma40: Less checks in d40_hw_detect_init() after error detection Four checks could be repeated by the d40_hw_detect_init() function during error handling even if the passed variables contained a null pointer. * Adjust jump targets according to the Linux coding style convention. * Call the interface "iounmap" only once at the end. * Delete the repeated checks which became unnecessary with this refactoring. Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 67 ++++++++++++++++++++--------------------- 1 file changed, 33 insertions(+), 34 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 4892c23b6c7b..e4c5c8b91e28 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3160,27 +3160,27 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { d40_err(&pdev->dev, "No matching clock found\n"); - goto failure; + goto check_prepare_enabled; } clk_ret = clk_prepare_enable(clk); if (clk_ret) { d40_err(&pdev->dev, "Failed to prepare/enable clock\n"); - goto failure; + goto disable_unprepare; } /* Get IO for DMAC base address */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); if (!res) - goto failure; + goto disable_unprepare; if (request_mem_region(res->start, resource_size(res), D40_NAME " I/O base") == NULL) - goto failure; + goto release_region; virtbase = ioremap(res->start, resource_size(res)); if (!virtbase) - goto failure; + goto release_region; /* This is just a regular AMBA PrimeCell ID actually */ for (pid = 0, i = 0; i < 4; i++) @@ -3192,13 +3192,13 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) if (cid != AMBA_CID) { d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n"); - goto failure; + goto unmap_io; } if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) { d40_err(&pdev->dev, "Unknown designer! 
Got %x wanted %x\n", AMBA_MANF_BITS(pid), AMBA_VENDOR_ST); - goto failure; + goto unmap_io; } /* * HW revision: @@ -3212,7 +3212,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) rev = AMBA_REV_BITS(pid); if (rev < 2) { d40_err(&pdev->dev, "hardware revision: %d is not supported", rev); - goto failure; + goto unmap_io; } /* The number of physical channels on this HW */ @@ -3238,7 +3238,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) sizeof(struct d40_chan), GFP_KERNEL); if (base == NULL) - goto failure; + goto unmap_io; base->rev = rev; base->clk = clk; @@ -3287,63 +3287,62 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) sizeof(*base->phy_res), GFP_KERNEL); if (!base->phy_res) - goto failure; + goto free_base; base->lookup_phy_chans = kcalloc(num_phy_chans, sizeof(*base->lookup_phy_chans), GFP_KERNEL); if (!base->lookup_phy_chans) - goto failure; + goto free_phy_res; base->lookup_log_chans = kcalloc(num_log_chans, sizeof(*base->lookup_log_chans), GFP_KERNEL); if (!base->lookup_log_chans) - goto failure; + goto free_phy_chans; base->reg_val_backup_chan = kmalloc_array(base->num_phy_chans, sizeof(d40_backup_regs_chan), GFP_KERNEL); if (!base->reg_val_backup_chan) - goto failure; + goto free_log_chans; base->lcla_pool.alloc_map = kcalloc(num_phy_chans * D40_LCLA_LINK_PER_EVENT_GRP, sizeof(*base->lcla_pool.alloc_map), GFP_KERNEL); if (!base->lcla_pool.alloc_map) - goto failure; + goto free_backup_chan; base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), 0, SLAB_HWCACHE_ALIGN, NULL); if (base->desc_slab == NULL) - goto failure; + goto free_map; return base; - -failure: + free_map: + kfree(base->lcla_pool.alloc_map); + free_backup_chan: + kfree(base->reg_val_backup_chan); + free_log_chans: + kfree(base->lookup_log_chans); + free_phy_chans: + kfree(base->lookup_phy_chans); + free_phy_res: + kfree(base->phy_res); + free_base: + kfree(base); + unmap_io: + iounmap(virtbase); + release_region: + release_mem_region(res->start, resource_size(res)); + check_prepare_enabled: if (!clk_ret) + disable_unprepare: clk_disable_unprepare(clk); if (!IS_ERR(clk)) clk_put(clk); - if (virtbase) - iounmap(virtbase); - if (res) - release_mem_region(res->start, - resource_size(res)); - if (virtbase) - iounmap(virtbase); - - if (base) { - kfree(base->lcla_pool.alloc_map); - kfree(base->reg_val_backup_chan); - kfree(base->lookup_log_chans); - kfree(base->lookup_phy_chans); - kfree(base->phy_res); - kfree(base); - } - return NULL; } From 11f7a8d19bc85c82241a6540cd83d2aad7e94a04 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 14:34:18 +0200 Subject: [PATCH 130/169] ste_dma40: Delete unnecessary variable initialisations in d40_hw_detect_init() Five local variables will be set to an appropriate value a bit later. Thus omit the explicit initialisation which became unnecessary with a previous update step. 
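The reworked error path above follows the usual kernel unwind pattern. A compact sketch of that pattern, in which struct foo, foo_hw_init() and the buffer size are all hypothetical: each label names what it releases, the ladder runs in reverse order of acquisition, and no "if (x)" re-checks are needed because a failing step jumps past its own cleanup.

  #include <linux/errno.h>
  #include <linux/slab.h>

  struct foo {
          void *buf;
  };

  static int foo_hw_init(struct foo *f)
  {
          return 0;       /* stands in for the real initialisation */
  }

  static int foo_setup(struct foo **out)
  {
          struct foo *f;
          int ret;

          f = kzalloc(sizeof(*f), GFP_KERNEL);
          if (!f)
                  return -ENOMEM;

          f->buf = kzalloc(4096, GFP_KERNEL);
          if (!f->buf) {
                  ret = -ENOMEM;
                  goto free_foo;
          }

          ret = foo_hw_init(f);
          if (ret)
                  goto free_buf;

          *out = f;
          return 0;
   free_buf:
          kfree(f->buf);
   free_foo:
          kfree(f);
          return ret;
  }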
Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index e4c5c8b91e28..cead96b6db42 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3144,11 +3144,11 @@ static int __init d40_phy_res_init(struct d40_base *base) static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) { struct stedma40_platform_data *plat_data = dev_get_platdata(&pdev->dev); - struct clk *clk = NULL; - void __iomem *virtbase = NULL; - struct resource *res = NULL; - struct d40_base *base = NULL; - int num_log_chans = 0; + struct clk *clk; + void __iomem *virtbase; + struct resource *res; + struct d40_base *base; + int num_log_chans; int num_phy_chans; int num_memcpy_chans; int clk_ret = -EINVAL; From 876e023524051e909e48684f4b534f0803df7468 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 14:36:26 +0200 Subject: [PATCH 131/169] ste_dma40: Adjust the position of a jump label in d40_probe() Add a space character before a single jump label in this function according to the current Linux coding style convention. Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index cead96b6db42..03adf918d889 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3730,7 +3730,7 @@ failure: kfree(base->lookup_phy_chans); kfree(base->phy_res); kfree(base); -report_failure: + report_failure: d40_err(&pdev->dev, "probe failed\n"); return ret; } From d7b7ecce4bcb15e92e114f0034ece6bde6b339b8 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 14:50:53 +0200 Subject: [PATCH 132/169] ste_dma40: Rename a jump label in d40_probe() Adjust jump labels according to the current Linux coding style convention. Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 03adf918d889..f22be794277c 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -3572,7 +3572,7 @@ static int __init d40_probe(struct platform_device *pdev) if (!res) { ret = -ENOENT; d40_err(&pdev->dev, "No \"lcpa\" memory resource\n"); - goto failure; + goto destroy_cache; } base->lcpa_size = resource_size(res); base->phy_lcpa = res->start; @@ -3581,7 +3581,7 @@ static int __init d40_probe(struct platform_device *pdev) D40_NAME " I/O lcpa") == NULL) { ret = -EBUSY; d40_err(&pdev->dev, "Failed to request LCPA region %pR\n", res); - goto failure; + goto destroy_cache; } /* We make use of ESRAM memory for this. 
*/ @@ -3597,7 +3597,7 @@ static int __init d40_probe(struct platform_device *pdev) if (!base->lcpa_base) { ret = -ENOMEM; d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); - goto failure; + goto destroy_cache; } /* If lcla has to be located in ESRAM we don't need to allocate */ if (base->plat_data->use_esram_lcla) { @@ -3607,14 +3607,14 @@ static int __init d40_probe(struct platform_device *pdev) ret = -ENOENT; d40_err(&pdev->dev, "No \"lcla_esram\" memory resource\n"); - goto failure; + goto destroy_cache; } base->lcla_pool.base = ioremap(res->start, resource_size(res)); if (!base->lcla_pool.base) { ret = -ENOMEM; d40_err(&pdev->dev, "Failed to ioremap LCLA region\n"); - goto failure; + goto destroy_cache; } writel(res->start, base->virtbase + D40_DREG_LCLA); @@ -3622,7 +3622,7 @@ static int __init d40_probe(struct platform_device *pdev) ret = d40_lcla_allocate(base); if (ret) { d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); - goto failure; + goto destroy_cache; } } @@ -3633,7 +3633,7 @@ static int __init d40_probe(struct platform_device *pdev) ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); if (ret) { d40_err(&pdev->dev, "No IRQ defined\n"); - goto failure; + goto destroy_cache; } if (base->plat_data->use_esram_lcla) { @@ -3643,7 +3643,7 @@ static int __init d40_probe(struct platform_device *pdev) d40_err(&pdev->dev, "Failed to get lcpa_regulator\n"); ret = PTR_ERR(base->lcpa_regulator); base->lcpa_regulator = NULL; - goto failure; + goto destroy_cache; } ret = regulator_enable(base->lcpa_regulator); @@ -3652,7 +3652,7 @@ static int __init d40_probe(struct platform_device *pdev) "Failed to enable lcpa_regulator\n"); regulator_put(base->lcpa_regulator); base->lcpa_regulator = NULL; - goto failure; + goto destroy_cache; } } @@ -3667,13 +3667,13 @@ static int __init d40_probe(struct platform_device *pdev) ret = d40_dmaengine_init(base, num_reserved_chans); if (ret) - goto failure; + goto destroy_cache; base->dev->dma_parms = &base->dma_parms; ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE); if (ret) { d40_err(&pdev->dev, "Failed to set dma max seg size\n"); - goto failure; + goto destroy_cache; } d40_hw_init(base); @@ -3687,8 +3687,7 @@ static int __init d40_probe(struct platform_device *pdev) dev_info(base->dev, "initialized\n"); return 0; - -failure: + destroy_cache: kmem_cache_destroy(base->desc_slab); if (base->virtbase) iounmap(base->virtbase); From c9909935a854941b0a4c299e6b5af5c178f64e93 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 15:10:15 +0200 Subject: [PATCH 133/169] ste_dma40: Rename jump labels in d40_dmaengine_init() Adjust jump labels according to the current Linux coding style convention. 
Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index f22be794277c..4b89a3523dc1 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2891,7 +2891,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, if (err) { d40_err(base->dev, "Failed to register slave channels\n"); - goto failure1; + goto exit; } d40_chan_init(base, &base->dma_memcpy, base->log_chans, @@ -2908,7 +2908,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, if (err) { d40_err(base->dev, "Failed to register memcpy only channels\n"); - goto failure2; + goto unregister_slave; } d40_chan_init(base, &base->dma_both, base->phy_chans, @@ -2926,14 +2926,14 @@ static int __init d40_dmaengine_init(struct d40_base *base, if (err) { d40_err(base->dev, "Failed to register logical and physical capable channels\n"); - goto failure3; + goto unregister_memcpy; } return 0; -failure3: + unregister_memcpy: dma_async_device_unregister(&base->dma_memcpy); -failure2: + unregister_slave: dma_async_device_unregister(&base->dma_slave); -failure1: + exit: return err; } From 8452b85906b6a731b619baba30c0a14fedbafdaa Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 15:15:15 +0200 Subject: [PATCH 134/169] ste_dma40: Rename a jump label in d40_alloc_chan_resources() Adjust jump labels according to the current Linux coding style convention. Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 4b89a3523dc1..12798d2aa7e0 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2426,7 +2426,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) err = d40_config_memcpy(d40c); if (err) { chan_err(d40c, "Failed to configure memcpy channel\n"); - goto fail; + goto mark_last_busy; } } @@ -2434,7 +2434,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) if (err) { chan_err(d40c, "Failed to allocate channel\n"); d40c->configured = false; - goto fail; + goto mark_last_busy; } pm_runtime_get_sync(d40c->base->dev); @@ -2468,7 +2468,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) */ if (is_free_phy) d40_config_write(d40c); -fail: + mark_last_busy: pm_runtime_mark_last_busy(d40c->base->dev); pm_runtime_put_autosuspend(d40c->base->dev); spin_unlock_irqrestore(&d40c->lock, flags); From 78c6e1a5943b464c47e864ca1092287fa4aa219b Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 15:34:07 +0200 Subject: [PATCH 135/169] ste_dma40: One check less in d40_prep_sg() after error detection * Adjust jump targets according to the Linux coding style convention. * Delete a repeated check which became unnecessary with this refactoring. 
Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 12798d2aa7e0..d587fd3a98a4 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2253,7 +2253,7 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); if (desc == NULL) - goto err; + goto unlock; if (sg_next(&sg_src[sg_len - 1]) == sg_src) desc->cyclic = true; @@ -2273,7 +2273,7 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, if (ret) { chan_err(chan, "Failed to prepare %s sg job: %d\n", chan_is_logical(chan) ? "log" : "phy", ret); - goto err; + goto free_desc; } /* @@ -2285,10 +2285,9 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, spin_unlock_irqrestore(&chan->lock, flags); return &desc->txd; - -err: - if (desc) - d40_desc_free(chan, desc); + free_desc: + d40_desc_free(chan, desc); + unlock: spin_unlock_irqrestore(&chan->lock, flags); return NULL; } From 444fa14746c1c19384f91490ed4c19c67517949e Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 15:40:05 +0200 Subject: [PATCH 136/169] ste_dma40: Move two assignments in d40_prep_sg() Move assignments for two local variables so that their setting will only be performed after corresponding data processing succeeded by this function. Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index d587fd3a98a4..43f4e25c5aa2 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2238,8 +2238,8 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, enum dma_transfer_direction direction, unsigned long dma_flags) { struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); - dma_addr_t src_dev_addr = 0; - dma_addr_t dst_dev_addr = 0; + dma_addr_t src_dev_addr; + dma_addr_t dst_dev_addr; struct d40_desc *desc; unsigned long flags; int ret; @@ -2258,6 +2258,8 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, if (sg_next(&sg_src[sg_len - 1]) == sg_src) desc->cyclic = true; + src_dev_addr = 0; + dst_dev_addr = 0; if (direction == DMA_DEV_TO_MEM) src_dev_addr = chan->runtime_addr; else if (direction == DMA_MEM_TO_DEV) From 254e1254ff37239f946350fb5e93b8eea069b223 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 15:51:37 +0200 Subject: [PATCH 137/169] ste_dma40: Rename a jump label in d40_prep_desc() Adjust jump labels according to the current Linux coding style convention. 
Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 43f4e25c5aa2..087861af983e 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2210,13 +2210,13 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, cfg->dst_info.data_width); if (desc->lli_len < 0) { chan_err(chan, "Unaligned size\n"); - goto err; + goto free_desc; } ret = d40_pool_lli_alloc(chan, desc, desc->lli_len); if (ret < 0) { chan_err(chan, "Could not allocate lli\n"); - goto err; + goto free_desc; } desc->lli_current = 0; @@ -2226,8 +2226,7 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, dma_async_tx_descriptor_init(&desc->txd, &chan->chan); return desc; - -err: + free_desc: d40_desc_free(chan, desc); return NULL; } From 86145910697891c6adb8fc11c48d59cf1ec4a842 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 15:54:12 +0200 Subject: [PATCH 138/169] ste_dma40: Move an assignment in d40_prep_desc() Move one assignment for the local variable "cfg" so that its setting will only be performed after a call of the function "d40_desc_get" succeeded by this function. Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 087861af983e..07e7eb39eda1 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2198,7 +2198,7 @@ static struct d40_desc * d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, unsigned int sg_len, unsigned long dma_flags) { - struct stedma40_chan_cfg *cfg = &chan->dma_cfg; + struct stedma40_chan_cfg *cfg; struct d40_desc *desc; int ret; @@ -2206,6 +2206,7 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, if (!desc) return NULL; + cfg = &chan->dma_cfg; desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width, cfg->dst_info.data_width); if (desc->lli_len < 0) { From 5a5eecb36b82288d92bacbdb3dd30fa4cc741e90 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 16:00:05 +0200 Subject: [PATCH 139/169] ste_dma40: Rename a jump label in d40_is_paused() Adjust jump labels according to the current Linux coding style convention. 
Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 07e7eb39eda1..55d0df9d1e38 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2094,8 +2094,7 @@ static bool d40_is_paused(struct d40_chan *d40c) D40_CHAN_POS(d40c->phy_chan->num); if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) is_paused = true; - - goto _exit; + goto unlock; } if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || @@ -2105,7 +2104,7 @@ static bool d40_is_paused(struct d40_chan *d40c) status = readl(chanbase + D40_CHAN_REG_SSLNK); } else { chan_err(d40c, "Unknown direction\n"); - goto _exit; + goto unlock; } status = (status & D40_EVENTLINE_MASK(event)) >> @@ -2113,7 +2112,7 @@ static bool d40_is_paused(struct d40_chan *d40c) if (status != D40_DMA_RUN) is_paused = true; -_exit: + unlock: spin_unlock_irqrestore(&d40c->lock, flags); return is_paused; From e714b470af267a300a98b4d97aa5b800503fa5b9 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 16:04:46 +0200 Subject: [PATCH 140/169] ste_dma40: Rename a jump label in d40_free_dma() Adjust a jump label according to the current Linux coding style convention. Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 55d0df9d1e38..66e2d503c521 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2047,7 +2047,7 @@ static int d40_free_dma(struct d40_chan *d40c) res = d40_channel_execute_command(d40c, D40_DMA_STOP); if (res) { chan_err(d40c, "stop failed\n"); - goto out; + goto mark_last_busy; } d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0); @@ -2065,8 +2065,7 @@ static int d40_free_dma(struct d40_chan *d40c) d40c->busy = false; d40c->phy_chan = NULL; d40c->configured = false; -out: - + mark_last_busy: pm_runtime_mark_last_busy(d40c->base->dev); pm_runtime_put_autosuspend(d40c->base->dev); return res; From f19b8ee88f5311bc5a8661c618d33f1e1002d59e Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 16:10:41 +0200 Subject: [PATCH 141/169] ste_dma40: Rename a jump label in d40_alloc_mask_free() Adjust a jump label according to the current Linux coding style convention. 
Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 66e2d503c521..a4f965f02444 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1831,7 +1831,7 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, phy->allocated_dst = D40_ALLOC_FREE; phy->allocated_src = D40_ALLOC_FREE; is_free = true; - goto out; + goto unlock; } /* Logical channel */ @@ -1847,8 +1847,7 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, is_free = ((phy->allocated_src | phy->allocated_dst) == D40_ALLOC_FREE); - -out: + unlock: spin_unlock_irqrestore(&phy->lock, flags); return is_free; From 8eff80e49f8fd4e1fb88c74b81b8bad33e4f1328 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 16:16:42 +0200 Subject: [PATCH 142/169] ste_dma40: Rename jump labels in d40_alloc_mask_set() Adjust jump labels according to the current Linux coding style convention. Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index a4f965f02444..d7954d0d6a3e 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1780,42 +1780,40 @@ static bool d40_alloc_mask_set(struct d40_phy_res *phy, phy->allocated_dst == D40_ALLOC_FREE) { phy->allocated_dst = D40_ALLOC_PHY; phy->allocated_src = D40_ALLOC_PHY; - goto found; + goto found_unlock; } else - goto not_found; + goto not_found_unlock; } /* Logical channel */ if (is_src) { if (phy->allocated_src == D40_ALLOC_PHY) - goto not_found; + goto not_found_unlock; if (phy->allocated_src == D40_ALLOC_FREE) phy->allocated_src = D40_ALLOC_LOG_FREE; if (!(phy->allocated_src & BIT(log_event_line))) { phy->allocated_src |= BIT(log_event_line); - goto found; + goto found_unlock; } else - goto not_found; + goto not_found_unlock; } else { if (phy->allocated_dst == D40_ALLOC_PHY) - goto not_found; + goto not_found_unlock; if (phy->allocated_dst == D40_ALLOC_FREE) phy->allocated_dst = D40_ALLOC_LOG_FREE; if (!(phy->allocated_dst & BIT(log_event_line))) { phy->allocated_dst |= BIT(log_event_line); - goto found; - } else - goto not_found; + goto found_unlock; + } } - -not_found: + not_found_unlock: spin_unlock_irqrestore(&phy->lock, flags); return false; -found: + found_unlock: spin_unlock_irqrestore(&phy->lock, flags); return true; } From d4cd217ac5486b4f05222fc3e49427d9d855d628 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 16:23:43 +0200 Subject: [PATCH 143/169] ste_dma40: Rename a jump label in dma_tasklet() Adjust a jump label according to the current Linux coding style convention. 
Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index d7954d0d6a3e..95957e23e978 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1607,7 +1607,7 @@ static void dma_tasklet(unsigned long data) /* Check if we have reached here for cyclic job */ d40d = d40_first_active_get(d40c); if (d40d == NULL || !d40d->cyclic) - goto err; + goto check_pending_tx; } if (!d40d->cyclic) @@ -1650,8 +1650,7 @@ static void dma_tasklet(unsigned long data) callback(callback_param); return; - -err: + check_pending_tx: /* Rescue manouver if receiving double interrupts */ if (d40c->pending_tx > 0) d40c->pending_tx--; From b140ea0fc59ed4b9e99fe9cc0e58f15594d95976 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 16:28:54 +0200 Subject: [PATCH 144/169] ste_dma40: Rename a jump label in __d40_execute_command_phy() Adjust a jump label according to the current Linux coding style convention. Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 95957e23e978..82314b7ff990 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1083,7 +1083,7 @@ static int __d40_execute_command_phy(struct d40_chan *d40c, D40_CHAN_POS(d40c->phy_chan->num); if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) - goto done; + goto unlock; } wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num)); @@ -1119,7 +1119,7 @@ static int __d40_execute_command_phy(struct d40_chan *d40c, } } -done: + unlock: spin_unlock_irqrestore(&d40c->base->execmd_lock, flags); return ret; } From 4d8673a0494a0f20bead7aea7dc5906c74451554 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sat, 17 Sep 2016 16:39:06 +0200 Subject: [PATCH 145/169] ste_dma40: Rename a jump label in d40_log_lli_to_lcxa() Adjust a jump label according to the current Linux coding style convention. Signed-off-by: Markus Elfring Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/ste_dma40.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 82314b7ff990..b3af3c94c0bf 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -874,7 +874,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) } if (curr_lcla < 0) - goto out; + goto set_current; for (; lli_current < lli_len; lli_current++) { unsigned int lcla_offset = chan->phy_chan->num * 1024 + @@ -925,8 +925,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) break; } } - -out: + set_current: desc->lli_current = lli_current; } From 34681d84a0f7cc22ded1413dc79eef8a2f23d9c3 Mon Sep 17 00:00:00 2001 From: Sam Van Den Berge Date: Thu, 22 Sep 2016 20:51:15 +0200 Subject: [PATCH 146/169] dmaengine: s3c24xx: Add dma_slave_map for s3c2440 devices This patch updates the s3c24xx dma driver to be able to pass a dma_slave_map array via the platform data. This is needed to be able to use the new, simpler dmaengine API [1]. I used the virtual DMA channels as a parameter for the dma_filter function. By doing that, I could reuse the existing filter function in drivers/dma/s3c24xx-dma.c. I have tested this on my mini2440 board with the audio driver. 
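For context, a sketch of what this conversion buys a client driver once the platform provides the slave map; the device pointer and the "rx" channel name below are illustrative only. The board-specific filter function disappears from the consumer side and the channel is requested purely by name.

  #include <linux/dmaengine.h>

  static struct dma_chan *foo_get_rx_chan(struct device *dev)
  {
          /* the core matches dev_name(dev) plus "rx" against the registered
           * dma_slave_map and calls the DMA driver's filter function with the
           * mapped parameter; NULL is returned if nothing matches */
          return dma_request_slave_channel(dev, "rx");
  }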
According to my observations, dma_request_slave_channel in the function dmaengine_pcm_new in the file sound/soc/soc-generic-dmaengine-pcm.c now returns a valid DMA channel whereas before no DMA channel was returned at that point. Entries for DMACH_XD0, DMACH_XD1 and DMACH_TIMER are missing because I don't realy know which driver to use for these. [1] http://lists.infradead.org/pipermail/linux-arm-kernel/2015-December/393635.html Signed-off-by: Sam Van Den Berge Reviewed-by: Sylwester Nawrocki Acked-by: Arnd Bergmann Acked-by: Krzysztof Kozlowski Signed-off-by: Vinod Koul --- arch/arm/mach-s3c24xx/common.c | 35 +++++++++++++++++++++++ drivers/dma/s3c24xx-dma.c | 3 ++ include/linux/platform_data/dma-s3c24xx.h | 6 ++++ 3 files changed, 44 insertions(+) diff --git a/arch/arm/mach-s3c24xx/common.c b/arch/arm/mach-s3c24xx/common.c index bf50328107bd..ba0ceebdd73d 100644 --- a/arch/arm/mach-s3c24xx/common.c +++ b/arch/arm/mach-s3c24xx/common.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -439,10 +440,44 @@ static struct s3c24xx_dma_channel s3c2440_dma_channels[DMACH_MAX] = { [DMACH_USB_EP4] = { S3C24XX_DMA_APB, true, S3C24XX_DMA_CHANREQ(4, 3), }, }; +static const struct dma_slave_map s3c2440_dma_slave_map[] = { + /* TODO: DMACH_XD0 */ + /* TODO: DMACH_XD1 */ + { "s3c2440-sdi", "rx-tx", (void *)DMACH_SDI }, + { "s3c2410-spi.0", "rx", (void *)DMACH_SPI0 }, + { "s3c2410-spi.0", "tx", (void *)DMACH_SPI0 }, + { "s3c2410-spi.1", "rx", (void *)DMACH_SPI1 }, + { "s3c2410-spi.1", "tx", (void *)DMACH_SPI1 }, + { "s3c2440-uart.0", "rx", (void *)DMACH_UART0 }, + { "s3c2440-uart.0", "tx", (void *)DMACH_UART0 }, + { "s3c2440-uart.1", "rx", (void *)DMACH_UART1 }, + { "s3c2440-uart.1", "tx", (void *)DMACH_UART1 }, + { "s3c2440-uart.2", "rx", (void *)DMACH_UART2 }, + { "s3c2440-uart.2", "tx", (void *)DMACH_UART2 }, + { "s3c2440-uart.3", "rx", (void *)DMACH_UART3 }, + { "s3c2440-uart.3", "tx", (void *)DMACH_UART3 }, + /* TODO: DMACH_TIMER */ + { "s3c24xx-iis", "rx", (void *)DMACH_I2S_IN }, + { "s3c24xx-iis", "tx", (void *)DMACH_I2S_OUT }, + { "samsung-ac97", "rx", (void *)DMACH_PCM_IN }, + { "samsung-ac97", "tx", (void *)DMACH_PCM_OUT }, + { "samsung-ac97", "rx", (void *)DMACH_MIC_IN }, + { "s3c-hsudc", "rx0", (void *)DMACH_USB_EP1 }, + { "s3c-hsudc", "rx1", (void *)DMACH_USB_EP2 }, + { "s3c-hsudc", "rx2", (void *)DMACH_USB_EP3 }, + { "s3c-hsudc", "rx3", (void *)DMACH_USB_EP4 }, + { "s3c-hsudc", "tx0", (void *)DMACH_USB_EP1 }, + { "s3c-hsudc", "tx1", (void *)DMACH_USB_EP2 }, + { "s3c-hsudc", "tx2", (void *)DMACH_USB_EP3 }, + { "s3c-hsudc", "tx3", (void *)DMACH_USB_EP4 } +}; + static struct s3c24xx_dma_platdata s3c2440_dma_platdata = { .num_phy_channels = 4, .channels = s3c2440_dma_channels, .num_channels = DMACH_MAX, + .slave_map = s3c2440_dma_slave_map, + .slavecnt = ARRAY_SIZE(s3c2440_dma_slave_map), }; struct platform_device s3c2440_device_dma = { diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index ce67075589f5..d5c85e7d2061 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -1301,6 +1301,9 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic; s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config; s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all; + s3cdma->slave.filter.map = pdata->slave_map; + s3cdma->slave.filter.mapcnt = pdata->slavecnt; + s3cdma->slave.filter.fn = s3c24xx_dma_filter; /* Register as many memcpy channels as there are 
physical channels */ ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy, diff --git a/include/linux/platform_data/dma-s3c24xx.h b/include/linux/platform_data/dma-s3c24xx.h index 89ba1b0c90e4..4f9aba405e96 100644 --- a/include/linux/platform_data/dma-s3c24xx.h +++ b/include/linux/platform_data/dma-s3c24xx.h @@ -30,16 +30,22 @@ struct s3c24xx_dma_channel { u16 chansel; }; +struct dma_slave_map; + /** * struct s3c24xx_dma_platdata - platform specific settings * @num_phy_channels: number of physical channels * @channels: array of virtual channel descriptions * @num_channels: number of virtual channels + * @slave_map: dma slave map matching table + * @slavecnt: number of elements in slave_map */ struct s3c24xx_dma_platdata { int num_phy_channels; struct s3c24xx_dma_channel *channels; int num_channels; + const struct dma_slave_map *slave_map; + int slavecnt; }; struct dma_chan; From 3a03ea763a675ecb2a8805c6db01af125daa56e4 Mon Sep 17 00:00:00 2001 From: Jean-Francois Moine Date: Sun, 18 Sep 2016 09:59:50 +0200 Subject: [PATCH 147/169] dmaengine: sun6i: Add support for Allwinner A83T (sun8i) variant The A83T SoC has the same dma engine as the A31 (sun6i), with a reduced amount of endpoints and physical channels. Signed-off-by: Jean-Francois Moine Acked-by: Maxime Ripard Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/sun6i-dma.txt | 1 + drivers/dma/sun6i-dma.c | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/Documentation/devicetree/bindings/dma/sun6i-dma.txt b/Documentation/devicetree/bindings/dma/sun6i-dma.txt index d13c136cef8c..6b267045f522 100644 --- a/Documentation/devicetree/bindings/dma/sun6i-dma.txt +++ b/Documentation/devicetree/bindings/dma/sun6i-dma.txt @@ -7,6 +7,7 @@ Required properties: - compatible: Must be one of "allwinner,sun6i-a31-dma" "allwinner,sun8i-a23-dma" + "allwinner,sun8i-a83t-dma" "allwinner,sun8i-h3-dma" - reg: Should contain the registers base address and length - interrupts: Should contain a reference to the interrupt used by this device diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 3835fcde3545..83461994e418 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -1011,6 +1011,12 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = { .nr_max_vchans = 37, }; +static struct sun6i_dma_config sun8i_a83t_dma_cfg = { + .nr_max_channels = 8, + .nr_max_requests = 28, + .nr_max_vchans = 39, +}; + /* * The H3 has 12 physical channels, a maximum DRQ port id of 27, * and a total of 34 usable source and destination endpoints. @@ -1025,6 +1031,7 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = { static const struct of_device_id sun6i_dma_match[] = { { .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg }, { .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg }, + { .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg }, { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, { /* sentinel */ } }; From 4cd169419f5a5a1c57dadeb3588ca7ee4d6de69f Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 2 Sep 2016 16:01:52 +0530 Subject: [PATCH 148/169] dmaengine: tegra-adma: enable COMPILE_TEST To get more coverage, enable COMPILE_TEST for this driver. 
Suggested-by: Jon Hunter Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index b27a263df2e9..6ed182529a9d 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -480,10 +480,9 @@ config TEGRA20_APB_DMA config TEGRA210_ADMA bool "NVIDIA Tegra210 ADMA support" - depends on ARCH_TEGRA_210_SOC + depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK select DMA_ENGINE select DMA_VIRTUAL_CHANNELS - select PM_CLK help Support for the NVIDIA Tegra210 ADMA controller driver. The DMA controller has multiple DMA channels and is used to service From 86737510781bca1c5d27099c7a6ed4f33b631643 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Wed, 21 Sep 2016 15:41:27 +0300 Subject: [PATCH 149/169] dmaengine: edma: Add missing MODULE_DEVICE_TABLE() for of_device_id structs The MODULE_DEVICE_TABLE() were missing from the driver for the of_device_id structures. Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul --- drivers/dma/edma.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 3d277fa76c1a..c2098a4b4dcf 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -274,11 +274,13 @@ static const struct of_device_id edma_of_ids[] = { }, {} }; +MODULE_DEVICE_TABLE(of, edma_of_ids); static const struct of_device_id edma_tptc_of_ids[] = { { .compatible = "ti,edma3-tptc", }, {} }; +MODULE_DEVICE_TABLE(of, edma_tptc_of_ids); static inline unsigned int edma_read(struct edma_cc *ecc, int offset) { From b7862742feadf22cc1496bb6b460236a900512db Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Wed, 21 Sep 2016 15:41:28 +0300 Subject: [PATCH 150/169] dmaengine: edma: Fix of_device_id data parameter usage (legacy vs TPCC) Use pointers to static constant variables for eDMA binding type (legacy vs TPCC). 
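The MODULE_DEVICE_TABLE() addition and the match-data change described above fit one pattern, sketched here against an invented "acme,foo" binding: the table is exported so the module can be autoloaded from the device-tree modalias, and .data points at const objects so probe code dereferences the match data instead of casting the pointer to an integer, which is what broke 64-bit builds.

  #include <linux/module.h>
  #include <linux/of.h>

  static const u32 foo_binding_v1 = 1;
  static const u32 foo_binding_v2 = 2;

  static const struct of_device_id foo_of_ids[] = {
          { .compatible = "acme,foo-v1", .data = &foo_binding_v1 },
          { .compatible = "acme,foo-v2", .data = &foo_binding_v2 },
          { /* sentinel */ }
  };
  MODULE_DEVICE_TABLE(of, foo_of_ids);

  static u32 foo_get_binding(struct device_node *np)
  {
          const struct of_device_id *match = of_match_node(foo_of_ids, np);

          /* dereference the pointer; no integer cast of match->data needed */
          return match ? *(const u32 *)match->data : 0;
  }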
Fixes the following warning when compiling the driver for 64bit architectures (x86_64 for example): drivers/dma/edma.c:2185:16: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast] if (match && (u32)match->data == EDMA_BINDING_TPCC) ^ Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul --- drivers/dma/edma.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index c2098a4b4dcf..951b8b204a47 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -263,14 +263,19 @@ static const struct edmacc_param dummy_paramset = { #define EDMA_BINDING_LEGACY 0 #define EDMA_BINDING_TPCC 1 +static const u32 edma_binding_type[] = { + [EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY, + [EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC, +}; + static const struct of_device_id edma_of_ids[] = { { .compatible = "ti,edma3", - .data = (void *)EDMA_BINDING_LEGACY, + .data = &edma_binding_type[EDMA_BINDING_LEGACY], }, { .compatible = "ti,edma3-tpcc", - .data = (void *)EDMA_BINDING_TPCC, + .data = &edma_binding_type[EDMA_BINDING_TPCC], }, {} }; @@ -2184,7 +2189,7 @@ static int edma_probe(struct platform_device *pdev) const struct of_device_id *match; match = of_match_node(edma_of_ids, node); - if (match && (u32)match->data == EDMA_BINDING_TPCC) + if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC) legacy_mode = false; info = edma_setup_info_from_dt(dev, legacy_mode); From f1d1e34fa5929552662d2c25363bbb82dd3a8fec Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Wed, 21 Sep 2016 15:41:29 +0300 Subject: [PATCH 151/169] dmaengine: edma: Use correct type for of_find_property() third parameter The correct type is int and not for the third parameter of of_find_property(). Fixes compilation for 64bit architectures (x86_64, aarch64). Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul --- drivers/dma/edma.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 951b8b204a47..4f00db34db98 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -2026,8 +2026,7 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, { struct edma_soc_info *info; struct property *prop; - size_t sz; - int ret; + int sz, ret; info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL); if (!info) From 1634d3083579a0f228fbe337f20fec0e196d36b2 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Thu, 22 Sep 2016 09:31:04 +0300 Subject: [PATCH 152/169] dmaengine: edma: Rename set_bits and remove unused clear_bits helper The clear_bits() helper is not used by the driver so it can be removed. 

The powerpc architecture defines set_bits() in
arch/powerpc/include/asm/bitops.h, which results in failed compile
testing on the powerpc architecture:

>> drivers/dma/edma.c:415:20: error: conflicting types for 'set_bits'
    static inline void set_bits(int offset, int len, unsigned long *p)
                       ^~~~~~~~
   In file included from include/linux/bitops.h:36:0,
                    from include/linux/kernel.h:10,
                    from include/linux/list.h:8,
                    from include/linux/kobject.h:20,
                    from include/linux/device.h:17,
                    from include/linux/dmaengine.h:20,
                    from drivers/dma/edma.c:16:
   arch/powerpc/include/asm/bitops.h:75:14: note: previous definition of 'set_bits' was here
    DEFINE_BITOP(set_bits, or, "")
                 ^
   arch/powerpc/include/asm/bitops.h:58:24: note: in definition of macro 'DEFINE_BITOP'
    static __inline__ void fn(unsigned long mask, \
                           ^~
>> drivers/dma/edma.c:421:20: error: conflicting types for 'clear_bits'
    static inline void clear_bits(int offset, int len, unsigned long *p)
                       ^~~~~~~~~~
   In file included from include/linux/bitops.h:36:0,
                    from include/linux/kernel.h:10,
                    from include/linux/list.h:8,
                    from include/linux/kobject.h:20,
                    from include/linux/device.h:17,
                    from include/linux/dmaengine.h:20,
                    from drivers/dma/edma.c:16:
   arch/powerpc/include/asm/bitops.h:76:14: note: previous definition of 'clear_bits' was here
    DEFINE_BITOP(clear_bits, andc, "")
                 ^
   arch/powerpc/include/asm/bitops.h:58:24: note: in definition of macro 'DEFINE_BITOP'
    static __inline__ void fn(unsigned long mask, \
                           ^~

Signed-off-by: Peter Ujfalusi
Signed-off-by: Vinod Koul
---
 drivers/dma/edma.c | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 4f00db34db98..844e87b3bffc 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -412,18 +412,12 @@ static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
 	edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
 }
 
-static inline void set_bits(int offset, int len, unsigned long *p)
+static inline void edma_set_bits(int offset, int len, unsigned long *p)
 {
 	for (; len > 0; len--)
 		set_bit(offset + (len - 1), p);
 }
 
-static inline void clear_bits(int offset, int len, unsigned long *p)
-{
-	for (; len > 0; len--)
-		clear_bit(offset + (len - 1), p);
-}
-
 static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
 					  int priority)
 {
@@ -2266,7 +2260,7 @@ static int edma_probe(struct platform_device *pdev)
 			for (i = 0; rsv_slots[i][0] != -1; i++) {
 				off = rsv_slots[i][0];
 				ln = rsv_slots[i][1];
-				set_bits(off, ln, ecc->slot_inuse);
+				edma_set_bits(off, ln, ecc->slot_inuse);
 			}
 		}
 	}

From a8db115e476ee31fc3e892522038da50dd3a66cc Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Wed, 21 Sep 2016 15:41:30 +0300
Subject: [PATCH 153/169] dmaengine/ARM: omap-dma: Fix the DMAengine compile test on non OMAP configs

The DMAengine driver for omap-dma uses three function calls from the
plat-omap legacy driver. When the DMAengine driver is built while
ARCH_OMAP is not set, the compilation will fail due to missing symbols.

Add empty inline functions to allow the DMAengine driver to be compiled
with COMPILE_TEST.
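
For illustration only, the general shape of this pattern (made-up
config symbol and function names, not the actual omap-dma interface)
is a block of empty inline stubs next to the real declarations:

  /* Hypothetical sketch of the stub pattern: when the architecture
   * support is not built in, the inline stubs keep COMPILE_TEST builds
   * compiling and linking, and simply report that no hardware exists.
   */
  #include <linux/errno.h>

  #ifdef CONFIG_ARCH_FOO
  extern int foo_request_channel(int id, void *data);
  extern void foo_free_channel(int id);
  #else
  static inline int foo_request_channel(int id, void *data)
  {
  	return -ENODEV;
  }
  static inline void foo_free_channel(int id) { }
  #endif

Callers compile unchanged and only ever see -ENODEV at run time if the
real implementation is absent.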

Signed-off-by: Peter Ujfalusi
Signed-off-by: Vinod Koul
---
 include/linux/omap-dma.h | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index 1d99b61adc65..290081620b3e 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -297,6 +297,7 @@ struct omap_system_dma_plat_info {
 #define dma_omap15xx()	__dma_omap15xx(d)
 #define dma_omap16xx()	__dma_omap16xx(d)
 
+#if defined(CONFIG_ARCH_OMAP)
 extern struct omap_system_dma_plat_info *omap_get_plat_info(void);
 
 extern void omap_set_dma_priority(int lch, int dst_port, int priority);
@@ -355,4 +356,22 @@ static inline int omap_lcd_dma_running(void)
 }
 #endif
 
+#else /* CONFIG_ARCH_OMAP */
+
+static inline struct omap_system_dma_plat_info *omap_get_plat_info(void)
+{
+	return NULL;
+}
+
+static inline int omap_request_dma(int dev_id, const char *dev_name,
+			void (*callback)(int lch, u16 ch_status, void *data),
+			void *data, int *dma_ch)
+{
+	return -ENODEV;
+}
+
+static inline void omap_free_dma(int ch) { }
+
+#endif /* CONFIG_ARCH_OMAP */
+
 #endif /* __LINUX_OMAP_DMA_H */

From e7282b66a3bc8650cb9f5acfe63c819ba09015ca Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Wed, 21 Sep 2016 15:41:31 +0300
Subject: [PATCH 154/169] dmaengine: ti-dma-crossbar: Correct type for of_find_property() third parameter

The correct type is int and not size_t for the third parameter of
of_find_property().

Fixes compilation for 64bit architectures (x86_64, aarch64).

Signed-off-by: Peter Ujfalusi
Signed-off-by: Vinod Koul
---
 drivers/dma/ti-dma-crossbar.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 5ae294b256a7..e4f3bd1ae264 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -311,7 +311,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
 	struct property *prop;
 	struct resource *res;
 	u32 safe_val;
-	size_t sz;
+	int sz;
 	void __iomem *iomem;
 	int i, ret;

From 5f9367a8d283495341a7628d83b6a2f505417ecb Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Wed, 21 Sep 2016 15:41:32 +0300
Subject: [PATCH 155/169] dmaengine: ti-dma-crossbar: Fix of_device_id data parameter usage
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Use pointers to static constant variables for crossbar type and for DMA
offset configuration.

Fixes compiler warnings on 64bit architectures:

drivers/dma/ti-dma-crossbar.c: In function ‘ti_dra7_xbar_probe’:
drivers/dma/ti-dma-crossbar.c:398:21: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast]
  xbar->dma_offset = (u32)match->data;
                     ^
drivers/dma/ti-dma-crossbar.c: In function ‘ti_dma_xbar_probe’:
drivers/dma/ti-dma-crossbar.c:431:10: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast]
  switch ((u32)match->data) {
          ^

Signed-off-by: Peter Ujfalusi
Signed-off-by: Vinod Koul
---
 drivers/dma/ti-dma-crossbar.c | 28 ++++++++++++++++++----------
 1 file changed, 18 insertions(+), 10 deletions(-)

diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index e4f3bd1ae264..3f24aeb48c0e 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -18,15 +18,19 @@
 #define TI_XBAR_DRA7		0
 #define TI_XBAR_AM335X		1
+static const u32 ti_xbar_type[] = {
+	[TI_XBAR_DRA7] = TI_XBAR_DRA7,
+	[TI_XBAR_AM335X] = TI_XBAR_AM335X,
+};
 
 static const struct of_device_id ti_dma_xbar_match[] = {
 	{
 		.compatible = "ti,dra7-dma-crossbar",
-		.data = (void *)TI_XBAR_DRA7,
+		.data = &ti_xbar_type[TI_XBAR_DRA7],
 	},
 	{
 		.compatible = "ti,am335x-edma-crossbar",
-		.data = (void *)TI_XBAR_AM335X,
+		.data = &ti_xbar_type[TI_XBAR_AM335X],
 	},
 	{},
 };
@@ -190,9 +194,6 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
 #define TI_DRA7_XBAR_OUTPUTS	127
 #define TI_DRA7_XBAR_INPUTS	256
 
-#define TI_XBAR_EDMA_OFFSET	0
-#define TI_XBAR_SDMA_OFFSET	1
-
 struct ti_dra7_xbar_data {
 	void __iomem *iomem;
 
@@ -280,18 +281,25 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
 	return map;
 }
 
+#define TI_XBAR_EDMA_OFFSET	0
+#define TI_XBAR_SDMA_OFFSET	1
+static const u32 ti_dma_offset[] = {
+	[TI_XBAR_EDMA_OFFSET] = 0,
+	[TI_XBAR_SDMA_OFFSET] = 1,
+};
+
 static const struct of_device_id ti_dra7_master_match[] = {
 	{
 		.compatible = "ti,omap4430-sdma",
-		.data = (void *)TI_XBAR_SDMA_OFFSET,
+		.data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
 	},
 	{
 		.compatible = "ti,edma3",
-		.data = (void *)TI_XBAR_EDMA_OFFSET,
+		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
 	},
 	{
 		.compatible = "ti,edma3-tpcc",
-		.data = (void *)TI_XBAR_EDMA_OFFSET,
+		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
 	},
 	{},
 };
@@ -395,7 +403,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
 
 	xbar->dmarouter.dev = &pdev->dev;
 	xbar->dmarouter.route_free = ti_dra7_xbar_free;
-	xbar->dma_offset = (u32)match->data;
+	xbar->dma_offset = *(u32 *)match->data;
 
 	mutex_init(&xbar->mutex);
 	platform_set_drvdata(pdev, xbar);
@@ -428,7 +436,7 @@ static int ti_dma_xbar_probe(struct platform_device *pdev)
 	if (unlikely(!match))
 		return -EINVAL;
 
-	switch ((u32)match->data) {
+	switch (*(u32 *)match->data) {
 	case TI_XBAR_DRA7:
 		ret = ti_dra7_xbar_probe(pdev);
 		break;

From c5df3572fa13fa1445f5b276f046e791c4aac72b Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Wed, 21 Sep 2016 15:41:33 +0300
Subject: [PATCH 156/169] dmaengine: edma: enable COMPILE_TEST

To get more coverage, enable COMPILE_TEST for this driver.

Signed-off-by: Peter Ujfalusi
Signed-off-by: Vinod Koul
---
 drivers/dma/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6ed182529a9d..3b70abe5b201 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -511,7 +511,7 @@ config TI_DMA_CROSSBAR
 
 config TI_EDMA
 	bool "TI EDMA support"
-	depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE
+	depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE || COMPILE_TEST
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	select TI_DMA_CROSSBAR if ARCH_OMAP

From 54ff7a2d2420048a904151c0f8c996712d50e08c Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Wed, 21 Sep 2016 15:41:34 +0300
Subject: [PATCH 157/169] dmaengine: omap-dma: enable COMPILE_TEST

To get more coverage, enable COMPILE_TEST for this driver.

Signed-off-by: Peter Ujfalusi
Signed-off-by: Vinod Koul
---
 drivers/dma/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 3b70abe5b201..7ba90d2d65c4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -130,7 +130,7 @@ config DMA_JZ4780
 
 config DMA_OMAP
 	tristate "OMAP DMA support"
-	depends on ARCH_OMAP
+	depends on ARCH_OMAP || COMPILE_TEST
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	select TI_DMA_CROSSBAR if SOC_DRA7XX

From 509cf0b8146c73e6f29bcf732d1af1b1aed5ec01 Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Wed, 21 Sep 2016 15:41:35 +0300
Subject: [PATCH 158/169] dmaengine: ti-dma-crossbar: enable COMPILE_TEST

To get more coverage, enable COMPILE_TEST for this driver. When compile
testing eDMA or omap-dma, select also the ti-dma-crossbar so it is also
covered by the compile testing.

Signed-off-by: Peter Ujfalusi
Signed-off-by: Vinod Koul
---
 drivers/dma/Kconfig | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 7ba90d2d65c4..9e680ecf31d6 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -133,7 +133,7 @@ config DMA_OMAP
 	depends on ARCH_OMAP || COMPILE_TEST
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
-	select TI_DMA_CROSSBAR if SOC_DRA7XX
+	select TI_DMA_CROSSBAR if (SOC_DRA7XX || COMPILE_TEST)
 
 config DMA_SA11X0
 	tristate "SA-11x0 DMA support"
@@ -514,7 +514,7 @@ config TI_EDMA
 	depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE || COMPILE_TEST
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
-	select TI_DMA_CROSSBAR if ARCH_OMAP
+	select TI_DMA_CROSSBAR if (ARCH_OMAP || COMPILE_TEST)
 	default n
 	help
 	  Enable support for the TI EDMA controller. This DMA

From 2895e1f8048d1be7b1b5be6439c740621c0e5361 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Niklas=20S=C3=B6derlund?=
Date: Thu, 29 Sep 2016 12:02:39 +0200
Subject: [PATCH 159/169] dma-mapping: fix ia64 build, use PHYS_PFN
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

kbuild test robot reports:

   In file included from include/linux/skbuff.h:34:0,
                    from include/linux/tcp.h:21,
                    from drivers/net/ethernet/amd/xgbe/xgbe-drv.c:119:
   include/linux/dma-mapping.h: In function 'dma_map_resource':
>> include/linux/dma-mapping.h:274:22: error: implicit declaration of function '__phys_to_pfn' [-Werror=implicit-function-declaration]
      unsigned long pfn = __phys_to_pfn(phys_addr);
                          ^~~~~~~~~~~~~

ia64 does not provide __phys_to_pfn(), use the PHYS_PFN() alias.
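
For reference, a minimal sketch of what the generic helper does (the
function below is illustrative only, not part of any kernel API):

  /* PHYS_PFN() comes from the generic <linux/pfn.h> header, so it is
   * usable from common code, unlike the arch-specific __phys_to_pfn().
   */
  #include <linux/pfn.h>
  #include <linux/types.h>

  static unsigned long resource_pfn(phys_addr_t phys_addr)
  {
  	return PHYS_PFN(phys_addr);	/* i.e. phys_addr >> PAGE_SHIFT */
  }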

Signed-off-by: Niklas Söderlund
Signed-off-by: Vinod Koul
---
 include/linux/dma-mapping.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 6e00c7fdbbd3..ff7c87fb0305 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -271,7 +271,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
 					  unsigned long attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
-	unsigned long pfn = __phys_to_pfn(phys_addr);
+	unsigned long pfn = PHYS_PFN(phys_addr);
 	dma_addr_t addr;
 
 	BUG_ON(!valid_dma_direction(dir));

From 3757dc48a66f829cf6ba82a612ba4587ab4b5f1c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Niklas=20S=C3=B6derlund?=
Date: Thu, 29 Sep 2016 12:02:40 +0200
Subject: [PATCH 160/169] dma-mapping: fix m32r build warning
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

kbuild test robot reports:

   In file included from include/linux/skbuff.h:34:0,
                    from include/linux/icmpv6.h:4,
                    from include/linux/ipv6.h:75,
                    from include/net/ipv6.h:16,
                    from include/linux/sunrpc/clnt.h:27,
                    from include/linux/nfs_fs.h:30,
                    from fs/lockd/clntlock.c:13:
   include/linux/dma-mapping.h: In function 'dma_map_resource':
>> include/linux/dma-mapping.h:274:16: warning: unused variable 'pfn' [-Wunused-variable]
     unsigned long pfn = __phys_to_pfn(phys_addr);
                   ^~~

The pfn value is only used once in the call to pfn_valid(), remove the
variable and calculate the pfn when it's needed.

Note that the kbuild report is old and PHYS_PFN() is now used instead
of __phys_to_pfn() to calculate the pfn.

Signed-off-by: Niklas Söderlund
Signed-off-by: Vinod Koul
---
 include/linux/dma-mapping.h | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ff7c87fb0305..642cb4c7ad37 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -271,13 +271,12 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
 					  unsigned long attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
-	unsigned long pfn = PHYS_PFN(phys_addr);
 	dma_addr_t addr;
 
 	BUG_ON(!valid_dma_direction(dir));
 
 	/* Don't allow RAM to be mapped */
-	BUG_ON(pfn_valid(pfn));
+	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
 
 	addr = phys_addr;
 	if (ops->map_resource)

From 2cc40ee7ae30fa12c3ee3f18e6c0e81cced2cdcc Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Fri, 30 Sep 2016 18:19:01 +0200
Subject: [PATCH 161/169] dmaengine: edma: avoid uninitialized variable use

If edma_read_slot() gets an invalid argument, it does not set a result,
as found by "gcc -Wmaybe-uninitialized":

drivers/dma/edma.c: In function 'dma_ccerr_handler':
drivers/dma/edma.c:1499:21: error: 'p.a_b_cnt' may be used uninitialized in this function [-Werror=maybe-uninitialized]
drivers/dma/edma.c:1499:21: error: 'p.ccnt' may be used uninitialized in this function [-Werror=maybe-uninitialized]
  if (p.a_b_cnt == 0 && p.ccnt == 0) {

If we change the function to return an error in this case, we can
handle the failure more gracefully and treat this the same way as a
null slot that we already catch.
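
In outline (simplified, hypothetical types rather than the edma
driver's own), the idea is to let the helper fail loudly and fold that
failure into the existing empty-slot handling:

  #include <linux/errno.h>

  struct paramset { unsigned int a_b_cnt, ccnt; };

  /* Return 0 on success or -EINVAL for an out-of-range slot, instead
   * of silently leaving *p untouched.
   */
  static int read_slot_checked(unsigned int slot, unsigned int num_slots,
  			     const struct paramset *slots, struct paramset *p)
  {
  	if (slot >= num_slots)
  		return -EINVAL;
  	*p = slots[slot];
  	return 0;
  }

The caller can then check "err || (p.a_b_cnt == 0 && p.ccnt == 0)" and
take the same recovery path for both an invalid and a null slot.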

Signed-off-by: Arnd Bergmann
Signed-off-by: Vinod Koul
---
 drivers/dma/edma.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 3d277fa76c1a..9d5dff4a43fd 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -464,13 +464,15 @@ static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
 	memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
 }
 
-static void edma_read_slot(struct edma_cc *ecc, unsigned slot,
+static int edma_read_slot(struct edma_cc *ecc, unsigned slot,
 			   struct edmacc_param *param)
 {
 	slot = EDMA_CHAN_SLOT(slot);
 	if (slot >= ecc->num_slots)
-		return;
+		return -EINVAL;
 	memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
+
+	return 0;
 }
 
 /**
@@ -1476,13 +1478,15 @@ static void edma_error_handler(struct edma_chan *echan)
 	struct edma_cc *ecc = echan->ecc;
 	struct device *dev = echan->vchan.chan.device->dev;
 	struct edmacc_param p;
+	int err;
 
 	if (!echan->edesc)
 		return;
 
 	spin_lock(&echan->vchan.lock);
 
-	edma_read_slot(ecc, echan->slot[0], &p);
+	err = edma_read_slot(ecc, echan->slot[0], &p);
+
 	/*
 	 * Issue later based on missed flag which will be sure
 	 * to happen as:
@@ -1495,7 +1499,7 @@ static void edma_error_handler(struct edma_chan *echan)
 	 * lead to some nasty recursion when we are in a NULL
 	 * slot. So we avoid doing so and set the missed flag.
 	 */
-	if (p.a_b_cnt == 0 && p.ccnt == 0) {
+	if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) {
 		dev_dbg(dev, "Error on null slot, setting miss\n");
 		echan->missed = 1;
 	} else {

From 9f0df936b1b93a7fab5b9ed73009f43369c121ea Mon Sep 17 00:00:00 2001
From: Colin Ian King
Date: Thu, 29 Sep 2016 19:14:38 +0100
Subject: [PATCH 162/169] dmaengine: coh901318: fix integer overflow when shifting more than 32 places

Currently U300_DMA_CHANNELS is set to 40, meaning that the shift of 1
can be more than 32 places, which leads to a 32 bit integer overflow.
Fix this by using 1ULL instead of 1 before shifting it. Also add braces
on the for-loop to keep with coding style conventions.

Signed-off-by: Colin Ian King
Signed-off-by: Vinod Koul
---
 drivers/dma/coh901318.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index a373ecacfaba..5b1d4a39b30b 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1352,9 +1352,10 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
 
 	tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
 
-	for (i = 0; i < U300_DMA_CHANNELS; i++)
-		if (started_channels & (1 << i))
+	for (i = 0; i < U300_DMA_CHANNELS; i++) {
+		if (started_channels & (1ULL << i))
 			tmp += sprintf(tmp, "channel %d\n", i);
+	}
 
 	tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count);

From 2e0cc304e984290935c70fdbe589931e552d7d94 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Niklas=20S=C3=B6derlund?=
Date: Thu, 29 Sep 2016 21:59:15 +0200
Subject: [PATCH 163/169] dma-debug: fix ia64 build, use PHYS_PFN
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

kbuild test robot reports:

   lib/dma-debug.c: In function 'debug_dma_map_resource':
>> lib/dma-debug.c:1541:16: error: implicit declaration of function '__phys_to_pfn' [-Werror=implicit-function-declaration]
     entry->pfn = __phys_to_pfn(addr);
                  ^~~~~~~~~~~~~

ia64 does not provide __phys_to_pfn(), use the PHYS_PFN() alias.

Fixes: 0e74b34dfc3318bf ("dma-debug: add support for resource mappings")
Signed-off-by: Niklas Söderlund
Signed-off-by: Vinod Koul
---
 lib/dma-debug.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 2ba086b0d3e7..59e75863bfa0 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1514,7 +1514,7 @@ void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
 
 	entry->type		= dma_debug_resource;
 	entry->dev		= dev;
-	entry->pfn		= __phys_to_pfn(addr);
+	entry->pfn		= PHYS_PFN(addr);
 	entry->offset		= offset_in_page(addr);
 	entry->size		= size;
 	entry->dev_addr		= dma_addr;

From fc878efe84c342441706fcdf776e08fe455fe9c1 Mon Sep 17 00:00:00 2001
From: Colin Ian King
Date: Thu, 29 Sep 2016 18:45:05 +0100
Subject: [PATCH 164/169] dmaengine: jz4780: fix resource leaks on error exit return

In two cases when jz4780_dma_setup_hwdesc fails, there is a memory leak
on the allocated desc and associated DMA pools on the error exit return
path. Fix this by freeing the resources before returning.

Signed-off-by: Colin Ian King
Signed-off-by: Vinod Koul
---
 drivers/dma/dma-jz4780.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index dade7c47ff18..27e93368b62c 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -324,8 +324,10 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
 			sg_dma_address(&sgl[i]), sg_dma_len(&sgl[i]),
 			direction);
-		if (err < 0)
+		if (err < 0) {
+			jz4780_dma_desc_free(&jzchan->desc->vdesc);
 			return NULL;
+		}
 
 		desc->desc[i].dcm |= JZ_DMA_DCM_TIE;
 
@@ -368,8 +370,10 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
 	for (i = 0; i < periods; i++) {
 		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
 			period_len, direction);
-		if (err < 0)
+		if (err < 0) {
+			jz4780_dma_desc_free(&jzchan->desc->vdesc);
 			return NULL;
+		}
 
 		buf_addr += period_len;

From 585a1db1bed7a0cf5142f859430024b98475a219 Mon Sep 17 00:00:00 2001
From: Arvind Yadav
Date: Wed, 28 Sep 2016 16:15:11 +0530
Subject: [PATCH 165/169] dmaengine: fsldma: Unmap region obtained by of_iomap

Free the memory mapping if the probe is not successful.

Signed-off-by: Arvind Yadav
Acked-by: Li Yang
Signed-off-by: Vinod Koul
---
 drivers/dma/fsldma.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 4b7a66d357d2..6ccb787ba56d 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1351,7 +1351,7 @@ static int fsldma_of_probe(struct platform_device *op)
 	if (!fdev->regs) {
 		dev_err(&op->dev, "unable to ioremap registers\n");
 		err = -ENOMEM;
-		goto out_free_fdev;
+		goto out_free;
 	}
 
 	/* map the channel IRQ if it exists, but don't hookup the handler yet */
@@ -1416,6 +1416,8 @@ static int fsldma_of_probe(struct platform_device *op)
 
 out_free_fdev:
 	irq_dispose_mapping(fdev->irq);
+	iounmap(fdev->regs);
+out_free:
 	kfree(fdev);
 out_return:
 	return err;

From 3770f2a6bfe00869a4b83ee3cced0361ab33289a Mon Sep 17 00:00:00 2001
From: Sergei Shtylyov
Date: Thu, 29 Sep 2016 01:25:48 +0300
Subject: [PATCH 166/169] DT: dmaengine: rcar-dmac: document R8A7743/5 support

Renesas RZ/G SoCs also have the R-Car gen2/3 compatible DMA
controllers. Document RZ/G1[ME] (also known as R8A774[35]) SoC
bindings.

Signed-off-by: Sergei Shtylyov
Acked-by: Geert Uytterhoeven
Signed-off-by: Vinod Koul
---
 Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index 5b902ac8d97e..5f2ce669789a 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -1,4 +1,4 @@
-* Renesas R-Car DMA Controller Device Tree bindings
+* Renesas R-Car (RZ/G) DMA Controller Device Tree bindings
 
 Renesas R-Car Generation 2 SoCs have multiple multi-channel DMA
 controller instances named DMAC capable of serving multiple clients. Channels
@@ -16,6 +16,8 @@ Required Properties:
 
 - compatible: "renesas,dmac-<soctype>", "renesas,rcar-dmac" as fallback.
 	      Examples with soctypes are:
+		- "renesas,dmac-r8a7743" (RZ/G1M)
+		- "renesas,dmac-r8a7745" (RZ/G1E)
 		- "renesas,dmac-r8a7790" (R-Car H2)
 		- "renesas,dmac-r8a7791" (R-Car M2-W)
 		- "renesas,dmac-r8a7792" (R-Car V2H)

From e7b2acfc79f052d023bf15bb6f82c90166ba74b5 Mon Sep 17 00:00:00 2001
From: Misael Lopez Cruz
Date: Fri, 16 Sep 2016 13:53:15 +0300
Subject: [PATCH 167/169] dmaengine: omap-dma: Enable burst and data pack for SG

Enable the burst and data pack modes for the scatter-gather in order to
improve the throughput of the data transfers.

The improvement has been verified with MMC HS200 mode in the DRA72 EVM
using the iozone tool to compare the read throughput (in kB/s) with and
without burst/pack for different reclens (in kB):

	                                  With
	reclen   Baseline   sDMA burst/pack
	------   --------   ---------------
	    64      46568             50820
	   128      57564             63413
	   256      65634             74937
	   512      72427             83483
	  1024      74563             84504
	  2048      76265             86079
	  4096      78045             87335
	  8192      78989             88154
	 16384      81265             91034

Signed-off-by: Misael Lopez Cruz
Signed-off-by: Peter Ujfalusi
Signed-off-by: Vinod Koul
---
 drivers/dma/omap-dma.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 1b7f5f32bf74..7ca27d4b1c54 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -904,13 +904,16 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 	d->es = es;
 
 	d->ccr = c->ccr | CCR_SYNC_FRAME;
-	if (dir == DMA_DEV_TO_MEM)
+	if (dir == DMA_DEV_TO_MEM) {
 		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
-	else
+		d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+	} else {
 		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;
+		d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+	}
 
 	d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
-	d->csdp = es;
+	d->csdp |= es;
 
 	if (dma_omap1()) {
 		d->cicr |= CICR_TOUT_IE;

From 02aa84860c29f3f5a57e959982c811df46a2736c Mon Sep 17 00:00:00 2001
From: Baoyou Xie
Date: Sat, 24 Sep 2016 12:37:05 +0800
Subject: [PATCH 168/169] dmaengine: virt-dma: move function declarations

We get 2 warnings when building the kernel with W=1:

drivers/dma/virt-dma.c:22:14: warning: no previous prototype for 'vchan_tx_submit' [-Wmissing-prototypes]
drivers/dma/virt-dma.c:52:5: warning: no previous prototype for 'vchan_tx_desc_free' [-Wmissing-prototypes]

In fact, these two functions are declared inside another function. So
this patch moves the declarations out of that function and to file
scope.
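
Schematically (hypothetical names, not the virt-dma code itself), the
warning and the fix look like this:

  /* Before: the prototype is buried inside another function, so the
   * out-of-line definition of do_work() has no visible prototype and
   * gcc -Wmissing-prototypes complains about it.
   */
  static inline void caller_old(void)
  {
  	extern int do_work(int arg);
  	do_work(0);
  }

  /* After: the declaration lives at file scope (typically in a shared
   * header), so both the callers and the definition see it.
   */
  extern int do_work(int arg);

  static inline void caller_new(void)
  {
  	do_work(0);
  }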

Signed-off-by: Baoyou Xie
Acked-by: Arnd Bergmann
Signed-off-by: Vinod Koul
---
 drivers/dma/virt-dma.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index a030ae7b1df2..3f776a46a29c 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -45,6 +45,8 @@ static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
 void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
 void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
 struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
+extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
 
 /**
  * vchan_tx_prep - prepare a descriptor
@@ -55,8 +57,6 @@ struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
 static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
 	struct virt_dma_desc *vd, unsigned long tx_flags)
 {
-	extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
-	extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
 	unsigned long flags;
 
 	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);

From c84750906b4818d4929fbf73a4ae6c113b94f52b Mon Sep 17 00:00:00 2001
From: Justin Maggard
Date: Tue, 4 Oct 2016 13:17:58 -0700
Subject: [PATCH 169/169] async_pq_val: fix DMA memory leak

Add missing dmaengine_unmap_put(), so we don't OOM during RAID6 sync.

Fixes: 1786b943dad0 ("async_pq_val: convert to dmaengine_unmap_data")
Signed-off-by: Justin Maggard
Reviewed-by: Dan Williams
Cc:
Signed-off-by: Vinod Koul
---
 crypto/async_tx/async_pq.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 08b3ac68952b..f83de99d7d71 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -368,8 +368,6 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 
 		dma_set_unmap(tx, unmap);
 		async_tx_submit(chan, tx, submit);
-
-		return tx;
 	} else {
 		struct page *p_src = P(blocks, disks);
 		struct page *q_src = Q(blocks, disks);
@@ -424,9 +422,11 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 		submit->cb_param = cb_param_orig;
 		submit->flags = flags_orig;
 		async_tx_sync_epilog(submit);
-
-		return NULL;
+		tx = NULL;
 	}
+	dmaengine_unmap_put(unmap);
+
+	return tx;
 }
 EXPORT_SYMBOL_GPL(async_syndrome_val);
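
As a closing illustration of the pattern behind this last fix
(hypothetical names, not the async_tx code itself), both branches now
funnel through a single exit where the reference is dropped exactly
once:

  struct ctx { int have_engine; int refs; };
  struct txn { int id; };

  static struct txn engine_txn = { 1 };

  static struct txn *submit_async(struct ctx *c) { return &engine_txn; }
  static void run_sync(struct ctx *c) { }
  static void ref_put(struct ctx *c) { c->refs--; }

  static struct txn *run_or_fallback(struct ctx *c)
  {
  	struct txn *tx = NULL;

  	if (c->have_engine)
  		tx = submit_async(c);	/* offloaded path */
  	else
  		run_sync(c);		/* synchronous fallback */

  	ref_put(c);	/* previously skipped by the early returns */
  	return tx;
  }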