diff --git a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt index b3408cc57be6..1ae4748730a8 100644 --- a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt +++ b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt @@ -47,8 +47,8 @@ When the OS is not in control of the management interface (i.e. it's a guest), the channel nodes appear on their own, not under a management node. Required properties: -- compatible: must contain "qcom,hidma-1.0" for initial HW or "qcom,hidma-1.1" -for MSI capable HW. +- compatible: must contain "qcom,hidma-1.0" for initial HW or + "qcom,hidma-1.1"/"qcom,hidma-1.2" for MSI capable HW. - reg: Addresses for the transfer and event channel - interrupts: Should contain the event interrupt - desc-count: Number of asynchronous requests this channel can handle diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst index 814acb4d2294..dfc4486b5743 100644 --- a/Documentation/driver-api/dmaengine/provider.rst +++ b/Documentation/driver-api/dmaengine/provider.rst @@ -111,40 +111,36 @@ The first thing you need to do in your driver is to allocate this structure. Any of the usual memory allocators will do, but you'll also need to initialize a few fields in there: -- channels: should be initialized as a list using the +- ``channels``: should be initialized as a list using the INIT_LIST_HEAD macro for example -- src_addr_widths: +- ``src_addr_widths``: should contain a bitmask of the supported source transfer width -- dst_addr_widths: +- ``dst_addr_widths``: should contain a bitmask of the supported destination transfer width -- directions: +- ``directions``: should contain a bitmask of the supported slave directions (i.e. excluding mem2mem transfers) -- residue_granularity: +- ``residue_granularity``: + granularity of the transfer residue reported to dma_set_residue. + This can be either: - - Granularity of the transfer residue reported to dma_set_residue. - This can be either: + - Descriptor: + your device doesn't support any kind of residue + reporting. The framework will only know that a particular + transaction descriptor is done. - - Descriptor + - Segment: + your device is able to report which chunks have been transferred - - Your device doesn't support any kind of residue - reporting. The framework will only know that a particular - transaction descriptor is done. + - Burst: + your device is able to report which bursts have been transferred - - Segment - - - Your device is able to report which chunks have been transferred - - - Burst - - - Your device is able to report which burst have been transferred - - - dev: should hold the pointer to the ``struct device`` associated - to your current driver instance. +- ``dev``: should hold the pointer to the ``struct device`` associated + to your current driver instance.
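These fields are typically filled in at probe time, before the device is registered. A minimal sketch, assuming a platform driver whose private structure embeds the ``struct dma_device`` (the ``foo``/``pdev`` names here are hypothetical)::

    struct dma_device *dd = &foo->ddev;

    INIT_LIST_HEAD(&dd->channels);
    dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
    dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
    dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
    dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
    dd->dev = &pdev->dev;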
Supported transaction types --------------------------- diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 4d0979e02a28..f87ed3be779a 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -785,6 +785,24 @@ const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, } EXPORT_SYMBOL_GPL(acpi_match_device); +void *acpi_get_match_data(const struct device *dev) +{ + const struct acpi_device_id *match; + + if (!dev->driver) + return NULL; + + if (!dev->driver->acpi_match_table) + return NULL; + + match = acpi_match_device(dev->driver->acpi_match_table, dev); + if (!match) + return NULL; + + return (void *)match->driver_data; +} +EXPORT_SYMBOL_GPL(acpi_get_match_data); + int acpi_match_device_ids(struct acpi_device *device, const struct acpi_device_id *ids) { diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index e26ea209b63e..466d1503aba0 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -1271,9 +1271,17 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, return 0; } +static void * +acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode, + const struct device *dev) +{ + return acpi_get_match_data(dev); +} + #define DECLARE_ACPI_FWNODE_OPS(ops) \ const struct fwnode_operations ops = { \ .device_is_available = acpi_fwnode_device_is_available, \ + .device_get_match_data = acpi_fwnode_device_get_match_data, \ .property_present = acpi_fwnode_property_present, \ .property_read_int_array = \ acpi_fwnode_property_read_int_array, \ diff --git a/drivers/base/property.c b/drivers/base/property.c index 851b1b6596a4..09eaac9400ed 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -1340,3 +1340,10 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, return fwnode_call_int_op(fwnode, graph_parse_endpoint, endpoint); } EXPORT_SYMBOL(fwnode_graph_parse_endpoint); + +void *device_get_match_data(struct device *dev) +{ + return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, + dev); +} +EXPORT_SYMBOL_GPL(device_get_match_data); diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index b52b0d55247e..97483df1f82e 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -2182,7 +2182,7 @@ static int pl08x_terminate_all(struct dma_chan *chan) } /* Dequeue jobs and free LLIs */ if (plchan->at) { - pl08x_desc_free(&plchan->at->vd); + vchan_terminate_vdesc(&plchan->at->vd); plchan->at = NULL; } /* Dequeue jobs not yet fired as well */ @@ -2193,6 +2193,13 @@ static int pl08x_terminate_all(struct dma_chan *chan) return 0; } +static void pl08x_synchronize(struct dma_chan *chan) +{ + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); + + vchan_synchronize(&plchan->vc); +} + static int pl08x_pause(struct dma_chan *chan) { struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); @@ -2773,6 +2780,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) pl08x->memcpy.device_pause = pl08x_pause; pl08x->memcpy.device_resume = pl08x_resume; pl08x->memcpy.device_terminate_all = pl08x_terminate_all; + pl08x->memcpy.device_synchronize = pl08x_synchronize; pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS; pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS; pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM); @@ -2802,6 +2810,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) pl08x->slave.device_pause = pl08x_pause; pl08x->slave.device_resume = pl08x_resume; pl08x->slave.device_terminate_all = 
pl08x_terminate_all; + pl08x->slave.device_synchronize = pl08x_synchronize; pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS; pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS; pl08x->slave.directions = diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 6204cc32d09c..847f84a41a69 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -812,7 +812,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) * c->desc is NULL and exit.) */ if (c->desc) { - bcm2835_dma_desc_free(&c->desc->vd); + vchan_terminate_vdesc(&c->desc->vd); c->desc = NULL; bcm2835_dma_abort(c->chan_base); @@ -836,6 +836,13 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) return 0; } +static void bcm2835_dma_synchronize(struct dma_chan *chan) +{ + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); + + vchan_synchronize(&c->vc); +} + static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq, unsigned int irq_flags) { @@ -942,6 +949,7 @@ static int bcm2835_dma_probe(struct platform_device *pdev) od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy; od->ddev.device_config = bcm2835_dma_slave_config; od->ddev.device_terminate_all = bcm2835_dma_terminate_all; + od->ddev.device_synchronize = bcm2835_dma_synchronize; od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index f7e965f63274..d9bee65a18a4 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -934,7 +934,7 @@ static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param) BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) != ARRAY_SIZE(am335x_usb_queues_tx)); - if (WARN_ON(cchan->port_num > ARRAY_SIZE(am335x_usb_queues_rx))) + if (WARN_ON(cchan->port_num >= ARRAY_SIZE(am335x_usb_queues_rx))) return false; cchan->q_num = queues[cchan->port_num].submit; diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 7373b7a555ec..85820a2d69d4 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -511,7 +511,7 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan) /* Clear the DMA status and stop the transfer. 
*/ jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0); if (jzchan->desc) { - jz4780_dma_desc_free(&jzchan->desc->vdesc); + vchan_terminate_vdesc(&jzchan->desc->vdesc); jzchan->desc = NULL; } @@ -523,6 +523,13 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan) return 0; } +static void jz4780_dma_synchronize(struct dma_chan *chan) +{ + struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan); + + vchan_synchronize(&jzchan->vchan); +} + static int jz4780_dma_config(struct dma_chan *chan, struct dma_slave_config *config) { @@ -813,6 +820,7 @@ static int jz4780_dma_probe(struct platform_device *pdev) dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy; dd->device_config = jz4780_dma_config; dd->device_terminate_all = jz4780_dma_terminate_all; + dd->device_synchronize = jz4780_dma_synchronize; dd->device_tx_status = jz4780_dma_tx_status; dd->device_issue_pending = jz4780_dma_issue_pending; dd->src_addr_widths = JZ_DMA_BUSWIDTHS; diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index ec5f9d2bc820..80cc2be6483c 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -355,7 +355,7 @@ static void dmatest_callback(void *arg) { struct dmatest_done *done = arg; struct dmatest_thread *thread = - container_of(arg, struct dmatest_thread, done_wait); + container_of(done, struct dmatest_thread, test_done); if (!thread->done) { done->done = true; wake_up_all(done->wait); diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 9364a3ed345a..948df1ab5f1a 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -860,11 +860,8 @@ static int edma_terminate_all(struct dma_chan *chan) /* Move the cyclic channel back to default queue */ if (!echan->tc && echan->edesc->cyclic) edma_assign_channel_eventq(echan, EVENTQ_DEFAULT); - /* - * free the running request descriptor - * since it is not in any of the vdesc lists - */ - edma_desc_free(&echan->edesc->vdesc); + + vchan_terminate_vdesc(&echan->edesc->vdesc); echan->edesc = NULL; } diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c index 0391f930aecc..25cec9c243e1 100644 --- a/drivers/dma/img-mdc-dma.c +++ b/drivers/dma/img-mdc-dma.c @@ -694,7 +694,6 @@ static unsigned int mdc_get_new_events(struct mdc_chan *mchan) static int mdc_terminate_all(struct dma_chan *chan) { struct mdc_chan *mchan = to_mdc_chan(chan); - struct mdc_tx_desc *mdesc; unsigned long flags; LIST_HEAD(head); @@ -703,21 +702,28 @@ static int mdc_terminate_all(struct dma_chan *chan) mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL, MDC_CONTROL_AND_STATUS); - mdesc = mchan->desc; - mchan->desc = NULL; + if (mchan->desc) { + vchan_terminate_vdesc(&mchan->desc->vd); + mchan->desc = NULL; + } vchan_get_all_descriptors(&mchan->vc, &head); mdc_get_new_events(mchan); spin_unlock_irqrestore(&mchan->vc.lock, flags); - if (mdesc) - mdc_desc_free(&mdesc->vd); vchan_dma_desc_free_list(&mchan->vc, &head); return 0; } +static void mdc_synchronize(struct dma_chan *chan) +{ + struct mdc_chan *mchan = to_mdc_chan(chan); + + vchan_synchronize(&mchan->vc); +} + static int mdc_slave_config(struct dma_chan *chan, struct dma_slave_config *config) { @@ -952,6 +958,7 @@ static int mdc_dma_probe(struct platform_device *pdev) mdma->dma_dev.device_tx_status = mdc_tx_status; mdma->dma_dev.device_issue_pending = mdc_issue_pending; mdma->dma_dev.device_terminate_all = mdc_terminate_all; + mdma->dma_dev.device_synchronize = mdc_synchronize; mdma->dma_dev.device_config = mdc_slave_config; mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); diff 
--git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 2184881afe76..e7db24c67030 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1939,4 +1939,10 @@ module_platform_driver(sdma_driver); MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); MODULE_DESCRIPTION("i.MX SDMA driver"); +#if IS_ENABLED(CONFIG_SOC_IMX6Q) +MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin"); +#endif +#if IS_ENABLED(CONFIG_SOC_IMX7D) +MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin"); +#endif MODULE_LICENSE("GPL"); diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 01d2a750a621..26b67455208f 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -719,7 +719,7 @@ static int k3_dma_terminate_all(struct dma_chan *chan) c->phy = NULL; p->vchan = NULL; if (p->ds_run) { - k3_dma_free_desc(&p->ds_run->vd); + vchan_terminate_vdesc(&p->ds_run->vd); p->ds_run = NULL; } p->ds_done = NULL; @@ -730,6 +730,13 @@ return 0; } +static void k3_dma_synchronize(struct dma_chan *chan) +{ + struct k3_dma_chan *c = to_k3_chan(chan); + + vchan_synchronize(&c->vc); +} + static int k3_dma_transfer_pause(struct dma_chan *chan) { struct k3_dma_chan *c = to_k3_chan(chan); @@ -868,6 +875,7 @@ static int k3_dma_probe(struct platform_device *op) d->slave.device_pause = k3_dma_transfer_pause; d->slave.device_resume = k3_dma_transfer_resume; d->slave.device_terminate_all = k3_dma_terminate_all; + d->slave.device_synchronize = k3_dma_synchronize; d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES; /* init virtual channel */ diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c index 5ba5714d0b7c..94d7bd7d2880 100644 --- a/drivers/dma/mic_x100_dma.c +++ b/drivers/dma/mic_x100_dma.c @@ -480,9 +480,7 @@ static int mic_dma_setup_irq(struct mic_dma_chan *ch) to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch), mic_dma_intr_handler, mic_dma_thread_fn, "mic dma_channel", ch, ch->ch_num); - if (IS_ERR(ch->cookie)) - return PTR_ERR(ch->cookie); - return 0; + return PTR_ERR_OR_ZERO(ch->cookie); } static inline void mic_dma_free_irq(struct mic_dma_chan *ch) diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index f6dd849159d8..d21c19822feb 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -1311,7 +1311,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan) * c->desc is NULL and exit.)
*/ if (c->desc) { - omap_dma_desc_free(&c->desc->vd); + vchan_terminate_vdesc(&c->desc->vd); c->desc = NULL; /* Avoid stopping the dma twice */ if (!c->paused) diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index e3669850aef4..963cc5228d05 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -50,6 +50,7 @@ #include #include #include +#include #include #include #include @@ -104,6 +105,10 @@ static unsigned int nr_desc_prm; module_param(nr_desc_prm, uint, 0644); MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)"); +enum hidma_cap { + HIDMA_MSI_CAP = 1, + HIDMA_IDENTITY_CAP, +}; /* process completed descriptors */ static void hidma_process_completed(struct hidma_chan *mchan) @@ -736,25 +741,12 @@ static int hidma_request_msi(struct hidma_dev *dmadev, #endif } -static bool hidma_msi_capable(struct device *dev) +static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap) { - struct acpi_device *adev = ACPI_COMPANION(dev); - const char *of_compat; - int ret = -EINVAL; + enum hidma_cap cap; - if (!adev || acpi_disabled) { - ret = device_property_read_string(dev, "compatible", - &of_compat); - if (ret) - return false; - - ret = strcmp(of_compat, "qcom,hidma-1.1"); - } else { -#ifdef CONFIG_ACPI - ret = strcmp(acpi_device_hid(adev), "QCOM8062"); -#endif - } - return ret == 0; + cap = (enum hidma_cap) device_get_match_data(dev); + return cap ? ((cap & test_cap) > 0) : 0; } static int hidma_probe(struct platform_device *pdev) @@ -834,8 +826,7 @@ static int hidma_probe(struct platform_device *pdev) * Determine the MSI capability of the platform. Old HW doesn't * support MSI. */ - msi = hidma_msi_capable(&pdev->dev); - + msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP); device_property_read_u32(&pdev->dev, "desc-count", &dmadev->nr_descriptors); @@ -848,7 +839,10 @@ static int hidma_probe(struct platform_device *pdev) if (!dmadev->nr_descriptors) dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC; - dmadev->chidx = readl(dmadev->dev_trca + 0x28); + if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP)) + dmadev->chidx = readl(dmadev->dev_trca + 0x40); + else + dmadev->chidx = readl(dmadev->dev_trca + 0x28); /* Set DMA mask to 64 bits. 
*/ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); @@ -953,7 +947,8 @@ static int hidma_remove(struct platform_device *pdev) #if IS_ENABLED(CONFIG_ACPI) static const struct acpi_device_id hidma_acpi_ids[] = { {"QCOM8061"}, - {"QCOM8062"}, + {"QCOM8062", HIDMA_MSI_CAP}, + {"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)}, {}, }; MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids); @@ -961,7 +956,9 @@ MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids); static const struct of_device_id hidma_match[] = { {.compatible = "qcom,hidma-1.0",}, - {.compatible = "qcom,hidma-1.1",}, + {.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),}, + {.compatible = "qcom,hidma-1.2", + .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),}, {}, }; MODULE_DEVICE_TABLE(of, hidma_match); diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c index 4999e266b2de..7c6e2ff212a2 100644 --- a/drivers/dma/qcom/hidma_ll.c +++ b/drivers/dma/qcom/hidma_ll.c @@ -393,6 +393,8 @@ static int hidma_ll_reset(struct hidma_lldev *lldev) */ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause) { + unsigned long irqflags; + if (cause & HIDMA_ERR_INT_MASK) { dev_err(lldev->dev, "error 0x%x, disabling...\n", cause); @@ -410,6 +412,10 @@ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause) return; } + spin_lock_irqsave(&lldev->lock, irqflags); + writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); + spin_unlock_irqrestore(&lldev->lock, irqflags); + /* * Fine tuned for this HW... * @@ -421,9 +427,6 @@ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause) * Try to consume as many EVREs as possible. */ hidma_handle_tre_completion(lldev); - - /* We consumed TREs or there are pending TREs or EVREs. 
*/ - writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG); } irqreturn_t hidma_ll_inthandler(int chirq, void *arg) diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c index 7335e2eb9b72..000c7019ca7d 100644 --- a/drivers/dma/qcom/hidma_mgmt.c +++ b/drivers/dma/qcom/hidma_mgmt.c @@ -17,6 +17,7 @@ #include #include #include +#include <linux/of_address.h> #include #include #include @@ -356,59 +357,29 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np) { struct platform_device *pdev_parent = of_find_device_by_node(np); struct platform_device_info pdevinfo; - struct of_phandle_args out_irq; struct device_node *child; - struct resource *res = NULL; - const __be32 *cell; - int ret = 0, size, i, num; - u64 addr, addr_size; + struct resource *res; + int ret = 0; + + /* allocate a resource array */ + res = kcalloc(3, sizeof(*res), GFP_KERNEL); + if (!res) + return -ENOMEM; for_each_available_child_of_node(np, child) { - struct resource *res_iter; struct platform_device *new_pdev; - cell = of_get_property(child, "reg", &size); - if (!cell) { - ret = -EINVAL; - goto out; - } - - size /= sizeof(*cell); - num = size / - (of_n_addr_cells(child) + of_n_size_cells(child)) + 1; - - /* allocate a resource array */ - res = kcalloc(num, sizeof(*res), GFP_KERNEL); - if (!res) { - ret = -ENOMEM; - goto out; - } - - /* read each reg value */ - i = 0; - res_iter = res; - while (i < size) { - addr = of_read_number(&cell[i], - of_n_addr_cells(child)); - i += of_n_addr_cells(child); - - addr_size = of_read_number(&cell[i], - of_n_size_cells(child)); - i += of_n_size_cells(child); - - res_iter->start = addr; - res_iter->end = res_iter->start + addr_size - 1; - res_iter->flags = IORESOURCE_MEM; - res_iter++; - } - - ret = of_irq_parse_one(child, 0, &out_irq); - if (ret) + ret = of_address_to_resource(child, 0, &res[0]); + if (ret) goto out; - res_iter->start = irq_create_of_mapping(&out_irq); - res_iter->name = "hidma event irq"; - res_iter->flags = IORESOURCE_IRQ; + ret = of_address_to_resource(child, 1, &res[1]); + if (ret) + goto out; + + ret = of_irq_to_resource(child, 0, &res[2]); + if (ret <= 0) + goto out; memset(&pdevinfo, 0, sizeof(pdevinfo)); pdevinfo.fwnode = &child->fwnode; @@ -416,7 +387,7 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np) pdevinfo.name = child->name; pdevinfo.id = object_counter++; pdevinfo.res = res; - pdevinfo.num_res = num; + pdevinfo.num_res = 3; pdevinfo.data = NULL; pdevinfo.size_data = 0; pdevinfo.dma_mask = DMA_BIT_MASK(64); @@ -434,8 +405,6 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np) */ of_msi_configure(&new_pdev->dev, child); of_node_put(child); - kfree(res); - res = NULL; } out: kfree(res); diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index f04c4702d98b..cd92d696bcf9 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -732,7 +732,7 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan) /* Dequeue current job */ if (s3cchan->at) { - s3c24xx_dma_desc_free(&s3cchan->at->vd); + vchan_terminate_vdesc(&s3cchan->at->vd); s3cchan->at = NULL; } @@ -744,6 +744,13 @@ unlock: return ret; } +static void s3c24xx_dma_synchronize(struct dma_chan *chan) +{ + struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); + + vchan_synchronize(&s3cchan->vc); +} + static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan) { /* Ensure all queued descriptors are freed */ @@ -1282,6 +1289,7 @@ static int s3c24xx_dma_probe(struct platform_device
*pdev) s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending; s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config; s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all; + s3cdma->memcpy.device_synchronize = s3c24xx_dma_synchronize; /* Initialize slave engine for SoC internal dedicated peripherals */ dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); @@ -1296,6 +1304,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic; s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config; s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all; + s3cdma->slave.device_synchronize = s3c24xx_dma_synchronize; s3cdma->slave.filter.map = pdata->slave_map; s3cdma->slave.filter.mapcnt = pdata->slavecnt; s3cdma->slave.filter.fn = s3c24xx_dma_filter; diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 35c3936edc45..e3ff162c03fc 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -10,6 +10,7 @@ * published by the Free Software Foundation. */ +#include <linux/delay.h> #include #include #include @@ -741,6 +742,41 @@ static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan, /* ----------------------------------------------------------------------------- * Stop and reset */ +static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan) +{ + u32 chcr; + unsigned int i; + + /* + * Ensure that the DE bit is actually 0 after + * clearing it. + */ + for (i = 0; i < 1024; i++) { + chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); + if (!(chcr & RCAR_DMACHCR_DE)) + return; + udelay(1); + } + + dev_err(chan->chan.device->dev, "CHCR DE check error\n"); +} + +static void rcar_dmac_sync_tcr(struct rcar_dmac_chan *chan) +{ + u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); + + if (!(chcr & RCAR_DMACHCR_DE)) + return; + + /* set DE=0 and flush remaining data */ + rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE)); + + /* make sure all remaining data was flushed */ + rcar_dmac_chcr_de_barrier(chan); + + /* restore DE */ + rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr); +} static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan) { @@ -749,6 +785,7 @@ static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan) chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE | RCAR_DMACHCR_TE | RCAR_DMACHCR_DE); rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr); + rcar_dmac_chcr_de_barrier(chan); } static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan) @@ -1309,8 +1346,11 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, residue += chunk->size; } + if (desc->direction == DMA_DEV_TO_MEM) + rcar_dmac_sync_tcr(chan); + /* Add the residue for the current chunk.
*/ - residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift; + residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift; return residue; } @@ -1481,6 +1521,8 @@ static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev) if (chcr & RCAR_DMACHCR_TE) mask |= RCAR_DMACHCR_DE; rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask); + if (mask & RCAR_DMACHCR_DE) + rcar_dmac_chcr_de_barrier(chan); if (chcr & RCAR_DMACHCR_DSE) ret |= rcar_dmac_isr_desc_stage_end(chan); diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index b652071a2096..b106e8a60af6 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -710,7 +710,7 @@ static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc, return 0; } -struct dma_async_tx_descriptor * +static struct dma_async_tx_descriptor * sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index d5db0f6e1ff8..4dbb30cf94ac 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c @@ -253,9 +253,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; - iomem = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(iomem)) return PTR_ERR(iomem); diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index b9d75a54c896..9a558e30c461 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -353,7 +353,8 @@ static int tegra_dma_slave_config(struct dma_chan *dc, } memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig)); - if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID) { + if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID && + sconfig->device_fc) { if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK) return -EINVAL; tdc->slave_id = sconfig->slave_id; } @@ -970,8 +971,13 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; - csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW; - csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; + csr |= TEGRA_APBDMA_CSR_ONCE; + + if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) { + csr |= TEGRA_APBDMA_CSR_FLOW; + csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; + } + if (flags & DMA_PREP_INTERRUPT) csr |= TEGRA_APBDMA_CSR_IE_EOC; @@ -1110,10 +1116,13 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; - csr |= TEGRA_APBDMA_CSR_FLOW; + if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) { + csr |= TEGRA_APBDMA_CSR_FLOW; + csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; + } + if (flags & DMA_PREP_INTERRUPT) csr |= TEGRA_APBDMA_CSR_IE_EOC; - csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index 7df910e7c348..9272b173c746 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c @@ -54,7 +54,15 @@ struct ti_am335x_xbar_map { static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val) { - writeb_relaxed(val, iomem + event); + /* + * TPCC_EVT_MUX_60_63 register layout is different from the + * rest: event 63 is mapped to the lowest byte and event 60 + * to the highest, so handle it separately.
+ */ + if (event >= 60 && event <= 63) + writeb_relaxed(val, iomem + (63 - event % 4)); + else + writeb_relaxed(val, iomem + event); } static void ti_am335x_xbar_free(struct device *dev, void *route_data) diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 896bafb7a532..395c698edb4d 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -422,7 +422,7 @@ static int td_alloc_chan_resources(struct dma_chan *chan) break; else { dev_err(chan2dev(chan), - "Couldnt allocate any descriptors\n"); + "Couldn't allocate any descriptors\n"); return -ENOMEM; } } diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index 545e97279083..88ad8ed2a8d6 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -107,10 +107,7 @@ static void vchan_complete(unsigned long arg) dmaengine_desc_get_callback(&vd->tx, &cb); list_del(&vd->node); - if (dmaengine_desc_test_reuse(&vd->tx)) - list_add(&vd->node, &vc->desc_allocated); - else - vc->desc_free(vd); + vchan_vdesc_fini(vd); dmaengine_desc_callback_invoke(&cb, NULL); } diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index 3f776a46a29c..b09b75ab0751 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -35,6 +35,7 @@ struct virt_dma_chan { struct list_head desc_completed; struct virt_dma_desc *cyclic; + struct virt_dma_desc *vd_terminated; }; static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) @@ -103,6 +104,20 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd) tasklet_schedule(&vc->task); } +/** + * vchan_vdesc_fini - Free or reuse a descriptor + * @vd: virtual descriptor to free/reuse + */ +static inline void vchan_vdesc_fini(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + if (dmaengine_desc_test_reuse(&vd->tx)) + list_add(&vd->node, &vc->desc_allocated); + else + vc->desc_free(vd); +} + /** * vchan_cyclic_callback - report the completion of a period * @vd: virtual descriptor @@ -115,6 +130,25 @@ static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) tasklet_schedule(&vc->task); } +/** + * vchan_terminate_vdesc - Disable pending cyclic callback + * @vd: virtual descriptor to be terminated + * + * vc.lock must be held by caller + */ +static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + /* free up stuck descriptor */ + if (vc->vd_terminated) + vchan_vdesc_fini(vc->vd_terminated); + + vc->vd_terminated = vd; + if (vc->cyclic == vd) + vc->cyclic = NULL; +} + /** * vchan_next_desc - peek at the next descriptor to be processed * @vc: virtual channel to obtain descriptor from @@ -168,10 +202,20 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) * Makes sure that all scheduled or active callbacks have finished running. For * proper operation the caller has to ensure that no new callbacks are scheduled * after the invocation of this function started. + * Free up the terminated cyclic descriptor to prevent memory leakage. 
*/ static inline void vchan_synchronize(struct virt_dma_chan *vc) { + unsigned long flags; + tasklet_kill(&vc->task); + + spin_lock_irqsave(&vc->lock, flags); + if (vc->vd_terminated) { + vchan_vdesc_fini(vc->vd_terminated); + vc->vd_terminated = NULL; + } + spin_unlock_irqrestore(&vc->lock, flags); } #endif diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 5eef13380ca8..27b523530c4a 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -99,7 +99,9 @@ #define XILINX_DMA_REG_FRMPTR_STS 0x0024 #define XILINX_DMA_REG_PARK_PTR 0x0028 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8 +#define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8) #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0 +#define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0) #define XILINX_DMA_REG_VDMA_VERSION 0x002c /* Register Direct Mode Registers */ @@ -163,6 +165,7 @@ #define XILINX_DMA_BD_SOP BIT(27) #define XILINX_DMA_BD_EOP BIT(26) #define XILINX_DMA_COALESCE_MAX 255 +#define XILINX_DMA_NUM_DESCS 255 #define XILINX_DMA_NUM_APP_WORDS 5 /* Multi-Channel DMA Descriptor offsets*/ @@ -211,8 +214,8 @@ struct xilinx_vdma_desc_hw { * @next_desc_msb: MSB of Next Descriptor Pointer @0x04 * @buf_addr: Buffer address @0x08 * @buf_addr_msb: MSB of Buffer address @0x0C - * @pad1: Reserved @0x10 - * @pad2: Reserved @0x14 + * @mcdma_control: Control field for mcdma @0x10 + * @vsize_stride: Vsize and Stride field for mcdma @0x14 * @control: Control field @0x18 * @status: Status field @0x1C * @app: APP Fields @0x20 - 0x30 @@ -232,11 +235,11 @@ struct xilinx_axidma_desc_hw { /** * struct xilinx_cdma_desc_hw - Hardware Descriptor * @next_desc: Next Descriptor Pointer @0x00 - * @next_descmsb: Next Descriptor Pointer MSB @0x04 + * @next_desc_msb: Next Descriptor Pointer MSB @0x04 * @src_addr: Source address @0x08 - * @src_addrmsb: Source address MSB @0x0C + * @src_addr_msb: Source address MSB @0x0C * @dest_addr: Destination address @0x10 - * @dest_addrmsb: Destination address MSB @0x14 + * @dest_addr_msb: Destination address MSB @0x14 * @control: Control field @0x18 * @status: Status field @0x1C */ @@ -310,6 +313,7 @@ struct xilinx_dma_tx_descriptor { * @pending_list: Descriptors waiting * @active_list: Descriptors ready to submit * @done_list: Complete descriptors + * @free_seg_list: Free descriptors * @common: DMA common channel * @desc_pool: Descriptors pool * @dev: The dma device @@ -321,6 +325,7 @@ struct xilinx_dma_tx_descriptor { * @cyclic: Check for cyclic transfers. 
* @genlock: Support genlock mode * @err: Channel has errors + * @idle: Check for channel idle * @tasklet: Cleanup work after irq * @config: Device configuration info * @flush_on_fsync: Flush on Frame sync @@ -329,9 +334,12 @@ struct xilinx_dma_tx_descriptor { * @desc_submitcount: Descriptor h/w submitted count * @residue: Residue for AXI DMA * @seg_v: Statically allocated segments base + * @seg_p: Physical allocated segments base * @cyclic_seg_v: Statically allocated segment base for cyclic transfers + * @cyclic_seg_p: Physical allocated segments base for cyclic dma * @start_transfer: Differentiate b/w DMA IP's transfer * @stop_transfer: Differentiate b/w DMA IP's quiesce + * @tdest: TDEST value for mcdma */ struct xilinx_dma_chan { struct xilinx_dma_device *xdev; @@ -341,6 +349,7 @@ struct xilinx_dma_chan { struct list_head pending_list; struct list_head active_list; struct list_head done_list; + struct list_head free_seg_list; struct dma_chan common; struct dma_pool *desc_pool; struct device *dev; @@ -352,6 +361,7 @@ struct xilinx_dma_chan { bool cyclic; bool genlock; bool err; + bool idle; struct tasklet_struct tasklet; struct xilinx_vdma_config config; bool flush_on_fsync; @@ -360,18 +370,20 @@ struct xilinx_dma_chan { u32 desc_submitcount; u32 residue; struct xilinx_axidma_tx_segment *seg_v; + dma_addr_t seg_p; struct xilinx_axidma_tx_segment *cyclic_seg_v; + dma_addr_t cyclic_seg_p; void (*start_transfer)(struct xilinx_dma_chan *chan); int (*stop_transfer)(struct xilinx_dma_chan *chan); u16 tdest; }; /** - * enum xdma_ip_type: DMA IP type. + * enum xdma_ip_type - DMA IP type. * - * XDMA_TYPE_AXIDMA: Axi dma ip. - * XDMA_TYPE_CDMA: Axi cdma ip. - * XDMA_TYPE_VDMA: Axi vdma ip. + * @XDMA_TYPE_AXIDMA: Axi dma ip. + * @XDMA_TYPE_CDMA: Axi cdma ip. + * @XDMA_TYPE_VDMA: Axi vdma ip. 
* */ enum xdma_ip_type { @@ -580,18 +592,32 @@ xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan) static struct xilinx_axidma_tx_segment * xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) { - struct xilinx_axidma_tx_segment *segment; - dma_addr_t phys; + struct xilinx_axidma_tx_segment *segment = NULL; + unsigned long flags; - segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys); - if (!segment) - return NULL; - - segment->phys = phys; + spin_lock_irqsave(&chan->lock, flags); + if (!list_empty(&chan->free_seg_list)) { + segment = list_first_entry(&chan->free_seg_list, + struct xilinx_axidma_tx_segment, + node); + list_del(&segment->node); + } + spin_unlock_irqrestore(&chan->lock, flags); return segment; } +static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw) +{ + u32 next_desc = hw->next_desc; + u32 next_desc_msb = hw->next_desc_msb; + + memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw)); + + hw->next_desc = next_desc; + hw->next_desc_msb = next_desc_msb; +} + /** * xilinx_dma_free_tx_segment - Free transaction segment * @chan: Driver specific DMA channel @@ -600,7 +626,9 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan) static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan, struct xilinx_axidma_tx_segment *segment) { - dma_pool_free(chan->desc_pool, segment, segment->phys); + xilinx_dma_clean_hw_desc(&segment->hw); + + list_add_tail(&segment->node, &chan->free_seg_list); } /** @@ -725,16 +753,31 @@ static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan) static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) { struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + unsigned long flags; dev_dbg(chan->dev, "Free all channel resources.\n"); xilinx_dma_free_descriptors(chan); + if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { - xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v); - xilinx_dma_free_tx_segment(chan, chan->seg_v); + spin_lock_irqsave(&chan->lock, flags); + INIT_LIST_HEAD(&chan->free_seg_list); + spin_unlock_irqrestore(&chan->lock, flags); + + /* Free memory that is allocated for BD */ + dma_free_coherent(chan->dev, sizeof(*chan->seg_v) * + XILINX_DMA_NUM_DESCS, chan->seg_v, + chan->seg_p); + + /* Free Memory that is allocated for cyclic DMA Mode */ + dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v), + chan->cyclic_seg_v, chan->cyclic_seg_p); + } + + if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) { + dma_pool_destroy(chan->desc_pool); + chan->desc_pool = NULL; } - dma_pool_destroy(chan->desc_pool); - chan->desc_pool = NULL; } /** @@ -817,6 +860,7 @@ static void xilinx_dma_do_tasklet(unsigned long data) static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) { struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); + int i; /* Has this channel already been allocated? */ if (chan->desc_pool) @@ -827,11 +871,30 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) * for meeting Xilinx VDMA specification requirement. */ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { - chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool", - chan->dev, - sizeof(struct xilinx_axidma_tx_segment), - __alignof__(struct xilinx_axidma_tx_segment), - 0); + /* Allocate the buffer descriptors. 
*/ + chan->seg_v = dma_zalloc_coherent(chan->dev, + sizeof(*chan->seg_v) * + XILINX_DMA_NUM_DESCS, + &chan->seg_p, GFP_KERNEL); + if (!chan->seg_v) { + dev_err(chan->dev, + "unable to allocate channel %d descriptors\n", + chan->id); + return -ENOMEM; + } + + for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) { + chan->seg_v[i].hw.next_desc = + lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) * + ((i + 1) % XILINX_DMA_NUM_DESCS)); + chan->seg_v[i].hw.next_desc_msb = + upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) * + ((i + 1) % XILINX_DMA_NUM_DESCS)); + chan->seg_v[i].phys = chan->seg_p + + sizeof(*chan->seg_v) * i; + list_add_tail(&chan->seg_v[i].node, + &chan->free_seg_list); + } } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool", chan->dev, @@ -846,7 +909,8 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) 0); } - if (!chan->desc_pool) { + if (!chan->desc_pool && + (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) { dev_err(chan->dev, "unable to allocate channel %d descriptor pool\n", chan->id); @@ -854,23 +918,21 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) } if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { - /* - * For AXI DMA case after submitting a pending_list, keep - * an extra segment allocated so that the "next descriptor" - * pointer on the tail descriptor always points to a - * valid descriptor, even when paused after reaching taildesc. - * This way, it is possible to issue additional - * transfers without halting and restarting the channel. - */ - chan->seg_v = xilinx_axidma_alloc_tx_segment(chan); - /* * For cyclic DMA mode we need to program the tail Descriptor * register with a value which is not a part of the BD chain * so allocating a desc segment during channel allocation for * programming tail descriptor. */ - chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan); + chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev, + sizeof(*chan->cyclic_seg_v), + &chan->cyclic_seg_p, GFP_KERNEL); + if (!chan->cyclic_seg_v) { + dev_err(chan->dev, + "unable to allocate desc segment for cyclic DMA\n"); + return -ENOMEM; + } + chan->cyclic_seg_v->phys = chan->cyclic_seg_p; } dma_cookie_init(dchan); @@ -935,35 +997,11 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, return ret; } -/** - * xilinx_dma_is_running - Check if DMA channel is running - * @chan: Driver specific DMA channel - * - * Return: '1' if running, '0' if not. - */ -static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan) -{ - return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & - XILINX_DMA_DMASR_HALTED) && - (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) & - XILINX_DMA_DMACR_RUNSTOP); -} - -/** - * xilinx_dma_is_idle - Check if DMA channel is idle - * @chan: Driver specific DMA channel - * - * Return: '1' if idle, '0' if not. 
- */ -static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan) -{ - return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & - XILINX_DMA_DMASR_IDLE; -} - /** * xilinx_dma_stop_transfer - Halt DMA channel * @chan: Driver specific DMA channel + * + * Return: '0' on success and failure value on error */ static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan) { @@ -980,6 +1018,8 @@ static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan) /** * xilinx_cdma_stop_transfer - Wait for the current transfer to complete * @chan: Driver specific DMA channel + * + * Return: '0' on success and failure value on error */ static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan) { @@ -1022,13 +1062,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) { struct xilinx_vdma_config *config = &chan->config; struct xilinx_dma_tx_descriptor *desc, *tail_desc; - u32 reg; + u32 reg, j; struct xilinx_vdma_tx_segment *tail_segment; /* This function was invoked with lock held */ if (chan->err) return; + if (!chan->idle) + return; + if (list_empty(&chan->pending_list)) return; @@ -1040,13 +1083,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) tail_segment = list_last_entry(&tail_desc->segments, struct xilinx_vdma_tx_segment, node); - /* If it is SG mode and hardware is busy, cannot submit */ - if (chan->has_sg && xilinx_dma_is_running(chan) && - !xilinx_dma_is_idle(chan)) { - dev_dbg(chan->dev, "DMA controller still busy\n"); - return; - } - /* * If hardware is idle, then all descriptors on the running lists are * done, start new transfers @@ -1063,10 +1099,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) else reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; - /* Configure channel to allow number frame buffers */ - dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE, - chan->desc_pendingcount); - /* * With SG, start with circular mode, so that BDs can be fetched. 
* In direct register mode, if not parking, enable circular mode @@ -1079,17 +1111,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); - if (config->park && (config->park_frm >= 0) && - (config->park_frm < chan->num_frms)) { - if (chan->direction == DMA_MEM_TO_DEV) - dma_write(chan, XILINX_DMA_REG_PARK_PTR, - config->park_frm << - XILINX_DMA_PARK_PTR_RD_REF_SHIFT); - else - dma_write(chan, XILINX_DMA_REG_PARK_PTR, - config->park_frm << - XILINX_DMA_PARK_PTR_WR_REF_SHIFT); + j = chan->desc_submitcount; + reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR); + if (chan->direction == DMA_MEM_TO_DEV) { + reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK; + reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT; + } else { + reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK; + reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT; } + dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg); /* Start the hardware */ xilinx_dma_start(chan); @@ -1101,6 +1132,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) if (chan->has_sg) { dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, tail_segment->phys); + list_splice_tail_init(&chan->pending_list, &chan->active_list); + chan->desc_pendingcount = 0; } else { struct xilinx_vdma_tx_segment *segment, *last = NULL; int i = 0; @@ -1130,19 +1163,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, last->hw.stride); vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); - } - if (!chan->has_sg) { - list_del(&desc->node); - list_add_tail(&desc->node, &chan->active_list); chan->desc_submitcount++; chan->desc_pendingcount--; + list_del(&desc->node); + list_add_tail(&desc->node, &chan->active_list); if (chan->desc_submitcount == chan->num_frms) chan->desc_submitcount = 0; - } else { - list_splice_tail_init(&chan->pending_list, &chan->active_list); - chan->desc_pendingcount = 0; } + + chan->idle = false; } /** @@ -1158,6 +1188,9 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) if (chan->err) return; + if (!chan->idle) + return; + if (list_empty(&chan->pending_list)) return; @@ -1176,6 +1209,12 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) } if (chan->has_sg) { + dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, + XILINX_CDMA_CR_SGMODE); + + dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, + XILINX_CDMA_CR_SGMODE); + xilinx_write(chan, XILINX_DMA_REG_CURDESC, head_desc->async_tx.phys); @@ -1203,6 +1242,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) list_splice_tail_init(&chan->pending_list, &chan->active_list); chan->desc_pendingcount = 0; + chan->idle = false; } /** @@ -1212,7 +1252,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) { struct xilinx_dma_tx_descriptor *head_desc, *tail_desc; - struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head; + struct xilinx_axidma_tx_segment *tail_segment; u32 reg; if (chan->err) @@ -1221,12 +1261,8 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) if (list_empty(&chan->pending_list)) return; - /* If it is SG mode and hardware is busy, cannot submit */ - if (chan->has_sg && xilinx_dma_is_running(chan) && - !xilinx_dma_is_idle(chan)) { - dev_dbg(chan->dev, "DMA controller still busy\n"); + if (!chan->idle) return; - } head_desc = list_first_entry(&chan->pending_list, struct xilinx_dma_tx_descriptor, node); @@ -1235,21 +1271,6 @@ static 
void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) tail_segment = list_last_entry(&tail_desc->segments, struct xilinx_axidma_tx_segment, node); - if (chan->has_sg && !chan->xdev->mcdma) { - old_head = list_first_entry(&head_desc->segments, - struct xilinx_axidma_tx_segment, node); - new_head = chan->seg_v; - /* Copy Buffer Descriptor fields. */ - new_head->hw = old_head->hw; - - /* Swap and save new reserve */ - list_replace_init(&old_head->node, &new_head->node); - chan->seg_v = old_head; - - tail_segment->hw.next_desc = chan->seg_v->phys; - head_desc->async_tx.phys = new_head->phys; - } - reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) { @@ -1324,6 +1345,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) list_splice_tail_init(&chan->pending_list, &chan->active_list); chan->desc_pendingcount = 0; + chan->idle = false; } /** @@ -1388,6 +1410,8 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan) } chan->err = false; + chan->idle = true; + chan->desc_submitcount = 0; return err; } @@ -1469,6 +1493,7 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data) if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) { spin_lock(&chan->lock); xilinx_dma_complete_descriptor(chan); + chan->idle = true; chan->start_transfer(chan); spin_unlock(&chan->lock); } @@ -1591,7 +1616,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, { struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); struct xilinx_dma_tx_descriptor *desc; - struct xilinx_vdma_tx_segment *segment, *prev = NULL; + struct xilinx_vdma_tx_segment *segment; struct xilinx_vdma_desc_hw *hw; if (!is_slave_direction(xt->dir)) @@ -1645,8 +1670,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, /* Insert the segment into the descriptor segments list. */ list_add_tail(&segment->node, &desc->segments); - prev = segment; - /* Link the last hardware descriptor with the first. 
*/ segment = list_first_entry(&desc->segments, struct xilinx_vdma_tx_segment, node); @@ -1733,7 +1756,7 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( { struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); struct xilinx_dma_tx_descriptor *desc; - struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL; + struct xilinx_axidma_tx_segment *segment = NULL; u32 *app_w = (u32 *)context; struct scatterlist *sg; size_t copy; @@ -1784,10 +1807,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( XILINX_DMA_NUM_APP_WORDS); } - if (prev) - prev->hw.next_desc = segment->phys; - - prev = segment; sg_used += copy; /* @@ -1801,7 +1820,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( segment = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, node); desc->async_tx.phys = segment->phys; - prev->hw.next_desc = segment->phys; /* For the last DMA_MEM_TO_DEV transfer, set EOP */ if (chan->direction == DMA_MEM_TO_DEV) { @@ -1821,11 +1839,14 @@ error: /** * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction - * @chan: DMA channel - * @sgl: scatterlist to transfer to/from - * @sg_len: number of entries in @scatterlist + * @dchan: DMA channel + * @buf_addr: Physical address of the buffer + * @buf_len: Total length of the cyclic buffers + * @period_len: length of individual cyclic buffer * @direction: DMA direction * @flags: transfer ack flags + * + * Return: Async transaction descriptor on success and NULL on failure */ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len, @@ -2009,7 +2030,9 @@ error: /** * xilinx_dma_terminate_all - Halt the channel and free descriptors - * @chan: Driver specific DMA Channel pointer + * @dchan: Driver specific DMA Channel pointer + * + * Return: '0' always. */ static int xilinx_dma_terminate_all(struct dma_chan *dchan) { @@ -2029,6 +2052,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan) /* Remove and free all of the descriptors in the lists */ xilinx_dma_free_descriptors(chan); + chan->idle = true; if (chan->cyclic) { reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); @@ -2037,6 +2061,10 @@ chan->cyclic = false; } + if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg) + dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, + XILINX_CDMA_CR_SGMODE); + return 0; } @@ -2323,6 +2351,7 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev) * * @xdev: Driver specific device structure * @node: Device node + * @chan_id: DMA Channel id * * Return: '0' on success and failure value on error */ @@ -2344,11 +2373,18 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, chan->has_sg = xdev->has_sg; chan->desc_pendingcount = 0x0; chan->ext_addr = xdev->ext_addr; + /* This variable ensures that descriptors are not + * submitted when the dma engine is in progress. This variable is + * added to avoid polling for a bit in the status register to + * know dma state in the driver hot path.
+ */ + chan->idle = true; spin_lock_init(&chan->lock); INIT_LIST_HEAD(&chan->pending_list); INIT_LIST_HEAD(&chan->done_list); INIT_LIST_HEAD(&chan->active_list); + INIT_LIST_HEAD(&chan->free_seg_list); /* Retrieve the channel properties from the device tree */ has_dre = of_property_read_bool(node, "xlnx,include-dre"); @@ -2379,6 +2415,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; + chan->config.park = 1; if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S) @@ -2395,6 +2432,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; + chan->config.park = 1; if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM) @@ -2459,7 +2497,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, * Return: 0 always. */ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, - struct device_node *node) { + struct device_node *node) +{ int ret, i, nr_channels = 1; ret = of_property_read_u32(node, "dma-channels", &nr_channels); @@ -2654,7 +2693,12 @@ static int xilinx_dma_probe(struct platform_device *pdev) goto error; } - dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); + if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) + dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n"); + else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) + dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n"); + else + dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); return 0; diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 1ee1241ca797..f14645817ed8 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -23,6 +23,7 @@ #include #include #include +#include <linux/pm_runtime.h> #include "../dmaengine.h" @@ -47,6 +48,7 @@ #define ZYNQMP_DMA_SRC_START_MSB 0x15C #define ZYNQMP_DMA_DST_START_LSB 0x160 #define ZYNQMP_DMA_DST_START_MSB 0x164 +#define ZYNQMP_DMA_TOTAL_BYTE 0x188 #define ZYNQMP_DMA_RATE_CTRL 0x18C #define ZYNQMP_DMA_IRQ_SRC_ACCT 0x190 #define ZYNQMP_DMA_IRQ_DST_ACCT 0x194 @@ -138,6 +140,8 @@ #define ZYNQMP_DMA_BUS_WIDTH_64 64 #define ZYNQMP_DMA_BUS_WIDTH_128 128 +#define ZDMA_PM_TIMEOUT 100 + #define ZYNQMP_DMA_DESC_SIZE(chan) (chan->desc_size) #define to_chan(chan) container_of(chan, struct zynqmp_dma_chan, \ @@ -211,8 +215,6 @@ struct zynqmp_dma_desc_sw { * @bus_width: Bus width * @src_burst_len: Source burst length * @dst_burst_len: Dest burst length - * @clk_main: Pointer to main clock - * @clk_apb: Pointer to apb clock */ struct zynqmp_dma_chan { struct zynqmp_dma_device *zdev; @@ -237,8 +239,6 @@ struct zynqmp_dma_chan { u32 bus_width; u32 src_burst_len; u32 dst_burst_len; - struct clk *clk_main; - struct clk *clk_apb; }; /** @@ -246,11 +246,15 @@ struct zynqmp_dma_chan { * @dev: Device Structure * @common: DMA device structure * @chan: Driver specific DMA channel + * @clk_main: Pointer to main clock + * @clk_apb: Pointer to apb clock */ struct zynqmp_dma_device { struct device *dev; struct dma_device common; struct zynqmp_dma_chan *chan; + struct clk *clk_main; + struct clk *clk_apb; }; static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg, @@ -461,7
+465,11 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan) { struct zynqmp_dma_chan *chan = to_chan(dchan); struct zynqmp_dma_desc_sw *desc; - int i; + int i, ret; + + ret = pm_runtime_get_sync(chan->dev); + if (ret < 0) + return ret; chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS, GFP_KERNEL); @@ -506,6 +514,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan) static void zynqmp_dma_start(struct zynqmp_dma_chan *chan) { writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER); + writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE); chan->idle = false; writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2); } @@ -517,12 +526,12 @@ static void zynqmp_dma_start(struct zynqmp_dma_chan *chan) */ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status) { - u32 val; - + if (status & ZYNQMP_DMA_BYTE_CNT_OVRFL) + writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE); if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR) - val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); + readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT); if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR) - val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT); + readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT); } static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) @@ -545,6 +554,8 @@ static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) * zynqmp_dma_device_config - Zynqmp dma device configuration * @dchan: DMA channel * @config: DMA device config + * + * Return: 0 always */ static int zynqmp_dma_device_config(struct dma_chan *dchan, struct dma_slave_config *config) @@ -640,7 +651,7 @@ static void zynqmp_dma_issue_pending(struct dma_chan *dchan) /** * zynqmp_dma_free_descriptors - Free channel descriptors - * @dchan: DMA channel pointer + * @chan: ZynqMP DMA channel pointer */ static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan) { @@ -664,6 +675,8 @@ static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan) (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS), chan->desc_pool_v, chan->desc_pool_p); kfree(chan->sw_desc_pool); + pm_runtime_mark_last_busy(chan->dev); + pm_runtime_put_autosuspend(chan->dev); } /** @@ -715,7 +728,7 @@ static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data) if (status & ZYNQMP_DMA_INT_OVRFL) { zynqmp_dma_handle_ovfl_int(chan, status); - dev_info(chan->dev, "Channel %p overflow interrupt\n", chan); + dev_dbg(chan->dev, "Channel %p overflow interrupt\n", chan); ret = IRQ_HANDLED; } @@ -838,11 +851,10 @@ static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan) if (!chan) return; - devm_free_irq(chan->zdev->dev, chan->irq, chan); + if (chan->irq) + devm_free_irq(chan->zdev->dev, chan->irq, chan); tasklet_kill(&chan->tasklet); list_del(&chan->common.device_node); - clk_disable_unprepare(chan->clk_apb); - clk_disable_unprepare(chan->clk_main); } /** @@ -907,30 +919,6 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, "zynqmp-dma", chan); if (err) return err; - chan->clk_main = devm_clk_get(&pdev->dev, "clk_main"); - if (IS_ERR(chan->clk_main)) { - dev_err(&pdev->dev, "main clock not found.\n"); - return PTR_ERR(chan->clk_main); - } - - chan->clk_apb = devm_clk_get(&pdev->dev, "clk_apb"); - if (IS_ERR(chan->clk_apb)) { - dev_err(&pdev->dev, "apb clock not found.\n"); - return PTR_ERR(chan->clk_apb); - } - - err = clk_prepare_enable(chan->clk_main); - if (err) { - dev_err(&pdev->dev, "Unable to enable main clock.\n"); - return err; - } - - err = clk_prepare_enable(chan->clk_apb); - if (err) { 
@@ -838,11 +851,10 @@ static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
 	if (!chan)
 		return;
 
-	devm_free_irq(chan->zdev->dev, chan->irq, chan);
+	if (chan->irq)
+		devm_free_irq(chan->zdev->dev, chan->irq, chan);
 	tasklet_kill(&chan->tasklet);
 	list_del(&chan->common.device_node);
-	clk_disable_unprepare(chan->clk_apb);
-	clk_disable_unprepare(chan->clk_main);
 }
 
 /**
@@ -907,30 +919,6 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
 			       "zynqmp-dma", chan);
 	if (err)
 		return err;
-	chan->clk_main = devm_clk_get(&pdev->dev, "clk_main");
-	if (IS_ERR(chan->clk_main)) {
-		dev_err(&pdev->dev, "main clock not found.\n");
-		return PTR_ERR(chan->clk_main);
-	}
-
-	chan->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
-	if (IS_ERR(chan->clk_apb)) {
-		dev_err(&pdev->dev, "apb clock not found.\n");
-		return PTR_ERR(chan->clk_apb);
-	}
-
-	err = clk_prepare_enable(chan->clk_main);
-	if (err) {
-		dev_err(&pdev->dev, "Unable to enable main clock.\n");
-		return err;
-	}
-
-	err = clk_prepare_enable(chan->clk_apb);
-	if (err) {
-		clk_disable_unprepare(chan->clk_main);
-		dev_err(&pdev->dev, "Unable to enable apb clock.\n");
-		return err;
-	}
 
 	chan->desc_size = sizeof(struct zynqmp_dma_desc_ll);
 	chan->idle = true;
@@ -952,6 +940,87 @@ static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec,
 	return dma_get_slave_channel(&zdev->chan->common);
 }
 
+/**
+ * zynqmp_dma_suspend - Suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the driver into low power mode.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_suspend(struct device *dev)
+{
+	if (!device_may_wakeup(dev))
+		return pm_runtime_force_suspend(dev);
+
+	return 0;
+}
+
+/**
+ * zynqmp_dma_resume - Resume from suspend
+ * @dev: Address of the device structure
+ *
+ * Resume operation after suspend.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_resume(struct device *dev)
+{
+	if (!device_may_wakeup(dev))
+		return pm_runtime_force_resume(dev);
+
+	return 0;
+}
+
+/**
+ * zynqmp_dma_runtime_suspend - Runtime suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the driver into low power mode.
+ * Return: 0 always
+ */
+static int __maybe_unused zynqmp_dma_runtime_suspend(struct device *dev)
+{
+	struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(zdev->clk_main);
+	clk_disable_unprepare(zdev->clk_apb);
+
+	return 0;
+}
+
+/**
+ * zynqmp_dma_runtime_resume - Runtime resume method for the driver
+ * @dev: Address of the device structure
+ *
+ * Bring the device back from low power mode.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_runtime_resume(struct device *dev)
+{
+	struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
+	int err;
+
+	err = clk_prepare_enable(zdev->clk_main);
+	if (err) {
+		dev_err(dev, "Unable to enable main clock.\n");
+		return err;
+	}
+
+	err = clk_prepare_enable(zdev->clk_apb);
+	if (err) {
+		dev_err(dev, "Unable to enable apb clock.\n");
+		clk_disable_unprepare(zdev->clk_main);
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops zynqmp_dma_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dma_suspend, zynqmp_dma_resume)
+	SET_RUNTIME_PM_OPS(zynqmp_dma_runtime_suspend,
+			   zynqmp_dma_runtime_resume, NULL)
+};
+
 /**
  * zynqmp_dma_probe - Driver probe function
  * @pdev: Pointer to the platform_device structure
@@ -984,12 +1053,33 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
 	p->device_config = zynqmp_dma_device_config;
 	p->dev = &pdev->dev;
 
+	zdev->clk_main = devm_clk_get(&pdev->dev, "clk_main");
+	if (IS_ERR(zdev->clk_main)) {
+		dev_err(&pdev->dev, "main clock not found.\n");
+		return PTR_ERR(zdev->clk_main);
+	}
+
+	zdev->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
+	if (IS_ERR(zdev->clk_apb)) {
+		dev_err(&pdev->dev, "apb clock not found.\n");
+		return PTR_ERR(zdev->clk_apb);
+	}
+
 	platform_set_drvdata(pdev, zdev);
+	pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
+	pm_runtime_use_autosuspend(zdev->dev);
+	pm_runtime_enable(zdev->dev);
+	pm_runtime_get_sync(zdev->dev);
+	if (!pm_runtime_enabled(zdev->dev)) {
+		ret = zynqmp_dma_runtime_resume(zdev->dev);
+		if (ret)
+			return ret;
+	}
 
 	ret = zynqmp_dma_chan_probe(zdev, pdev);
 	if (ret) {
 		dev_err(&pdev->dev, "Probing channel failed\n");
-		goto free_chan_resources;
+		goto err_disable_pm;
 	}
 
 	p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
@@ -1005,12 +1095,19 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
 		goto free_chan_resources;
 	}
 
+	pm_runtime_mark_last_busy(zdev->dev);
+	pm_runtime_put_sync_autosuspend(zdev->dev);
+
 	dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n");
 
 	return 0;
 
 free_chan_resources:
 	zynqmp_dma_chan_remove(zdev->chan);
+err_disable_pm:
+	if (!pm_runtime_enabled(zdev->dev))
+		zynqmp_dma_runtime_suspend(zdev->dev);
+	pm_runtime_disable(zdev->dev);
 	return ret;
 }
 
@@ -1028,6 +1125,9 @@ static int zynqmp_dma_remove(struct platform_device *pdev)
 	dma_async_device_unregister(&zdev->common);
 
 	zynqmp_dma_chan_remove(zdev->chan);
+	pm_runtime_disable(zdev->dev);
+	if (!pm_runtime_enabled(zdev->dev))
+		zynqmp_dma_runtime_suspend(zdev->dev);
 
 	return 0;
 }
@@ -1042,6 +1142,7 @@ static struct platform_driver zynqmp_dma_driver = {
 	.driver = {
 		.name = "xilinx-zynqmp-dma",
 		.of_match_table = zynqmp_dma_of_match,
+		.pm = &zynqmp_dma_dev_pm_ops,
 	},
 	.probe = zynqmp_dma_probe,
 	.remove = zynqmp_dma_remove,
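The probe-time sequence added above has one subtlety worth calling out: pm_runtime_enable() is a stub on CONFIG_PM=n kernels, so after attempting to enable runtime PM the driver checks pm_runtime_enabled() and, if runtime PM is not active, powers the block on by calling its runtime-resume handler directly. A condensed sketch of that shape, using invented foo_* names and an arbitrary 100 ms autosuspend delay:

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int foo_runtime_resume(struct device *dev)
	{
		/* Would re-enable the block's clocks; elided in this sketch. */
		return 0;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
		pm_runtime_get_sync(&pdev->dev);
		if (!pm_runtime_enabled(&pdev->dev)) {
			/* CONFIG_PM=n: run the resume handler by hand. */
			ret = foo_runtime_resume(&pdev->dev);
			if (ret)
				return ret;
		}

		/* ... set up and register the device while it is powered ... */

		/* Let the block autosuspend once probe has finished. */
		pm_runtime_mark_last_busy(&pdev->dev);
		pm_runtime_put_sync_autosuspend(&pdev->dev);
		return 0;
	}
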
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 8ad33a44a7b8..f25d36358187 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -981,10 +981,18 @@ static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
 	return 0;
 }
 
+static void *
+of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
+				const struct device *dev)
+{
+	return (void *)of_device_get_match_data(dev);
+}
+
 const struct fwnode_operations of_fwnode_ops = {
 	.get = of_fwnode_get,
 	.put = of_fwnode_put,
 	.device_is_available = of_fwnode_device_is_available,
+	.device_get_match_data = of_fwnode_device_get_match_data,
 	.property_present = of_fwnode_property_present,
 	.property_read_int_array = of_fwnode_property_read_int_array,
 	.property_read_string_array = of_fwnode_property_read_string_array,
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index b8f4c3c776e5..a933f87ef98d 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -585,6 +585,7 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
 const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
 					       const struct device *dev);
 
+void *acpi_get_match_data(const struct device *dev);
 extern bool acpi_driver_match_device(struct device *dev,
 				     const struct device_driver *drv);
 int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
@@ -762,6 +763,11 @@ static inline const struct acpi_device_id *acpi_match_device(
 	return NULL;
 }
 
+static inline void *acpi_get_match_data(const struct device *dev)
+{
+	return NULL;
+}
+
 static inline bool acpi_driver_match_device(struct device *dev,
 					    const struct device_driver *drv)
 {
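The acpi_get_match_data() declaration above (with a NULL-returning stub for !CONFIG_ACPI builds) hands back the driver_data member of whichever acpi_device_id entry matched the device, mirroring what of_device_get_match_data() does for DT. A hedged sketch of a match table carrying per-variant data, with invented IDs and types:

	#include <linux/acpi.h>
	#include <linux/mod_devicetable.h>
	#include <linux/module.h>

	struct foo_cfg {
		unsigned int max_burst;
	};

	static const struct foo_cfg foo_v1_cfg = { .max_burst = 8 };
	static const struct foo_cfg foo_v2_cfg = { .max_burst = 16 };

	/* Hypothetical ACPI IDs; driver_data is what the helper returns. */
	static const struct acpi_device_id foo_acpi_match[] = {
		{ "ABCD0001", (kernel_ulong_t)&foo_v1_cfg },
		{ "ABCD0002", (kernel_ulong_t)&foo_v2_cfg },
		{ }
	};
	MODULE_DEVICE_TABLE(acpi, foo_acpi_match);
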
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 411a84c6c400..4fa1a489efe4 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 
 struct fwnode_operations;
+struct device;
 
 struct fwnode_handle {
 	struct fwnode_handle *secondary;
@@ -51,6 +52,7 @@ struct fwnode_reference_args {
  * struct fwnode_operations - Operations for fwnode interface
  * @get: Get a reference to an fwnode.
  * @put: Put a reference to an fwnode.
+ * @device_get_match_data: Return the device driver match data.
  * @property_present: Return true if a property is present.
  * @property_read_integer_array: Read an array of integer properties. Return
  *				  zero on success, a negative error code
@@ -71,6 +73,8 @@ struct fwnode_operations {
 	struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
 	void (*put)(struct fwnode_handle *fwnode);
 	bool (*device_is_available)(const struct fwnode_handle *fwnode);
+	void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
+				       const struct device *dev);
 	bool (*property_present)(const struct fwnode_handle *fwnode,
 				 const char *propname);
 	int (*property_read_int_array)(const struct fwnode_handle *fwnode,
diff --git a/include/linux/property.h b/include/linux/property.h
index f6189a3ac63c..6653ed4b99f9 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -275,6 +275,8 @@ bool device_dma_supported(struct device *dev);
 
 enum dev_dma_attr device_get_dma_attr(struct device *dev);
 
+void *device_get_match_data(struct device *dev);
+
 int device_get_phy_mode(struct device *dev);
 
 void *device_get_mac_address(struct device *dev, char *addr, int alen);
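With the fwnode operation wired up for both OF and ACPI backends, a driver can fetch its per-variant data through one call regardless of how it was enumerated. A sketch of the consumer side, again with invented foo_* names; note that in this version of the API device_get_match_data() returns a plain void *, so the const qualifier is supplied by the caller.

	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>
	#include <linux/property.h>

	struct foo_cfg {
		unsigned int max_burst;
	};

	static const struct foo_cfg foo_v1_cfg = { .max_burst = 8 };

	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "vendor,foo-v1", .data = &foo_v1_cfg },
		{ }
	};
	MODULE_DEVICE_TABLE(of, foo_of_match);

	static int foo_probe(struct platform_device *pdev)
	{
		const struct foo_cfg *cfg;

		/* Dispatches to OF or ACPI through the fwnode operations. */
		cfg = device_get_match_data(&pdev->dev);
		if (!cfg)
			return -ENODEV;

		dev_info(&pdev->dev, "configured for max burst %u\n",
			 cfg->max_burst);
		return 0;
	}
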