alistair23-linux/drivers/dma/virt-dma.c
Linus Torvalds a5b871c91d dmaengine updates for v5.6-rc1

Merge tag 'dmaengine-5.6-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:
 "This time we have a bunch of core changes to support dynamic channels,
  hotplug of controllers, new APIs for metadata ops, etc., along with new
  drivers for Intel data accelerators, TI K3 UDMA, the PLX DMA engine and
  the HiSilicon Kunpeng DMA engine, plus the usual assorted driver updates.

  Core:
   - Support for dynamic channels
   - Removal of various slave wrappers
   - Make a few slave request APIs private to dmaengine
   - Symlinks between channels and slaves
   - Support for hotplug of controllers
   - Support for metadata_ops for dma_async_tx_descriptor (a client-side
     sketch follows the commit log below)
   - Reporting the amount of cached DMA data
   - Virtual DMA channel locking updates

  New driver/device/feature support:
   - Driver for Intel data accelerators
   - Driver for TI K3 UDMA
   - Driver for the PLX DMA engine
   - Driver for the HiSilicon Kunpeng DMA engine
   - Support for eDMA for QorIQ LS1028A in the fsl-edma driver
   - Support for cyclic DMA in the sun4i driver
   - Support for the X1830 in the JZ4780 driver"

* tag 'dmaengine-5.6-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (62 commits)
  dmaengine: Create symlinks between DMA channels and slaves
  dmaengine: hisilicon: Add Kunpeng DMA engine support
  dmaengine: idxd: add char driver to expose submission portal to userland
  dmaengine: idxd: connect idxd to dmaengine subsystem
  dmaengine: idxd: add descriptor manipulation routines
  dmaengine: idxd: add sysfs ABI for idxd driver
  dmaengine: idxd: add configuration component of driver
  dmaengine: idxd: Init and probe for Intel data accelerators
  dmaengine: add support to dynamic register/unregister of channels
  dmaengine: break out channel registration
  x86/asm: add iosubmit_cmds512() based on MOVDIR64B CPU instruction
  dmaengine: ti: k3-udma: fix spelling mistake "limted" -> "limited"
  dmaengine: s3c24xx-dma: fix spelling mistake "to" -> "too"
  dmaengine: Move dma_get_{,any_}slave_channel() to private dmaengine.h
  dmaengine: Remove dma_request_slave_channel_compat() wrapper
  dmaengine: Remove dma_device_satisfies_mask() wrapper
  dt-bindings: fsl-imx-sdma: Add i.MX8MM/i.MX8MN/i.MX8MP compatible string
  dmaengine: zynqmp_dma: fix burst length configuration
  dmaengine: sun4i: Add support for cyclic requests with dedicated DMA
  dmaengine: fsl-qdma: fix duplicated argument to &&
  ...
2020-01-27 10:55:50 -08:00
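
For context on the metadata_ops item flagged in the core list above, here is a minimal client-side sketch of the new descriptor-metadata interface. It assumes the client-managed (DESC_METADATA_CLIENT) mode and uses the dmaengine_desc_attach_metadata() helper introduced in this cycle; the foo_* function and its buffer/length parameters are hypothetical and not taken from any driver in this pull.

/* Hypothetical client (sketch only): attach metadata before submitting. */
#include <linux/dmaengine.h>

static int foo_issue_with_metadata(struct dma_chan *chan, dma_addr_t buf,
				   size_t len, void *md, size_t md_len)
{
	struct dma_async_tx_descriptor *desc;
	int ret;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/* DESC_METADATA_CLIENT: the buffer must be attached before submit. */
	ret = dmaengine_desc_attach_metadata(desc, md, md_len);
	if (ret)
		return ret;	/* error handling simplified for the sketch */

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}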

143 lines · 3.5 KiB · C

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}

dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);
	list_move_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);

/**
 * vchan_tx_desc_free - free a reusable descriptor
 * @tx: the transfer
 *
 * This function frees a previously allocated reusable descriptor. The only
 * other way is to clear the DMA_CTRL_REUSE flag and submit the transfer one
 * last time.
 *
 * Returns 0 upon success
 */
int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
		vc, vd, vd->tx.cookie);
	vc->desc_free(vd);
	return 0;
}
EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
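
/*
 * Editorial note, not part of upstream virt-dma.c: clients normally reach
 * vchan_tx_desc_free() indirectly through the generic dmaengine wrappers,
 * roughly (sketch only):
 *
 *	desc = dmaengine_prep_slave_single(chan, buf, len, dir, flags);
 *	dmaengine_desc_set_reuse(desc);     (sets DMA_CTRL_REUSE)
 *	...submit and reuse the descriptor as needed...
 *	dmaengine_desc_free(desc);          (calls the descriptor's ->desc_free,
 *	                                     i.e. the function above)
 *
 * vchan_tx_prep() in virt-dma.h is what wires ->desc_free up to
 * vchan_tx_desc_free().
 */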

struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);

/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd, *_vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	/* Move the completed descriptors to a private list under the lock. */
	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	} else {
		memset(&cb, 0, sizeof(cb));
	}
	spin_unlock_irq(&vc->lock);

	/* A pending cyclic callback fires first; cb is zeroed otherwise. */
	dmaengine_desc_callback_invoke(&cb, &vd->tx_result);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);
		dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
		vchan_vdesc_fini(vd);
	}
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	struct virt_dma_desc *vd, *_vd;

	list_for_each_entry_safe(vd, _vd, head, node) {
		list_del(&vd->node);
		vchan_vdesc_fini(vd);
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);

void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_allocated);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);
	INIT_LIST_HEAD(&vc->desc_terminated);

	tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");
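
As a usage note: a DMA controller driver builds its channel handling on the exported functions above together with the inline helpers in virt-dma.h (vchan_tx_prep(), vchan_issue_pending(), vchan_cookie_complete()). The following is a minimal sketch of that pattern; the foo_* types and names are hypothetical and all hardware programming is elided.

/* Hypothetical controller driver built on virt-dma (sketch only). */
#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

struct foo_desc {
	struct virt_dma_desc vd;	/* embedded; recovered with container_of() */
	/* ...hardware descriptor data... */
};

struct foo_chan {
	struct virt_dma_chan vc;
	/* ...register base, currently running descriptor, etc... */
};

static void foo_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct foo_desc, vd));
}

static struct dma_async_tx_descriptor *
foo_prep_desc_sketch(struct dma_chan *chan, unsigned long flags)
{
	struct foo_chan *fc = container_of(to_virt_chan(chan),
					   struct foo_chan, vc);
	struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

	if (!d)
		return NULL;

	/* Hands the descriptor to the submitted/issued lists managed above. */
	return vchan_tx_prep(&fc->vc, &d->vd, flags);
}

static void foo_issue_pending(struct dma_chan *chan)
{
	struct foo_chan *fc = container_of(to_virt_chan(chan),
					   struct foo_chan, vc);
	unsigned long flags;

	spin_lock_irqsave(&fc->vc.lock, flags);
	if (vchan_issue_pending(&fc->vc)) {
		/* ...program the hardware with the first issued descriptor... */
	}
	spin_unlock_irqrestore(&fc->vc.lock, flags);
}

/*
 * From the controller's completion interrupt, with fc->vc.lock held:
 *	vchan_cookie_complete(&d->vd);	// schedules vchan_complete() above
 */

static void foo_init_channel(struct foo_chan *fc, struct dma_device *dd)
{
	fc->vc.desc_free = foo_desc_free;
	vchan_init(&fc->vc, dd);	/* adds the channel to dd->channels */
}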