1
0
Fork 0

Merge branch 'pxp/next' into next

* pxp/next:
  media: pxp device: fix kernel dump when run pxp_test
  media: v4l2: add pxp_v4l2 driver
  dma: pxp: porting pxp dma driver from imx_4.19.y
5.4-rM2-2.2.x-imx-squashed
Dong Aisheng 2019-12-02 18:05:22 +08:00
commit 2c04041962
17 changed files with 40822 additions and 0 deletions

View File

@ -438,6 +438,8 @@ config MXS_DMA
Support the MXS DMA engine. This engine including APBH-DMA
and APBX-DMA is integrated into some Freescale chips.
source "drivers/dma/pxp/Kconfig"
config MX3_IPU
bool "MX3x Image Processing Unit support"
depends on ARCH_MXC

View File

@ -60,6 +60,8 @@ obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_MXC_PXP_V2) += pxp/
obj-$(CONFIG_MXC_PXP_V3) += pxp/
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o

View File

@ -0,0 +1,22 @@
config MXC_PXP_V2
bool "MXC PxP V2 support"
depends on ARM
select DMA_ENGINE
help
Support the PxP (Pixel Pipeline) on i.MX6 DualLite and i.MX6 SoloLite.
If unsure, select N.
config MXC_PXP_V3
bool "MXC PxP V3 support"
depends on ARM
select DMA_ENGINE
help
Support the PxP V3(Pixel Pipeline) on i.MX7D. The PxP V3 supports
more functions than PxP V2, such as dithering and REAGL/-D processing.
If unsure, select N.
config MXC_PXP_CLIENT_DEVICE
bool "MXC PxP Client Device"
default y
depends on MXC_PXP_V2 || MXC_PXP_V3

View File

@ -0,0 +1,3 @@
obj-$(CONFIG_MXC_PXP_V2) += pxp_dma_v2.o
obj-$(CONFIG_MXC_PXP_V3) += pxp_dma_v3.o
obj-$(CONFIG_MXC_PXP_CLIENT_DEVICE) += pxp_device.o

View File

@ -0,0 +1,897 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2010-2015 Freescale Semiconductor, Inc. All Rights Reserved.
*/
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pxp_device.h>
#include <linux/atomic.h>
#include <linux/platform_data/dma-imx.h>
#define BUFFER_HASH_ORDER 4
static struct pxp_buffer_hash bufhash;
static struct pxp_irq_info irq_info[NR_PXP_VIRT_CHANNEL];
static int major;
static struct class *pxp_class;
static struct device *pxp_dev;
/*
 * Allocate and initialize the buffer hash table with 2^order buckets.
 * Returns 0 on success or -ENOMEM if the bucket array cannot be allocated.
 */
static int pxp_ht_create(struct pxp_buffer_hash *hash, int order)
{
	unsigned long i;
	unsigned long table_size;

	table_size = 1UL << order;
	hash->order = order;
	/*
	 * kmalloc_array() checks the count * size multiplication for
	 * overflow; the original open-coded kmalloc(sizeof * n) did not.
	 */
	hash->hash_table = kmalloc_array(table_size, sizeof(*hash->hash_table),
					 GFP_KERNEL);
	if (!hash->hash_table) {
		pr_err("%s: Out of memory for hash table\n", __func__);
		return -ENOMEM;
	}

	for (i = 0; i < table_size; i++)
		INIT_HLIST_HEAD(&hash->hash_table[i]);

	return 0;
}
/*
 * Link @new into the bucket derived from its page frame number
 * (offset >> PAGE_SHIFT).  Always succeeds.
 */
static int pxp_ht_insert_item(struct pxp_buffer_hash *hash,
			      struct pxp_buf_obj *new)
{
	struct hlist_head *bucket;
	unsigned long pfn = new->offset >> PAGE_SHIFT;

	bucket = &hash->hash_table[hash_long(pfn, hash->order)];

	spin_lock(&hash->hash_lock);
	hlist_add_head_rcu(&new->item, bucket);
	spin_unlock(&hash->hash_lock);

	return 0;
}
/* Unlink @obj from the buffer hash table under hash_lock.  Always returns 0. */
static int pxp_ht_remove_item(struct pxp_buffer_hash *hash,
			      struct pxp_buf_obj *obj)
{
	spin_lock(&hash->hash_lock);
	hlist_del_init_rcu(&obj->item);
	spin_unlock(&hash->hash_lock);
	return 0;
}
/*
 * Look up a buffer by page frame number (@key = phys addr >> PAGE_SHIFT).
 * Returns the matching hlist node, or NULL if no buffer with that PFN
 * has been hashed.
 *
 * NOTE(review): the list is walked with the _rcu iterator but callers do
 * not appear to take rcu_read_lock(), while writers use hash_lock —
 * confirm the intended locking scheme.
 */
static struct hlist_node *pxp_ht_find_key(struct pxp_buffer_hash *hash,
					  unsigned long key)
{
	struct pxp_buf_obj *entry;
	struct hlist_head *h_list;
	unsigned long hashkey;

	hashkey = hash_long(key, hash->order);
	h_list = &hash->hash_table[hashkey];

	hlist_for_each_entry_rcu(entry, h_list, item) {
		if (entry->offset >> PAGE_SHIFT == key)
			return &entry->item;
	}

	return NULL;
}
/* Free the bucket array; the table must be empty (or its objects leaked). */
static void pxp_ht_destroy(struct pxp_buffer_hash *hash)
{
	kfree(hash->hash_table);
	hash->hash_table = NULL;
}
/*
 * Allocate a user-visible idr handle (>= 1, so 0 is never valid) for
 * @obj and report it through @handlep.
 * Returns 0 on success or the negative idr_alloc() error.
 */
static int pxp_buffer_handle_create(struct pxp_file *file_priv,
				    struct pxp_buf_obj *obj,
				    uint32_t *handlep)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->buffer_lock);
	id = idr_alloc(&file_priv->buffer_idr, obj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_priv->buffer_lock);
	idr_preload_end();

	if (id < 0)
		return id;

	*handlep = id;
	return 0;
}
/* Map a user-visible buffer handle back to its pxp_buf_obj, or NULL. */
static struct pxp_buf_obj *
pxp_buffer_object_lookup(struct pxp_file *file_priv,
			 uint32_t handle)
{
	struct pxp_buf_obj *obj;

	spin_lock(&file_priv->buffer_lock);
	obj = idr_find(&file_priv->buffer_idr, handle);
	spin_unlock(&file_priv->buffer_lock);

	return obj;
}
/*
 * Drop the idr entry for @handle.
 * Returns 0 on success or -EINVAL if the handle was never allocated.
 */
static int pxp_buffer_handle_delete(struct pxp_file *file_priv,
				    uint32_t handle)
{
	struct pxp_buf_obj *obj;
	int ret = 0;

	spin_lock(&file_priv->buffer_lock);
	obj = idr_find(&file_priv->buffer_idr, handle);
	if (obj)
		idr_remove(&file_priv->buffer_idr, handle);
	else
		ret = -EINVAL;
	spin_unlock(&file_priv->buffer_lock);

	return ret;
}
/*
 * Allocate a user-visible idr handle (starting at 0) for the channel
 * object and report it through @handlep.
 * Returns 0 on success or the negative idr_alloc() error.
 */
static int pxp_channel_handle_create(struct pxp_file *file_priv,
				     struct pxp_chan_obj *obj,
				     uint32_t *handlep)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->channel_lock);
	id = idr_alloc(&file_priv->channel_idr, obj, 0, 0, GFP_NOWAIT);
	spin_unlock(&file_priv->channel_lock);
	idr_preload_end();

	if (id < 0)
		return id;

	*handlep = id;
	return 0;
}
/* Map a channel handle back to its pxp_chan_obj, or NULL if unknown. */
static struct pxp_chan_obj *
pxp_channel_object_lookup(struct pxp_file *file_priv,
			  uint32_t handle)
{
	struct pxp_chan_obj *obj;

	spin_lock(&file_priv->channel_lock);
	obj = idr_find(&file_priv->channel_idr, handle);
	spin_unlock(&file_priv->channel_lock);

	return obj;
}
/*
 * Drop the idr entry for a channel @handle.
 * Returns 0 on success or -EINVAL if the handle was never allocated.
 */
static int pxp_channel_handle_delete(struct pxp_file *file_priv,
				     uint32_t handle)
{
	struct pxp_chan_obj *obj;
	int ret = 0;

	spin_lock(&file_priv->channel_lock);
	obj = idr_find(&file_priv->channel_idr, handle);
	if (obj)
		idr_remove(&file_priv->channel_idr, handle);
	else
		ret = -EINVAL;
	spin_unlock(&file_priv->channel_lock);

	return ret;
}
static int pxp_alloc_dma_buffer(struct pxp_buf_obj *obj)
{
obj->virtual = dma_alloc_coherent(pxp_dev, PAGE_ALIGN(obj->size),
(dma_addr_t *) (&obj->offset),
GFP_DMA | GFP_KERNEL);
pr_debug("[ALLOC] mem alloc phys_addr = 0x%lx\n", obj->offset);
if (obj->virtual == NULL) {
printk(KERN_ERR "Physical memory allocation error!\n");
return -1;
}
return 0;
}
/* Return @obj's coherent DMA buffer to the system, if one was allocated. */
static void pxp_free_dma_buffer(struct pxp_buf_obj *obj)
{
	if (!obj->virtual)
		return;

	dma_free_coherent(pxp_dev, PAGE_ALIGN(obj->size), obj->virtual,
			  (dma_addr_t)obj->offset);
}
/*
 * idr_for_each() callback: tear down one buffer object on file release.
 * Removes the handle, unhashes the buffer, frees its DMA memory and the
 * object itself.  A non-zero return aborts the idr walk early.
 */
static int
pxp_buffer_object_free(int id, void *ptr, void *data)
{
	struct pxp_file *file_priv = data;
	struct pxp_buf_obj *obj = ptr;
	int ret;

	ret = pxp_buffer_handle_delete(file_priv, obj->handle);
	if (ret < 0)
		return ret;

	pxp_ht_remove_item(&bufhash, obj);
	pxp_free_dma_buffer(obj);
	kfree(obj);

	return 0;
}
/*
 * idr_for_each() callback: release one DMA channel on file release.
 * Blocks until every submitted transaction on the channel has completed
 * (irq_pending reaches zero) before handing the channel back.
 */
static int
pxp_channel_object_free(int id, void *ptr, void *data)
{
	struct pxp_file *file_priv = data;
	struct pxp_chan_obj *obj = ptr;
	int chan_id;

	chan_id = obj->chan->chan_id;
	wait_event(irq_info[chan_id].waitq,
		   atomic_read(&irq_info[chan_id].irq_pending) == 0);

	pxp_channel_handle_delete(file_priv, obj->handle);
	dma_release_channel(obj->chan);
	kfree(obj);

	return 0;
}
/* Free every DMA buffer still owned by @file_priv and destroy its idr. */
static void pxp_free_buffers(struct pxp_file *file_priv)
{
	idr_for_each(&file_priv->buffer_idr,
		     &pxp_buffer_object_free, file_priv);
	idr_destroy(&file_priv->buffer_idr);
}
/* Release every DMA channel still owned by @file_priv and destroy its idr. */
static void pxp_free_channels(struct pxp_file *file_priv)
{
	idr_for_each(&file_priv->channel_idr,
		     &pxp_channel_object_free, file_priv);
	idr_destroy(&file_priv->channel_idr);
}
/* Callback function triggered after PxP receives an EOF interrupt */
static void pxp_dma_done(void *arg)
{
	struct pxp_tx_desc *tx_desc = to_tx_desc(arg);
	struct dma_chan *chan = tx_desc->txd.chan;
	struct pxp_channel *pxp_chan = to_pxp_channel(chan);
	int chan_id = pxp_chan->dma_chan.chan_id;

	pr_debug("DMA Done ISR, chan_id %d\n", chan_id);

	/* One outstanding transaction finished: publish its histogram
	 * status, then wake any PXP_IOC_WAIT4CMPLT / release waiter. */
	atomic_dec(&irq_info[chan_id].irq_pending);
	irq_info[chan_id].hist_status = tx_desc->hist_status;

	wake_up(&(irq_info[chan_id].waitq));
}
/*
 * PXP_IOC_CONFIG_CHAN handler: copy a pxp_config_data from user space,
 * prepare a slave-sg transaction on the channel named by ->handle, copy
 * the layer parameters into the driver-built descriptor chain, and
 * submit it.  Completion is signalled asynchronously via pxp_dma_done().
 * Returns 0 on success or a negative errno.
 */
static int pxp_ioc_config_chan(struct pxp_file *priv, unsigned long arg)
{
	struct scatterlist *sg;
	struct pxp_tx_desc *desc;
	struct dma_async_tx_descriptor *txd;
	struct pxp_config_data *pxp_conf;
	dma_cookie_t cookie;
	int handle, chan_id;
	struct dma_chan *chan;
	struct pxp_chan_obj *obj;
	int i = 0, j = 0, k = 0, m = 0, length, ret, sg_len;

	pxp_conf = kzalloc(sizeof(*pxp_conf), GFP_KERNEL);
	if (!pxp_conf)
		return -ENOMEM;

	ret = copy_from_user(pxp_conf,
			     (struct pxp_config_data *)arg,
			     sizeof(struct pxp_config_data));
	if (ret) {
		kfree(pxp_conf);
		return -EFAULT;
	}

	handle = pxp_conf->handle;
	obj = pxp_channel_object_lookup(priv, handle);
	if (!obj) {
		kfree(pxp_conf);
		return -EINVAL;
	}
	chan = obj->chan;
	chan_id = chan->chan_id;

	/* Three base layers (S0, output, overlay) plus four extra
	 * descriptors (2 fetch + 2 store) for each enabled engine. */
	sg_len = 3;
	if (pxp_conf->proc_data.engine_enable & PXP_ENABLE_WFE_A)
		sg_len += 4;
	if (pxp_conf->proc_data.engine_enable & PXP_ENABLE_WFE_B)
		sg_len += 4;
	if (pxp_conf->proc_data.engine_enable & PXP_ENABLE_DITHER)
		sg_len += 4;

	sg = kmalloc(sizeof(*sg) * sg_len, GFP_KERNEL);
	if (!sg) {
		kfree(pxp_conf);
		return -ENOMEM;
	}
	sg_init_table(sg, sg_len);

	txd = chan->device->device_prep_slave_sg(chan,
						 sg, sg_len,
						 DMA_TO_DEVICE,
						 DMA_PREP_INTERRUPT,
						 NULL);
	if (!txd) {
		pr_err("Error preparing a DMA transaction descriptor.\n");
		kfree(pxp_conf);
		kfree(sg);
		return -EIO;
	}

	/* pxp_dma_done() recovers the descriptor from its own txd pointer */
	txd->callback_param = txd;
	txd->callback = pxp_dma_done;

	desc = to_tx_desc(txd);
	length = desc->len;
	/*
	 * Walk the pre-chained descriptors: slot 0 = S0, 1 = output,
	 * 2 = overlay; any remaining slots are filled in groups of four
	 * (fetch0/fetch1/store0/store1) per enabled engine.  The j/m/k
	 * guards ensure each engine's group is copied only once.
	 */
	for (i = 0; i < length; i++) {
		if (i == 0) {	/* S0 */
			memcpy(&desc->proc_data,
			       &pxp_conf->proc_data,
			       sizeof(struct pxp_proc_data));
			memcpy(&desc->layer_param.s0_param,
			       &pxp_conf->s0_param,
			       sizeof(struct pxp_layer_param));
			desc = desc->next;
		} else if (i == 1) {	/* Output */
			memcpy(&desc->layer_param.out_param,
			       &pxp_conf->out_param,
			       sizeof(struct pxp_layer_param));
			desc = desc->next;
		} else if (i == 2) {
			/* OverLay */
			memcpy(&desc->layer_param.ol_param,
			       &pxp_conf->ol_param,
			       sizeof(struct pxp_layer_param));
			desc = desc->next;
		} else if ((pxp_conf->proc_data.engine_enable & PXP_ENABLE_WFE_A) && (j < 4)) {
			for (j = 0; j < 4; j++) {
				if (j == 0) {
					memcpy(&desc->layer_param.processing_param,
					       &pxp_conf->wfe_a_fetch_param[0],
					       sizeof(struct pxp_layer_param));
					desc->layer_param.processing_param.flag = PXP_BUF_FLAG_WFE_A_FETCH0;
				} else if (j == 1) {
					memcpy(&desc->layer_param.processing_param,
					       &pxp_conf->wfe_a_fetch_param[1],
					       sizeof(struct pxp_layer_param));
					desc->layer_param.processing_param.flag = PXP_BUF_FLAG_WFE_A_FETCH1;
				} else if (j == 2) {
					memcpy(&desc->layer_param.processing_param,
					       &pxp_conf->wfe_a_store_param[0],
					       sizeof(struct pxp_layer_param));
					desc->layer_param.processing_param.flag = PXP_BUF_FLAG_WFE_A_STORE0;
				} else if (j == 3) {
					memcpy(&desc->layer_param.processing_param,
					       &pxp_conf->wfe_a_store_param[1],
					       sizeof(struct pxp_layer_param));
					desc->layer_param.processing_param.flag = PXP_BUF_FLAG_WFE_A_STORE1;
				}
				desc = desc->next;
			}
			i += 4;
		} else if ((pxp_conf->proc_data.engine_enable & PXP_ENABLE_WFE_B) && (m < 4)) {
			for (m = 0; m < 4; m++) {
				if (m == 0) {
					memcpy(&desc->layer_param.processing_param,
					       &pxp_conf->wfe_b_fetch_param[0],
					       sizeof(struct pxp_layer_param));
					desc->layer_param.processing_param.flag = PXP_BUF_FLAG_WFE_B_FETCH0;
				} else if (m == 1) {
					memcpy(&desc->layer_param.processing_param,
					       &pxp_conf->wfe_b_fetch_param[1],
					       sizeof(struct pxp_layer_param));
					desc->layer_param.processing_param.flag = PXP_BUF_FLAG_WFE_B_FETCH1;
				} else if (m == 2) {
					memcpy(&desc->layer_param.processing_param,
					       &pxp_conf->wfe_b_store_param[0],
					       sizeof(struct pxp_layer_param));
					desc->layer_param.processing_param.flag = PXP_BUF_FLAG_WFE_B_STORE0;
				} else if (m == 3) {
					memcpy(&desc->layer_param.processing_param,
					       &pxp_conf->wfe_b_store_param[1],
					       sizeof(struct pxp_layer_param));
					desc->layer_param.processing_param.flag = PXP_BUF_FLAG_WFE_B_STORE1;
				}
				desc = desc->next;
			}
			i += 4;
		} else if ((pxp_conf->proc_data.engine_enable & PXP_ENABLE_DITHER) && (k < 4)) {
			for (k = 0; k < 4; k++) {
				if (k == 0) {
					memcpy(&desc->layer_param.processing_param,
					       &pxp_conf->dither_fetch_param[0],
					       sizeof(struct pxp_layer_param));
					desc->layer_param.processing_param.flag = PXP_BUF_FLAG_DITHER_FETCH0;
				} else if (k == 1) {
					memcpy(&desc->layer_param.processing_param,
					       &pxp_conf->dither_fetch_param[1],
					       sizeof(struct pxp_layer_param));
					desc->layer_param.processing_param.flag = PXP_BUF_FLAG_DITHER_FETCH1;
				} else if (k == 2) {
					memcpy(&desc->layer_param.processing_param,
					       &pxp_conf->dither_store_param[0],
					       sizeof(struct pxp_layer_param));
					desc->layer_param.processing_param.flag = PXP_BUF_FLAG_DITHER_STORE0;
				} else if (k == 3) {
					memcpy(&desc->layer_param.processing_param,
					       &pxp_conf->dither_store_param[1],
					       sizeof(struct pxp_layer_param));
					desc->layer_param.processing_param.flag = PXP_BUF_FLAG_DITHER_STORE1;
				}
				desc = desc->next;
			}
			i += 4;
		}
	}

	cookie = txd->tx_submit(txd);
	if (cookie < 0) {
		pr_err("Error tx_submit\n");
		kfree(pxp_conf);
		kfree(sg);
		return -EIO;
	}

	/* Balanced by the atomic_dec() in pxp_dma_done() */
	atomic_inc(&irq_info[chan_id].irq_pending);

	kfree(pxp_conf);
	kfree(sg);
	return 0;
}
/* Per-open setup: empty buffer/channel idr tables and their spinlocks. */
static int pxp_device_open(struct inode *inode, struct file *filp)
{
	struct pxp_file *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;

	priv->filp = filp;
	idr_init(&priv->buffer_idr);
	idr_init(&priv->channel_idr);
	spin_lock_init(&priv->buffer_lock);
	spin_lock_init(&priv->channel_lock);
	filp->private_data = priv;

	return 0;
}
/* Per-open teardown: channels first (they wait for completion), then buffers. */
static int pxp_device_release(struct inode *inode, struct file *filp)
{
	struct pxp_file *priv = filp->private_data;

	if (!priv)
		return 0;

	pxp_free_channels(priv);
	pxp_free_buffers(priv);
	filp->private_data = NULL;
	kfree(priv);

	return 0;
}
/*
 * mmap a buffer previously allocated through PXP_IOC_GET_PHYMEM.  The
 * buffer is located by physical page frame number, so user space must
 * pass the returned phys_addr as the mmap offset.  Cacheability follows
 * the mem_type requested at allocation time.
 */
static int pxp_device_mmap(struct file *file, struct vm_area_struct *vma)
{
	int request_size;
	struct hlist_node *node;
	struct pxp_buf_obj *obj;

	request_size = vma->vm_end - vma->vm_start;

	pr_debug("start=0x%x, pgoff=0x%x, size=0x%x\n",
		 (unsigned int)(vma->vm_start), (unsigned int)(vma->vm_pgoff),
		 request_size);

	node = pxp_ht_find_key(&bufhash, vma->vm_pgoff);
	if (!node)
		return -EINVAL;

	obj = hlist_entry(node, struct pxp_buf_obj, item);

	/*
	 * Reject mappings that run past the end of the buffer.  Compare in
	 * pages on BOTH sides: the original check added a raw byte address
	 * (obj->offset) to a page count, so it effectively never fired and
	 * allowed oversized mappings.
	 */
	if ((obj->offset >> PAGE_SHIFT) +
	    (PAGE_ALIGN(obj->size) >> PAGE_SHIFT) <
	    (vma->vm_pgoff + vma_pages(vma)))
		return -ENOMEM;

	switch (obj->mem_type) {
	case MEMORY_TYPE_UNCACHED:
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		break;
	case MEMORY_TYPE_WC:
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		break;
	case MEMORY_TYPE_CACHED:
		break;
	default:
		pr_err("%s: invalid memory type!\n", __func__);
		return -EINVAL;
	}

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       request_size, vma->vm_page_prot) ? -EAGAIN : 0;
}
/* dma_request_channel() filter: accept only channels of the PxP engine. */
static bool chan_filter(struct dma_chan *chan, void *arg)
{
	return imx_dma_is_pxp(chan) ? true : false;
}
/*
 * ioctl dispatcher for /dev/pxp_device.  Commands manage DMA channels
 * (get/put/config/start/wait-for-completion) and physically contiguous
 * buffers (alloc/free/cache-flush).  Returns 0 on success or a negative
 * errno; unrecognized commands fall through and return 0.
 */
static long pxp_device_ioctl(struct file *filp,
			     unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	struct pxp_file *file_priv = filp->private_data;

	switch (cmd) {
	case PXP_IOC_GET_CHAN:
		{
			/* shadows the outer 'ret' — intentional, case-local */
			int ret;
			struct dma_chan *chan = NULL;
			dma_cap_mask_t mask;
			struct pxp_chan_obj *obj = NULL;

			pr_debug("drv: PXP_IOC_GET_CHAN Line %d\n", __LINE__);

			dma_cap_zero(mask);
			dma_cap_set(DMA_SLAVE, mask);
			dma_cap_set(DMA_PRIVATE, mask);
			chan = dma_request_channel(mask, chan_filter, NULL);
			if (!chan) {
				pr_err("Unsccessfully received channel!\n");
				return -EBUSY;
			}
			pr_debug("Successfully received channel."
				 "chan_id %d\n", chan->chan_id);

			obj = kzalloc(sizeof(*obj), GFP_KERNEL);
			if (!obj) {
				dma_release_channel(chan);
				return -ENOMEM;
			}
			obj->chan = chan;

			ret = pxp_channel_handle_create(file_priv, obj,
							&obj->handle);
			if (ret) {
				dma_release_channel(chan);
				kfree(obj);
				return ret;
			}

			init_waitqueue_head(&(irq_info[chan->chan_id].waitq));

			/* hand the opaque channel handle back to user space */
			if (put_user(obj->handle, (u32 __user *) arg)) {
				pxp_channel_handle_delete(file_priv, obj->handle);
				dma_release_channel(chan);
				kfree(obj);
				return -EFAULT;
			}
			break;
		}
	case PXP_IOC_PUT_CHAN:
		{
			int handle;
			struct pxp_chan_obj *obj;

			if (get_user(handle, (u32 __user *) arg))
				return -EFAULT;

			pr_debug("%d release handle %d\n", __LINE__, handle);

			obj = pxp_channel_object_lookup(file_priv, handle);
			if (!obj)
				return -EINVAL;

			pxp_channel_handle_delete(file_priv, obj->handle);
			dma_release_channel(obj->chan);
			kfree(obj);
			break;
		}
	case PXP_IOC_CONFIG_CHAN:
		{
			int ret;

			ret = pxp_ioc_config_chan(file_priv, arg);
			if (ret)
				return ret;
			break;
		}
	case PXP_IOC_START_CHAN:
		{
			int handle;
			struct pxp_chan_obj *obj = NULL;

			if (get_user(handle, (u32 __user *) arg))
				return -EFAULT;

			obj = pxp_channel_object_lookup(file_priv, handle);
			if (!obj)
				return -EINVAL;

			/* kick the dmaengine: start submitted transactions */
			dma_async_issue_pending(obj->chan);
			break;
		}
	case PXP_IOC_GET_PHYMEM:
		{
			struct pxp_mem_desc buffer;
			struct pxp_buf_obj *obj;

			ret = copy_from_user(&buffer,
					     (struct pxp_mem_desc *)arg,
					     sizeof(struct pxp_mem_desc));
			if (ret)
				return -EFAULT;

			pr_debug("[ALLOC] mem alloc size = 0x%x\n",
				 buffer.size);

			obj = kzalloc(sizeof(*obj), GFP_KERNEL);
			if (!obj)
				return -ENOMEM;
			obj->size = buffer.size;
			obj->mem_type = buffer.mtype;

			ret = pxp_alloc_dma_buffer(obj);
			if (ret == -1) {
				printk(KERN_ERR
				       "Physical memory allocation error!\n");
				kfree(obj);
				return ret;
			}

			ret = pxp_buffer_handle_create(file_priv, obj, &obj->handle);
			if (ret) {
				pxp_free_dma_buffer(obj);
				kfree(obj);
				return ret;
			}
			buffer.handle = obj->handle;
			buffer.phys_addr = obj->offset;

			ret = copy_to_user((void __user *)arg, &buffer,
					   sizeof(struct pxp_mem_desc));
			if (ret) {
				pxp_buffer_handle_delete(file_priv, buffer.handle);
				pxp_free_dma_buffer(obj);
				kfree(obj);
				return -EFAULT;
			}

			/* make the buffer findable by pxp_device_mmap() */
			pxp_ht_insert_item(&bufhash, obj);
			break;
		}
	case PXP_IOC_PUT_PHYMEM:
		{
			struct pxp_mem_desc pxp_mem;
			struct pxp_buf_obj *obj;

			ret = copy_from_user(&pxp_mem,
					     (struct pxp_mem_desc *)arg,
					     sizeof(struct pxp_mem_desc));
			if (ret)
				return -EACCES;

			obj = pxp_buffer_object_lookup(file_priv, pxp_mem.handle);
			if (!obj)
				return -EINVAL;

			ret = pxp_buffer_handle_delete(file_priv, obj->handle);
			if (ret)
				return ret;

			pxp_ht_remove_item(&bufhash, obj);
			pxp_free_dma_buffer(obj);
			kfree(obj);
			break;
		}
	case PXP_IOC_FLUSH_PHYMEM:
		{
			int ret;
			struct pxp_mem_flush flush;
			struct pxp_buf_obj *obj;

			ret = copy_from_user(&flush,
					     (struct pxp_mem_flush *)arg,
					     sizeof(struct pxp_mem_flush));
			if (ret)
				return -EACCES;

			obj = pxp_buffer_object_lookup(file_priv, flush.handle);
			if (!obj)
				return -EINVAL;

			/*
			 * NOTE(review): all three cases call
			 * dma_sync_single_for_device() with a NULL device,
			 * and CACHE_INVALIDATE would normally map to
			 * dma_sync_single_for_cpu() — confirm against the
			 * platform's DMA-mapping implementation.
			 */
			switch (flush.type) {
			case CACHE_CLEAN:
				dma_sync_single_for_device(NULL, obj->offset,
							   obj->size, DMA_TO_DEVICE);
				break;
			case CACHE_INVALIDATE:
				dma_sync_single_for_device(NULL, obj->offset,
							   obj->size, DMA_FROM_DEVICE);
				break;
			case CACHE_FLUSH:
				dma_sync_single_for_device(NULL, obj->offset,
							   obj->size, DMA_TO_DEVICE);
				dma_sync_single_for_device(NULL, obj->offset,
							   obj->size, DMA_FROM_DEVICE);
				break;
			default:
				pr_err("%s: invalid cache flush type\n", __func__);
				return -EINVAL;
			}
			break;
		}
	case PXP_IOC_WAIT4CMPLT:
		{
			struct pxp_chan_handle chan_handle;
			int ret, chan_id, handle;
			struct pxp_chan_obj *obj = NULL;

			ret = copy_from_user(&chan_handle,
					     (struct pxp_chan_handle *)arg,
					     sizeof(struct pxp_chan_handle));
			if (ret)
				return -EFAULT;

			handle = chan_handle.handle;
			obj = pxp_channel_object_lookup(file_priv, handle);
			if (!obj)
				return -EINVAL;
			chan_id = obj->chan->chan_id;

			/* sleep until pxp_dma_done() drains irq_pending */
			ret = wait_event_interruptible
			    (irq_info[chan_id].waitq,
			     (atomic_read(&irq_info[chan_id].irq_pending) == 0));
			if (ret < 0)
				return -ERESTARTSYS;

			chan_handle.hist_status = irq_info[chan_id].hist_status;
			ret = copy_to_user((struct pxp_chan_handle *)arg,
					   &chan_handle,
					   sizeof(struct pxp_chan_handle));
			if (ret)
				return -EFAULT;
			break;
		}
	default:
		break;
	}

	return 0;
}
/* Character-device entry points for /dev/pxp_device. */
static const struct file_operations pxp_device_fops = {
	.open = pxp_device_open,
	.release = pxp_device_release,
	.unlocked_ioctl = pxp_device_ioctl,
	.mmap = pxp_device_mmap,
};
int register_pxp_device(void)
{
int ret;
if (!major) {
major = register_chrdev(0, "pxp_device", &pxp_device_fops);
if (major < 0) {
printk(KERN_ERR "Unable to register pxp device\n");
ret = major;
goto register_cdev_fail;
}
pxp_class = class_create(THIS_MODULE, "pxp_device");
if (IS_ERR(pxp_class)) {
ret = PTR_ERR(pxp_class);
goto pxp_class_fail;
}
pxp_dev = device_create(pxp_class, NULL, MKDEV(major, 0),
NULL, "pxp_device");
if (IS_ERR(pxp_dev)) {
ret = PTR_ERR(pxp_dev);
goto dev_create_fail;
}
pxp_dev->dma_mask = kmalloc(sizeof(*pxp_dev->dma_mask),
GFP_KERNEL);
*pxp_dev->dma_mask = DMA_BIT_MASK(32);
pxp_dev->coherent_dma_mask = DMA_BIT_MASK(32);
}
ret = pxp_ht_create(&bufhash, BUFFER_HASH_ORDER);
if (ret) {
goto ht_create_fail;
}
spin_lock_init(&(bufhash.hash_lock));
pr_debug("PxP_Device registered Successfully\n");
return 0;
ht_create_fail:
device_destroy(pxp_class, MKDEV(major, 0));
dev_create_fail:
class_destroy(pxp_class);
pxp_class_fail:
unregister_chrdev(major, "pxp_device");
register_cdev_fail:
return ret;
}
void unregister_pxp_device(void)
{
pxp_ht_destroy(&bufhash);
if (major) {
device_destroy(pxp_class, MKDEV(major, 0));
class_destroy(pxp_class);
unregister_chrdev(major, "pxp_device");
major = 0;
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,266 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2010-2016 Freescale Semiconductor, Inc.
* Copyright 2017 NXP
*/
#ifndef _REG_BITFIELDS_H
#define _REG_BITFIELDS_H

/*
 * C bitfield overlays for PxP hardware registers, declared LSB-first.
 * Each struct is one 32-bit register wide except the store d/f shift
 * registers, which are 64 bits.
 * NOTE(review): bitfield layout is compiler/ABI dependent — these
 * overlays assume the little-endian ARM EABI packing; confirm before
 * reusing on another architecture or toolchain.
 */

/* Data-path mux selection: 2 select bits for each of the 16 muxes. */
struct mux_config {
	uint32_t mux0_sel : 2;
	uint32_t mux1_sel : 2;
	uint32_t mux2_sel : 2;
	uint32_t mux3_sel : 2;
	uint32_t mux4_sel : 2;
	uint32_t mux5_sel : 2;
	uint32_t mux6_sel : 2;
	uint32_t mux7_sel : 2;
	uint32_t mux8_sel : 2;
	uint32_t mux9_sel : 2;
	uint32_t mux10_sel : 2;
	uint32_t mux11_sel : 2;
	uint32_t mux12_sel : 2;
	uint32_t mux13_sel : 2;
	uint32_t mux14_sel : 2;
	uint32_t mux15_sel : 2;
};

/* legacy engine registers */

/* PS (processed surface) control: pixel format and x/y decimation. */
struct ps_ctrl {
	uint32_t format : 6;
	uint32_t wb_swap : 1;
	uint32_t rsvd0 : 1;
	uint32_t decy : 2;
	uint32_t decx : 2;
	uint32_t rsvd1 : 20;
};

/* PS scale factors for both axes. */
struct ps_scale {
	uint32_t xscale : 15;
	uint32_t rsvd1 : 1;
	uint32_t yscale : 15;
	uint32_t rsvd2 : 1;
};

/* PS offset within the output frame. */
struct ps_offset {
	uint32_t xoffset : 12;
	uint32_t rsvd1 : 4;
	uint32_t yoffset : 12;
	uint32_t rsvd2 : 4;
};

/* AS (alpha surface) control: blending mode, colorkey, ROP. */
struct as_ctrl {
	uint32_t rsvd0 : 1;
	uint32_t alpha_ctrl : 2;
	uint32_t enable_colorkey : 1;
	uint32_t format : 4;
	uint32_t alpha : 8;
	uint32_t rop : 4;
	uint32_t alpha0_invert : 1;
	uint32_t alpha1_invert : 1;
	uint32_t rsvd1 : 10;
};

/* Output buffer control: format, interlacing, output alpha. */
struct out_ctrl {
	uint32_t format : 5;
	uint32_t rsvd0 : 3;
	uint32_t interlaced_output : 2;
	uint32_t rsvd1 : 13;
	uint32_t alpha_output : 1;
	uint32_t alpha : 8;
};

/* A packed (x, y) pixel coordinate pair. */
struct coordinate {
	uint32_t y : 14;
	uint32_t rsvd0 : 2;
	uint32_t x : 14;
	uint32_t rsvd1 : 2;
};

/* Porter-Duff alpha blending configuration for surfaces S0 and S1. */
struct pxp_alpha_ctrl {
	uint32_t poter_duff_enable : 1;
	uint32_t s0_s1_factor_mode : 2;
	uint32_t s0_global_alpha_mode : 2;
	uint32_t s0_alpha_mode : 1;
	uint32_t s0_color_mode : 1;
	uint32_t rsvd1 : 1;
	uint32_t s1_s0_factor_mode : 2;
	uint32_t s1_global_alpha_mode : 2;
	uint32_t s1_alpha_mode : 1;
	uint32_t s1_color_mode : 1;
	uint32_t rsvd0 : 2;
	uint32_t s0_global_alpha : 8;
	uint32_t s1_global_alpha : 8;
};

/* store engine registers */

/* Store engine channel control. */
struct store_ctrl {
	uint32_t ch_en : 1;
	uint32_t block_en : 1;
	uint32_t block_16 : 1;
	uint32_t handshake_en : 1;
	uint32_t array_en : 1;
	uint32_t array_line_num : 2;
	uint32_t rsvd3 : 1;
	uint32_t store_bypass_en : 1;
	uint32_t store_memory_en : 1;
	uint32_t pack_in_sel : 1;
	uint32_t fill_data_en : 1;
	uint32_t rsvd2 : 4;
	uint32_t wr_num_bytes : 2;
	uint32_t rsvd1 : 6;
	uint32_t combine_2channel : 1;
	uint32_t rsvd0 : 6;
	uint32_t arbit_en : 1;
};

/* Store output frame dimensions. */
struct store_size {
	uint32_t out_width : 16;
	uint32_t out_height : 16;
};

/* Store output line pitch for the two channels. */
struct store_pitch {
	uint32_t ch0_out_pitch : 16;
	uint32_t ch1_out_pitch : 16;
};

/* Store shift/pack control: output bpp and YUV422 packing. */
struct store_shift_ctrl {
	uint32_t rsvd2 : 2;
	uint32_t output_active_bpp : 2;
	uint32_t out_yuv422_1p_en : 1;
	uint32_t out_yuv422_2p_en : 1;
	uint32_t rsvd1 : 1;
	uint32_t shift_bypass : 1;
	uint32_t rsvd0 : 24;
};

/* Store data shift: 8 (width, flag) pairs packed into one 64-bit register. */
struct store_d_shift {
	uint64_t d_shift_width0 : 6;
	uint64_t rsvd3 : 1;
	uint64_t d_shift_flag0 : 1;
	uint64_t d_shift_width1 : 6;
	uint64_t rsvd2 : 1;
	uint64_t d_shift_flag1 : 1;
	uint64_t d_shift_width2 : 6;
	uint64_t rsvd1 : 1;
	uint64_t d_shift_flag2 : 1;
	uint64_t d_shift_width3 : 6;
	uint64_t rsvd0 : 1;
	uint64_t d_shift_flag3 : 1;
	uint64_t d_shift_width4 : 6;
	uint64_t rsvd7 : 1;
	uint64_t d_shift_flag4 : 1;
	uint64_t d_shift_width5 : 6;
	uint64_t rsvd6 : 1;
	uint64_t d_shift_flag5 : 1;
	uint64_t d_shift_width6 : 6;
	uint64_t rsvd5 : 1;
	uint64_t d_shift_flag6 : 1;
	uint64_t d_shift_width7 : 6;
	uint64_t rsvd4 : 1;
	uint64_t d_shift_flag7 : 1;
};

/* Store flag shift: same 8-pair layout as store_d_shift. */
struct store_f_shift {
	uint64_t f_shift_width0 : 6;
	uint64_t rsvd3 : 1;
	uint64_t f_shift_flag0 : 1;
	uint64_t f_shift_width1 : 6;
	uint64_t rsvd2 : 1;
	uint64_t f_shift_flag1 : 1;
	uint64_t f_shift_width2 : 6;
	uint64_t rsvd1 : 1;
	uint64_t f_shift_flag2 : 1;
	uint64_t f_shift_width3 : 6;
	uint64_t rsvd0 : 1;
	uint64_t f_shift_flag3 : 1;
	uint64_t f_shift_width4 : 6;
	uint64_t rsvd7 : 1;
	uint64_t f_shift_flag4 : 1;
	uint64_t f_shift_width5 : 6;
	uint64_t rsvd6 : 1;
	uint64_t f_shift_flag5 : 1;
	uint64_t f_shift_width6 : 6;
	uint64_t rsvd5 : 1;
	uint64_t f_shift_flag6 : 1;
	uint64_t f_shift_width7 : 6;
	uint64_t rsvd4 : 1;
	uint64_t f_shift_flag7 : 1;
};

/* Store data mask, split into low/high 32-bit halves. */
struct store_d_mask {
	uint64_t d_mask_l : 32;
	uint64_t d_mask_h : 32;
};

/* fetch engine registers */

/* Fetch engine channel control: flips, rotation, burst sizing. */
struct fetch_ctrl {
	uint32_t ch_en : 1;
	uint32_t block_en : 1;
	uint32_t block_16 : 1;
	uint32_t handshake_en : 1;
	uint32_t bypass_pixel_en : 1;
	uint32_t high_byte : 1;
	uint32_t rsvd4 : 3;
	uint32_t hflip : 1;
	uint32_t vflip : 1;
	uint32_t rsvd3 : 1;
	uint32_t rotation_angle : 2;
	uint32_t rsvd2 : 2;
	uint32_t rd_num_bytes : 2;
	uint32_t rsvd1 : 6;
	uint32_t handshake_scan_line_num : 2;
	uint32_t rsvd0 : 5;
	uint32_t arbit_en : 1;
};

/* Upper-left corner of the fetch active region. */
struct fetch_active_size_ulc {
	uint32_t active_size_ulc_x : 16;
	uint32_t active_size_ulc_y : 16;
};

/* Lower-right corner of the fetch active region. */
struct fetch_active_size_lrc {
	uint32_t active_size_lrc_x : 16;
	uint32_t active_size_lrc_y : 16;
};

/* Total input frame dimensions. */
struct fetch_size {
	uint32_t input_total_width : 16;
	uint32_t input_total_height : 16;
};

/* Input line pitch for the two fetch channels. */
struct fetch_pitch {
	uint32_t ch0_input_pitch : 16;
	uint32_t ch1_input_pitch : 16;
};

/* Fetch shift/expand control: input bpp and format expansion. */
struct fetch_shift_ctrl {
	uint32_t input_active_bpp : 2;
	uint32_t rsvd1 : 6;
	uint32_t expand_format : 3;
	uint32_t expand_en : 1;
	uint32_t shift_bypass : 1;
	uint32_t rsvd0 : 19;
};

/* Per-component fetch shift offsets. */
struct fetch_shift_offset {
	uint32_t offset0 : 5;
	uint32_t rsvd3 : 3;
	uint32_t offset1 : 5;
	uint32_t rsvd2 : 3;
	uint32_t offset2 : 5;
	uint32_t rsvd1 : 3;
	uint32_t offset3 : 5;
	uint32_t rsvd0 : 3;
};

/* Per-component fetch shift widths. */
struct fetch_shift_width {
	uint32_t width0 : 4;
	uint32_t width1 : 4;
	uint32_t width2 : 4;
	uint32_t width3 : 4;
	uint32_t rsvd0 : 16;
};
#endif

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -3,3 +3,14 @@ config VIDEO_MXC_IPU_OUTPUT
depends on VIDEO_MXC_OUTPUT && MXC_IPU
help
This is the video4linux2 driver for IPU post processing video output.
config VIDEO_MXC_PXP_V4L2
tristate "MXC PxP V4L2 driver"
depends on VIDEO_DEV && VIDEO_V4L2
select VIDEOBUF_DMA_CONTIG
help
This is a video4linux driver for the Freescale PxP
(Pixel Pipeline). This module supports output overlay of
the MXC framebuffer on a video stream.
To compile this driver as a module, choose M here.

View File

@ -1 +1,2 @@
obj-$(CONFIG_VIDEO_MXC_IPU_OUTPUT) += mxc_vout.o
obj-$(CONFIG_VIDEO_MXC_PXP_V4L2) += mxc_pxp_v4l2.o

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,74 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2010-2014 Freescale Semiconductor, Inc.
* Copyright 2019 NXP
*/
/*
* Based on STMP378X PxP driver
* Copyright 2008-2009 Embedded Alley Solutions, Inc All Rights Reserved.
*/
#ifndef _MXC_PXP_V4L2_H
#define _MXC_PXP_V4L2_H
#include <linux/dmaengine.h>
#include <linux/pxp_dma.h>
/* One queued video frame: videobuf state plus its prepared DMA descriptor. */
struct pxp_buffer {
	/* Must be first! */
	struct videobuf_buffer vb;

	/* One descriptor per scatterlist (per frame) */
	struct dma_async_tx_descriptor *txd;

	struct scatterlist sg[3];
};

/* A coherent DMA allocation: CPU mapping, bus address and byte length. */
struct dma_mem {
	void *vaddr;
	dma_addr_t paddr;
	size_t size;
};

/* Driver-wide state for the PxP V4L2 output device. */
struct pxps {
	struct platform_device *pdev;

	spinlock_t lock;
	struct mutex mutex;
	int users;		/* open() reference count */

	struct video_device *vdev;

	struct videobuf_queue s0_vbq;
	struct pxp_buffer *active;	/* frame currently being processed */
	struct list_head outq;		/* frames queued for output */
	struct pxp_channel *pxp_channel[1];	/* We need 1 channel */
	struct pxp_config_data pxp_conf;
	struct dma_mem outbuf;

	int output;

	/* Current S0 configuration */
	struct pxp_data_format *s0_fmt;

	struct fb_info *fbi;
	struct v4l2_framebuffer fb;

	/* Output overlay support */
	int overlay_state;
	int global_alpha_state;
	u8 global_alpha;
	int s1_chromakey_state;
	u32 s1_chromakey;
	int fb_blank;
};

/* One supported pixel format and its V4L2 identity. */
struct pxp_data_format {
	char *name;
	unsigned int bpp;
	u32 fourcc;
	enum v4l2_colorspace colorspace;
};
#endif

View File

@ -65,6 +65,11 @@ static inline int imx_dma_is_ipu(struct dma_chan *chan)
return !strcmp(dev_name(chan->device->dev), "ipu-core");
}
/* Non-zero when @chan belongs to a device whose name contains "pxp". */
static inline int imx_dma_is_pxp(struct dma_chan *chan)
{
	const char *devname = dev_name(chan->device->dev);

	return strstr(devname, "pxp") ? 1 : 0;
}
static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
{
return !strcmp(chan->device->dev->driver->name, "imx-sdma") ||

View File

@ -0,0 +1,56 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2013-2014 Freescale Semiconductor, Inc. All Rights Reserved.
*
* Copyright 2019 NXP
*/
#ifndef _PXP_DEVICE
#define _PXP_DEVICE
#include <linux/idr.h>
#include <linux/hash.h>
#include <uapi/linux/pxp_device.h>
/* Per-channel IRQ bookkeeping shared between submit and completion paths. */
struct pxp_irq_info {
	wait_queue_head_t waitq;	/* woken when irq_pending drops to 0 */
	atomic_t irq_pending;		/* in-flight transactions on the channel */
	int hist_status;		/* histogram status of last completion */
};

/* Global hash of allocated DMA buffers, keyed by page frame number. */
struct pxp_buffer_hash {
	struct hlist_head *hash_table;	/* 2^order buckets */
	u32 order;
	spinlock_t hash_lock;		/* serializes insert/remove */
};

/* One physically contiguous DMA buffer handed out to user space. */
struct pxp_buf_obj {
	uint32_t handle;	/* idr handle exposed to user space */
	uint32_t size;		/* requested size in bytes */
	uint32_t mem_type;	/* MEMORY_TYPE_* cacheability used by mmap */
	unsigned long offset;	/* bus/physical address of the buffer */
	void *virtual;		/* kernel CPU mapping */
	struct hlist_node item;	/* link in the pxp_buffer_hash */
};

/* One DMA channel claimed via PXP_IOC_GET_CHAN. */
struct pxp_chan_obj {
	uint32_t handle;	/* idr handle exposed to user space */
	struct dma_chan *chan;
};

/* File private data */
struct pxp_file {
	struct file *filp;

	/* record allocated dma buffer */
	struct idr buffer_idr;
	spinlock_t buffer_lock;

	/* record allocated dma channel */
	struct idr channel_idr;
	spinlock_t channel_lock;
};
#endif

View File

@ -0,0 +1,70 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2010-2015 Freescale Semiconductor, Inc. All Rights Reserved.
*
* Copyright 2017-2019 NXP
*/
#ifndef _PXP_DMA
#define _PXP_DMA
#include <uapi/linux/pxp_dma.h>
/* One PxP transaction descriptor; descriptors chain through ->next. */
struct pxp_tx_desc {
	struct dma_async_tx_descriptor txd;	/* dmaengine view of this desc */
	struct list_head tx_list;
	struct list_head list;
	int len;		/* number of descriptors in this chain */
	union {
		struct pxp_layer_param s0_param;
		struct pxp_layer_param out_param;
		struct pxp_layer_param ol_param;
		struct pxp_layer_param processing_param;
	} layer_param;
	struct pxp_proc_data proc_data;

	u32 hist_status;	/* Histogram output status */

	struct pxp_tx_desc *next;
};

/* State of one PxP virtual DMA channel. */
struct pxp_channel {
	struct dma_chan dma_chan;
	dma_cookie_t completed;	/* last completed cookie */
	enum pxp_channel_status status;
	void *client;		/* Only one client per channel */
	unsigned int n_tx_desc;
	struct pxp_tx_desc *desc;	/* allocated tx-descriptors */
	struct list_head active_list;	/* active tx-descriptors */
	struct list_head free_list;	/* free tx-descriptors */
	struct list_head queue;	/* queued tx-descriptors */
	struct list_head list;	/* track queued channel number */

	spinlock_t lock;	/* protects sg[0,1], queue */
	struct mutex chan_mutex;	/* protects status, cookie, free_list */

	int active_buffer;
	unsigned int eof_irq;
	char eof_name[16];	/* EOF IRQ name for request_irq() */
};
#define to_tx_desc(tx) container_of(tx, struct pxp_tx_desc, txd)
#define to_pxp_channel(d) container_of(d, struct pxp_channel, dma_chan)
void pxp_txd_ack(struct dma_async_tx_descriptor *txd,
struct pxp_channel *pxp_chan);
#ifdef CONFIG_MXC_PXP_CLIENT_DEVICE
int register_pxp_device(void);
void unregister_pxp_device(void);
#else
/*
 * Stubs defined in a header must be 'static inline': a plain 'static'
 * definition emits an unused-function warning (and a private copy) in
 * every translation unit that includes this header without calling them.
 */
static inline int register_pxp_device(void) { return 0; }
static inline void unregister_pxp_device(void) {}
#endif
void pxp_fill(
u32 bpp,
u32 value,
u32 width,
u32 height,
u32 output_buffer,
u32 output_pitch);
void m4_process(void);
#endif