staging: most: fix channel operation in multi-aim context

This patch fixes the opening and closing of a physical channel that is
shared by different AIMs. Each AIM now passes itself to
most_start_channel() and most_stop_channel(), and the core keeps per-AIM
reference counts under a new start_mutex, so the hardware channel is
started only for its first user and torn down only when its last user
has released it.

Signed-off-by: Andrey Shvetsov <andrey.shvetsov@k2l.de>
Signed-off-by: Christian Gromm <christian.gromm@microchip.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Christian Gromm 2015-09-28 17:18:35 +02:00 committed by Greg Kroah-Hartman
parent 9161e9311c
commit f13f6981bc
6 changed files with 68 additions and 37 deletions
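The essence of the change is that most_start_channel()/most_stop_channel() now identify the calling AIM and reference-count it, so only the first start and the last stop touch the hardware. The following standalone C model is a simplified sketch of that logic and not the patched kernel code: it runs in userspace, uses a pthread mutex in place of the kernel start_mutex, resolves the channel object directly instead of via (iface, id), and replaces module refcounting, buffer allocation and channel poisoning with printf() placeholders. Field names follow struct most_c_obj from the diff below.

/*
 * Userspace model of the per-AIM reference counting introduced by this
 * patch: the physical channel is started only when the first AIM opens
 * it and stopped only when the last AIM closes it.
 */
#include <pthread.h>
#include <stdio.h>

struct most_aim { const char *name; };

struct most_c_obj {
	pthread_mutex_t start_mutex;
	struct most_aim *first_aim, *second_aim;
	int first_aim_refs, second_aim_refs;
};

static int most_start_channel(struct most_c_obj *c, struct most_aim *aim)
{
	pthread_mutex_lock(&c->start_mutex);
	if (c->first_aim_refs + c->second_aim_refs > 0)
		goto out;	/* already started by the other AIM */

	printf("starting hardware channel for %s\n", aim->name);
out:
	if (aim == c->first_aim)
		c->first_aim_refs++;
	if (aim == c->second_aim)
		c->second_aim_refs++;
	pthread_mutex_unlock(&c->start_mutex);
	return 0;
}

static int most_stop_channel(struct most_c_obj *c, struct most_aim *aim)
{
	pthread_mutex_lock(&c->start_mutex);
	if (c->first_aim_refs + c->second_aim_refs >= 2)
		goto out;	/* another AIM still uses the channel */

	printf("stopping hardware channel for %s\n", aim->name);
out:
	if (aim == c->first_aim)
		c->first_aim_refs--;
	if (aim == c->second_aim)
		c->second_aim_refs--;
	pthread_mutex_unlock(&c->start_mutex);
	return 0;
}

int main(void)
{
	static struct most_aim cdev_aim = { "cdev" };
	static struct most_aim net_aim = { "networking" };
	static struct most_c_obj c = {
		.start_mutex = PTHREAD_MUTEX_INITIALIZER,
		.first_aim = &cdev_aim,
		.second_aim = &net_aim,
	};

	most_start_channel(&c, &cdev_aim);	/* first user: starts the hardware */
	most_start_channel(&c, &net_aim);	/* second user: only bumps a refcount */
	most_stop_channel(&c, &cdev_aim);	/* one user left: only drops a refcount */
	most_stop_channel(&c, &net_aim);	/* last user: stops the hardware */
	return 0;
}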

drivers/staging/most/aim-cdev/cdev.c

@@ -27,6 +27,7 @@ static dev_t aim_devno;
static struct class *aim_class;
static struct ida minor_id;
static unsigned int major;
+static struct most_aim cdev_aim;
struct aim_channel {
wait_queue_head_t wq;
@@ -96,7 +97,7 @@ static int aim_open(struct inode *inode, struct file *filp)
return -EBUSY;
}
-ret = most_start_channel(channel->iface, channel->channel_id);
+ret = most_start_channel(channel->iface, channel->channel_id, &cdev_aim);
if (ret)
atomic_dec(&channel->access_ref);
return ret;
@@ -134,7 +135,7 @@ static int aim_close(struct inode *inode, struct file *filp)
most_put_mbo(mbo);
if (channel->keep_mbo)
most_put_mbo(channel->stacked_mbo);
-ret = most_stop_channel(channel->iface, channel->channel_id);
+ret = most_stop_channel(channel->iface, channel->channel_id, &cdev_aim);
atomic_dec(&channel->access_ref);
wake_up_interruptible(&channel->wq);
return ret;

drivers/staging/most/aim-network/networking.c

@@ -79,6 +79,7 @@ struct net_dev_context {
static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
static struct spinlock list_lock;
+static struct most_aim aim;
static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
@@ -194,14 +195,14 @@ static int most_nd_open(struct net_device *dev)
BUG_ON(!nd->tx.linked || !nd->rx.linked);
-if (most_start_channel(nd->iface, nd->rx.ch_id)) {
+if (most_start_channel(nd->iface, nd->rx.ch_id, &aim)) {
netdev_err(dev, "most_start_channel() failed\n");
return -EBUSY;
}
-if (most_start_channel(nd->iface, nd->tx.ch_id)) {
+if (most_start_channel(nd->iface, nd->tx.ch_id, &aim)) {
netdev_err(dev, "most_start_channel() failed\n");
-most_stop_channel(nd->iface, nd->rx.ch_id);
+most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
return -EBUSY;
}
@@ -227,8 +228,8 @@ static int most_nd_stop(struct net_device *dev)
netif_stop_queue(dev);
if (nd->channels_opened) {
-most_stop_channel(nd->iface, nd->rx.ch_id);
-most_stop_channel(nd->iface, nd->tx.ch_id);
+most_stop_channel(nd->iface, nd->rx.ch_id, &aim);
+most_stop_channel(nd->iface, nd->tx.ch_id, &aim);
nd->channels_opened = false;
}

drivers/staging/most/aim-sound/sound.c

@@ -26,6 +26,7 @@
#define DRIVER_NAME "sound"
static struct list_head dev_list;
+static struct most_aim audio_aim;
/**
* struct channel - private structure to keep channel specific data
@@ -298,7 +299,7 @@ static int pcm_open(struct snd_pcm_substream *substream)
return PTR_ERR(channel->playback_task);
}
-if (most_start_channel(channel->iface, channel->id)) {
+if (most_start_channel(channel->iface, channel->id, &audio_aim)) {
pr_err("most_start_channel() failed!\n");
if (cfg->direction == MOST_CH_TX)
kthread_stop(channel->playback_task);
@@ -333,7 +334,7 @@ static int pcm_close(struct snd_pcm_substream *substream)
if (channel->cfg->direction == MOST_CH_TX)
kthread_stop(channel->playback_task);
-most_stop_channel(channel->iface, channel->id);
+most_stop_channel(channel->iface, channel->id, &audio_aim);
return 0;
}

drivers/staging/most/aim-v4l2/video.c

@@ -32,6 +32,7 @@
#define V4L2_AIM_MAX_INPUT 1
+static struct most_aim aim_info;
struct most_video_dev {
struct most_interface *iface;
@@ -107,7 +108,7 @@ static int aim_vdev_open(struct file *filp)
v4l2_fh_add(&fh->fh);
-ret = most_start_channel(mdev->iface, mdev->ch_idx);
+ret = most_start_channel(mdev->iface, mdev->ch_idx, &aim_info);
if (ret) {
pr_err("most_start_channel() failed\n");
goto err_rm;
@@ -151,7 +152,7 @@ static int aim_vdev_close(struct file *filp)
spin_lock(&mdev->list_lock);
}
spin_unlock(&mdev->list_lock);
-most_stop_channel(mdev->iface, mdev->ch_idx);
+most_stop_channel(mdev->iface, mdev->ch_idx, &aim_info);
mdev->mute = false;
v4l2_fh_del(&fh->fh);

drivers/staging/most/mostcore/core.c

@@ -44,7 +44,7 @@ struct most_c_obj {
atomic_t mbo_nq_level;
uint16_t channel_id;
bool is_poisoned;
-bool is_started;
+struct mutex start_mutex;
int is_starving;
struct most_interface *iface;
struct most_inst_obj *inst;
@@ -57,6 +57,8 @@ struct most_c_obj {
struct list_head list;
struct most_aim *first_aim;
struct most_aim *second_aim;
+int first_aim_refs;
+int second_aim_refs;
struct list_head trash_fifo;
struct task_struct *hdm_enqueue_task;
struct mutex stop_task_mutex;
@@ -1234,10 +1236,11 @@ static void arm_mbo(struct mbo *mbo)
list_add_tail(&mbo->list, &c->fifo);
spin_unlock_irqrestore(&c->fifo_lock, flags);
-if (c->second_aim && c->second_aim->tx_completion)
-c->second_aim->tx_completion(c->iface, c->channel_id);
-if (c->first_aim && c->first_aim->tx_completion)
+if (c->first_aim_refs && c->first_aim->tx_completion)
c->first_aim->tx_completion(c->iface, c->channel_id);
+if (c->second_aim_refs && c->second_aim->tx_completion)
+c->second_aim->tx_completion(c->iface, c->channel_id);
}
/**
@@ -1441,11 +1444,12 @@ EXPORT_SYMBOL_GPL(most_put_mbo);
*/
static void most_read_completion(struct mbo *mbo)
{
-struct most_c_obj *c;
+struct most_c_obj *c = mbo->context;
-c = mbo->context;
-if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
-goto release_mbo;
+if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
+trash_mbo(mbo);
+return;
+}
if (mbo->status == MBO_E_INVAL) {
nq_hdm_mbo(mbo);
@@ -1458,16 +1462,15 @@ static void most_read_completion(struct mbo *mbo)
c->is_starving = 1;
}
-if (c->first_aim && c->first_aim->rx_completion &&
+if (c->first_aim_refs && c->first_aim->rx_completion &&
c->first_aim->rx_completion(mbo) == 0)
return;
-if (c->second_aim && c->second_aim->rx_completion &&
+if (c->second_aim_refs && c->second_aim->rx_completion &&
c->second_aim->rx_completion(mbo) == 0)
return;
-pr_info("WARN: no driver linked with this channel\n");
-mbo->status = MBO_E_CLOSE;
-release_mbo:
-trash_mbo(mbo);
+most_put_mbo(mbo);
}
/**
@@ -1480,7 +1483,8 @@ release_mbo:
*
* Returns 0 on success or error code otherwise.
*/
-int most_start_channel(struct most_interface *iface, int id)
+int most_start_channel(struct most_interface *iface, int id,
+struct most_aim *aim)
{
int num_buffer;
int ret;
@@ -1489,11 +1493,13 @@ int most_start_channel(struct most_interface *iface, int id)
if (unlikely(!c))
return -EINVAL;
-if (c->is_started)
-return -EBUSY;
+mutex_lock(&c->start_mutex);
+if (c->first_aim_refs + c->second_aim_refs > 0)
+goto out; /* already started by other aim */
if (!try_module_get(iface->mod)) {
pr_info("failed to acquire HDM lock\n");
+mutex_unlock(&c->start_mutex);
return -ENOLCK;
}
modref++;
@@ -1523,14 +1529,22 @@ int most_start_channel(struct most_interface *iface, int id)
if (ret)
goto error;
-c->is_started = true;
c->is_starving = 0;
atomic_set(&c->mbo_ref, num_buffer);
+out:
+if (aim == c->first_aim)
+c->first_aim_refs++;
+if (aim == c->second_aim)
+c->second_aim_refs++;
+mutex_unlock(&c->start_mutex);
return 0;
error:
if (iface->mod)
module_put(iface->mod);
modref--;
+mutex_unlock(&c->start_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(most_start_channel);
@@ -1540,7 +1554,8 @@ EXPORT_SYMBOL_GPL(most_start_channel);
* @iface: pointer to interface instance
* @id: channel ID
*/
-int most_stop_channel(struct most_interface *iface, int id)
+int most_stop_channel(struct most_interface *iface, int id,
+struct most_aim *aim)
{
struct most_c_obj *c;
@@ -1552,8 +1567,9 @@ int most_stop_channel(struct most_interface *iface, int id)
if (unlikely(!c))
return -EINVAL;
-if (!c->is_started)
-return 0;
+mutex_lock(&c->start_mutex);
+if (c->first_aim_refs + c->second_aim_refs >= 2)
+goto out;
mutex_lock(&c->stop_task_mutex);
if (c->hdm_enqueue_task)
@@ -1564,6 +1580,7 @@ int most_stop_channel(struct most_interface *iface, int id)
mutex_lock(&deregister_mutex);
if (atomic_read(&c->inst->tainted)) {
mutex_unlock(&deregister_mutex);
+mutex_unlock(&c->start_mutex);
return -ENODEV;
}
mutex_unlock(&deregister_mutex);
@@ -1577,6 +1594,7 @@ int most_stop_channel(struct most_interface *iface, int id)
if (c->iface->poison_channel(c->iface, c->channel_id)) {
pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
c->iface->description);
+mutex_unlock(&c->start_mutex);
return -EAGAIN;
}
flush_trash_fifo(c);
@@ -1585,13 +1603,20 @@ int most_stop_channel(struct most_interface *iface, int id)
#ifdef CMPL_INTERRUPTIBLE
if (wait_for_completion_interruptible(&c->cleanup)) {
pr_info("Interrupted while clean up ch %d\n", c->channel_id);
+mutex_unlock(&c->start_mutex);
return -EINTR;
}
#else
wait_for_completion(&c->cleanup);
#endif
c->is_poisoned = false;
-c->is_started = false;
+out:
+if (aim == c->first_aim)
+c->first_aim_refs--;
+if (aim == c->second_aim)
+c->second_aim_refs--;
+mutex_unlock(&c->start_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(most_stop_channel);
@@ -1725,7 +1750,6 @@ struct kobject *most_register_interface(struct most_interface *iface)
c->keep_mbo = false;
c->enqueue_halt = false;
c->is_poisoned = false;
-c->is_started = false;
c->cfg.direction = 0;
c->cfg.data_type = 0;
c->cfg.num_buffers = 0;
@@ -1738,6 +1762,7 @@ struct kobject *most_register_interface(struct most_interface *iface)
INIT_LIST_HEAD(&c->halt_fifo);
init_completion(&c->cleanup);
atomic_set(&c->mbo_ref, 0);
+mutex_init(&c->start_mutex);
mutex_init(&c->stop_task_mutex);
list_add_tail(&c->list, &inst->channel_list);
}
@@ -1784,7 +1809,7 @@ void most_deregister_interface(struct most_interface *iface)
}
list_for_each_entry(c, &i->channel_list, list) {
-if (!c->is_started)
+if (c->first_aim_refs + c->second_aim_refs <= 0)
continue;
mutex_lock(&c->stop_task_mutex);

drivers/staging/most/mostcore/mostcore.h

@@ -309,8 +309,10 @@ int most_register_aim(struct most_aim *aim);
int most_deregister_aim(struct most_aim *aim);
struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx);
void most_put_mbo(struct mbo *mbo);
-int most_start_channel(struct most_interface *iface, int channel_idx);
-int most_stop_channel(struct most_interface *iface, int channel_idx);
+int most_start_channel(struct most_interface *iface, int channel_idx,
+struct most_aim *);
+int most_stop_channel(struct most_interface *iface, int channel_idx,
+struct most_aim *);
#endif /* MOST_CORE_H_ */
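All four AIMs touched above follow the same calling pattern against these new prototypes. Below is a minimal caller-side sketch; my_aim, my_open and my_close are illustrative names that are not part of the patch, the include path is assumed, and a real AIM additionally fills in its rx_completion/tx_completion callbacks and registers itself with most_register_aim().

#include "mostcore.h"

/* One static struct most_aim per AIM; its address identifies the caller
 * so the core can keep a per-AIM reference count on the channel. */
static struct most_aim my_aim;

static int my_open(struct most_interface *iface, int ch_idx)
{
	/* Starts the hardware only if no other AIM has the channel open. */
	return most_start_channel(iface, ch_idx, &my_aim);
}

static int my_close(struct most_interface *iface, int ch_idx)
{
	/* Tears the channel down only when we are the last user. */
	return most_stop_channel(iface, ch_idx, &my_aim);
}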