remarkable-linux/drivers/iio/buffer_cb.c
Lars-Peter Clausen 225d59adf1 iio: Specify supported modes for buffers
For each buffer type, specify the device modes the buffer supports.
For devices that support multiple operating modes, this allows us to pick
the correct operating mode based on the modes supported by the attached
buffers.

It also prevents buffers with conflicting modes from being attached to a
device at the same time, and prevents a buffer with an unsupported mode
from being attached to a device (e.g. an in-kernel callback buffer attached
to a device that only supports hardware mode).

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jonathan Cameron <jic23@kernel.org>
2015-06-01 11:31:12 +01:00
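
The callback buffer defined below advertises INDIO_BUFFER_SOFTWARE and
INDIO_BUFFER_TRIGGERED through the new .modes field of its access functions.
As a rough illustration of what this enables (not the actual
iio_verify_update() logic in industrialio-buffer.c, and assuming the usual
<linux/iio/iio.h> and <linux/iio/buffer.h> declarations), the core can
intersect the device's modes with the modes of every attached buffer and
refuse an attach that leaves no common mode:

/*
 * Sketch only: intersect the device's supported modes with the modes of
 * every currently attached buffer plus the buffer being inserted.
 */
static int sketch_verify_buffer_modes(struct iio_dev *indio_dev,
				      struct iio_buffer *insert_buffer)
{
	struct iio_buffer *buffer;
	unsigned int modes = indio_dev->modes;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		modes &= buffer->access->modes;
	if (insert_buffer)
		modes &= insert_buffer->access->modes;

	/* No operating mode acceptable to everyone: refuse the attach. */
	if (!(modes & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_SOFTWARE |
		       INDIO_BUFFER_HARDWARE)))
		return -EINVAL;

	return 0;
}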

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/iio/buffer.h>
#include <linux/iio/consumer.h>
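
/*
 * In-kernel callback buffer: forwards each scan pushed into the buffer to a
 * consumer-provided callback instead of exposing the data to userspace.
 */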
struct iio_cb_buffer {
	struct iio_buffer buffer;
	int (*cb)(const void *data, void *private);
	void *private;
	struct iio_channel *channels;
};

static struct iio_cb_buffer *buffer_to_cb_buffer(struct iio_buffer *buffer)
{
	return container_of(buffer, struct iio_cb_buffer, buffer);
}
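
/* Called by the IIO core for every scan that is pushed into the buffer. */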
static int iio_buffer_cb_store_to(struct iio_buffer *buffer, const void *data)
{
	struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);

	return cb_buff->cb(data, cb_buff->private);
}

static void iio_buffer_cb_release(struct iio_buffer *buffer)
{
	struct iio_cb_buffer *cb_buff = buffer_to_cb_buffer(buffer);

	kfree(cb_buff->buffer.scan_mask);
	kfree(cb_buff);
}
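
/*
 * A callback buffer is fed by the in-kernel push path, so it supports the
 * software and triggered buffer modes but not hardware mode.
 */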
static const struct iio_buffer_access_funcs iio_cb_access = {
	.store_to = &iio_buffer_cb_store_to,
	.release = &iio_buffer_cb_release,
	.modes = INDIO_BUFFER_SOFTWARE | INDIO_BUFFER_TRIGGERED,
};
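
/*
 * Allocate a callback buffer covering all channels mapped to the consumer
 * device and set the scan mask accordingly. All channels must belong to the
 * same IIO device.
 */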
struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
					     int (*cb)(const void *data,
						       void *private),
					     void *private)
{
	int ret;
	struct iio_cb_buffer *cb_buff;
	struct iio_dev *indio_dev;
	struct iio_channel *chan;

	cb_buff = kzalloc(sizeof(*cb_buff), GFP_KERNEL);
	if (cb_buff == NULL)
		return ERR_PTR(-ENOMEM);

	iio_buffer_init(&cb_buff->buffer);

	cb_buff->private = private;
	cb_buff->cb = cb;
	cb_buff->buffer.access = &iio_cb_access;
	INIT_LIST_HEAD(&cb_buff->buffer.demux_list);

	cb_buff->channels = iio_channel_get_all(dev);
	if (IS_ERR(cb_buff->channels)) {
		ret = PTR_ERR(cb_buff->channels);
		goto error_free_cb_buff;
	}

	indio_dev = cb_buff->channels[0].indio_dev;
	cb_buff->buffer.scan_mask
		= kcalloc(BITS_TO_LONGS(indio_dev->masklength), sizeof(long),
			  GFP_KERNEL);
	if (cb_buff->buffer.scan_mask == NULL) {
		ret = -ENOMEM;
		goto error_release_channels;
	}

	chan = &cb_buff->channels[0];
	while (chan->indio_dev) {
		if (chan->indio_dev != indio_dev) {
			ret = -EINVAL;
			goto error_free_scan_mask;
		}
		set_bit(chan->channel->scan_index,
			cb_buff->buffer.scan_mask);
		chan++;
	}

	return cb_buff;

error_free_scan_mask:
	kfree(cb_buff->buffer.scan_mask);
error_release_channels:
	iio_channel_release_all(cb_buff->channels);
error_free_cb_buff:
	kfree(cb_buff);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
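
/* Attach the callback buffer to the producing device, enabling capture. */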
int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
{
	return iio_update_buffers(cb_buff->channels[0].indio_dev,
				  &cb_buff->buffer, NULL);
}
EXPORT_SYMBOL_GPL(iio_channel_start_all_cb);
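
/* Detach the callback buffer from the device, stopping delivery of data. */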
void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff)
{
	iio_update_buffers(cb_buff->channels[0].indio_dev,
			   NULL, &cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_stop_all_cb);
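
/* Release the channels and drop the reference on the callback buffer. */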
void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buff)
{
	iio_channel_release_all(cb_buff->channels);
	iio_buffer_put(&cb_buff->buffer);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all_cb);
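
/* Expose the channel array so callers can inspect the channels in use. */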
struct iio_channel
*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer)
{
	return cb_buffer->channels;
}
EXPORT_SYMBOL_GPL(iio_channel_cb_get_channels);
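
For context, a consumer driver would typically use this API as sketched
below. The demo_* names, the platform-driver wrapping and the callback body
are made up for illustration; only the iio_channel_*_cb() calls come from
the file above, and the sketch assumes the usual <linux/platform_device.h>
and <linux/iio/consumer.h> includes.

/* Hypothetical consumer state; the name is for illustration only. */
struct demo_consumer {
	struct iio_cb_buffer *cb_buff;
};

/* Invoked for every scan, with data demuxed to the requested channels. */
static int demo_consumer_cb(const void *data, void *private)
{
	struct demo_consumer *demo = private;

	/* ... process one scan's worth of samples here ... */
	(void)demo;
	(void)data;
	return 0;
}

static int demo_consumer_probe(struct platform_device *pdev)
{
	struct demo_consumer *demo;

	demo = devm_kzalloc(&pdev->dev, sizeof(*demo), GFP_KERNEL);
	if (!demo)
		return -ENOMEM;
	platform_set_drvdata(pdev, demo);

	/* Map all IIO channels of this consumer to one callback buffer. */
	demo->cb_buff = iio_channel_get_all_cb(&pdev->dev,
					       demo_consumer_cb, demo);
	if (IS_ERR(demo->cb_buff))
		return PTR_ERR(demo->cb_buff);

	/* Attach the buffer to the producing device and start capture. */
	return iio_channel_start_all_cb(demo->cb_buff);
}

static int demo_consumer_remove(struct platform_device *pdev)
{
	struct demo_consumer *demo = platform_get_drvdata(pdev);

	iio_channel_stop_all_cb(demo->cb_buff);
	iio_channel_release_all_cb(demo->cb_buff);
	return 0;
}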