staging: comedi: separate out comedi_buf_* functions

Create a new file, comedi_buf.c, to hold all the comedi_async buffer
functions. Currently they are all in drivers.c and really don't have
any association with that source file.

Signed-off-by: H Hartley Sweeten <hsweeten@visionengravers.com>
Signed-off-by: Ian Abbott <abbotti@mev.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit ea082fb1b0
parent d18431325b
Author: H Hartley Sweeten, 2013-01-09 13:25:06 -07:00 (committed by Greg Kroah-Hartman)
3 changed files with 423 additions and 399 deletions
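For context, the comedi_buf_*() functions being moved make up the ring-buffer API that comedi hardware drivers call from their interrupt handlers. Below is a minimal sketch of the typical acquisition path using only the functions shown in this patch; the skel_* names and SKEL_AI_DATA_REG register are hypothetical placeholders, not part of this change.

/* Illustrative sketch only -- not part of this patch. The skel_* names and
 * SKEL_AI_DATA_REG are hypothetical; the comedi_buf_*() and comedi_event()
 * calls are the comedi core API being moved below. */
#include <linux/interrupt.h>
#include <linux/io.h>
#include "../comedidev.h"

#define SKEL_AI_DATA_REG	0x00	/* hypothetical register offset */

static irqreturn_t skel_ai_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->read_subdev;
	struct comedi_async *async = s->async;
	short sample;

	sample = inw(dev->iobase + SKEL_AI_DATA_REG);

	/* queue one sample; comedi_buf_put() flags COMEDI_CB_ERROR on overrun */
	comedi_buf_put(async, sample);

	async->events |= COMEDI_CB_BLOCK;
	comedi_event(dev, s);	/* report events, wake up the reader */

	return IRQ_HANDLED;
}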

drivers/staging/comedi/Makefile

@@ -1,4 +1,5 @@
-comedi-y := comedi_fops.o range.o drivers.o
+comedi-y := comedi_fops.o range.o drivers.o \
+		comedi_buf.o
comedi-$(CONFIG_PROC_FS) += proc.o
comedi-$(CONFIG_COMPAT) += comedi_compat32.o

drivers/staging/comedi/comedi_buf.c (new file)

@@ -0,0 +1,421 @@
/*
* comedi_buf.c
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "comedidev.h"
#include "comedi_internal.h"
int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long new_size)
{
struct comedi_async *async = s->async;
/* Round up new_size to multiple of PAGE_SIZE */
new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
/* if no change is required, do nothing */
if (async->prealloc_buf && async->prealloc_bufsz == new_size)
return 0;
/* deallocate old buffer */
if (async->prealloc_buf) {
vunmap(async->prealloc_buf);
async->prealloc_buf = NULL;
async->prealloc_bufsz = 0;
}
if (async->buf_page_list) {
unsigned i;
for (i = 0; i < async->n_buf_pages; ++i) {
if (async->buf_page_list[i].virt_addr) {
clear_bit(PG_reserved,
&(virt_to_page(async->buf_page_list[i].virt_addr)->flags));
if (s->async_dma_dir != DMA_NONE) {
dma_free_coherent(dev->hw_dev, PAGE_SIZE,
async->buf_page_list[i].virt_addr,
async->buf_page_list[i].dma_addr);
} else {
free_page((unsigned long)async->buf_page_list[i].virt_addr);
}
}
}
vfree(async->buf_page_list);
async->buf_page_list = NULL;
async->n_buf_pages = 0;
}
/* allocate new buffer */
if (new_size) {
unsigned i = 0;
unsigned n_pages = new_size >> PAGE_SHIFT;
struct page **pages = NULL;
async->buf_page_list =
vzalloc(sizeof(struct comedi_buf_page) * n_pages);
if (async->buf_page_list)
pages = vmalloc(sizeof(struct page *) * n_pages);
if (pages) {
for (i = 0; i < n_pages; i++) {
if (s->async_dma_dir != DMA_NONE) {
async->buf_page_list[i].virt_addr =
dma_alloc_coherent(dev->hw_dev, PAGE_SIZE,
&async->buf_page_list[i].dma_addr,
GFP_KERNEL | __GFP_COMP);
} else {
async->buf_page_list[i].virt_addr =
(void *)get_zeroed_page(GFP_KERNEL);
}
if (async->buf_page_list[i].virt_addr == NULL)
break;
set_bit(PG_reserved,
&(virt_to_page(async->buf_page_list[i].virt_addr)->flags));
pages[i] = virt_to_page(async->buf_page_list[i].virt_addr);
}
}
if (i == n_pages) {
async->prealloc_buf =
#ifdef PAGE_KERNEL_NOCACHE
vmap(pages, n_pages, VM_MAP, PAGE_KERNEL_NOCACHE);
#else
vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
#endif
}
vfree(pages);
if (async->prealloc_buf == NULL) {
/* Some allocation failed above. */
if (async->buf_page_list) {
for (i = 0; i < n_pages; i++) {
if (async->buf_page_list[i].virt_addr == NULL)
break;
clear_bit(PG_reserved,
&(virt_to_page(async->buf_page_list[i].virt_addr)->flags));
if (s->async_dma_dir != DMA_NONE) {
dma_free_coherent(dev->hw_dev, PAGE_SIZE,
async->buf_page_list[i].virt_addr,
async->buf_page_list[i].dma_addr);
} else {
free_page((unsigned long)async->buf_page_list[i].virt_addr);
}
}
}
vfree(async->buf_page_list);
async->buf_page_list = NULL;
}
return -ENOMEM;
}
async->n_buf_pages = n_pages;
}
async->prealloc_bufsz = new_size;
return 0;
}
void comedi_reset_async_buf(struct comedi_async *async)
{
async->buf_write_alloc_count = 0;
async->buf_write_count = 0;
async->buf_read_alloc_count = 0;
async->buf_read_count = 0;
async->buf_write_ptr = 0;
async->buf_read_ptr = 0;
async->cur_chan = 0;
async->scan_progress = 0;
async->munge_chan = 0;
async->munge_count = 0;
async->munge_ptr = 0;
async->events = 0;
}
unsigned int comedi_buf_write_n_available(struct comedi_async *async)
{
unsigned int free_end;
unsigned int nbytes;
if (async == NULL)
return 0;
free_end = async->buf_read_count + async->prealloc_bufsz;
nbytes = free_end - async->buf_write_alloc_count;
nbytes -= nbytes % bytes_per_sample(async->subdevice);
/* barrier ensures the read of buf_read_count in this
* query occurs before any following writes to the buffer which
* might be based on the return value from this query.
*/
smp_mb();
return nbytes;
}
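The byte counters used here (buf_read_count, buf_write_alloc_count, and so on) are free-running: they only increase, may wrap around UINT_MAX, and buffer positions are derived from them modulo prealloc_bufsz. A standalone sketch (ordinary userspace C, not part of this patch) of why the unsigned subtraction above stays correct across a wrap:

/* Standalone illustration (not kernel code, not part of this patch) of the
 * free-running counter arithmetic used by comedi_buf_write_n_available():
 * the counters only ever increase, and unsigned subtraction still yields
 * the correct distance after they wrap around UINT_MAX. */
#include <stdio.h>

int main(void)
{
	unsigned int prealloc_bufsz = 4096;
	unsigned int buf_read_count = 4294966000u;	/* close to UINT_MAX */
	unsigned int buf_write_alloc_count = buf_read_count + 3000;	/* wraps */

	unsigned int free_end = buf_read_count + prealloc_bufsz;	/* also wraps */
	unsigned int nbytes = free_end - buf_write_alloc_count;

	printf("writer may still allocate %u bytes\n", nbytes);	/* prints 1096 */
	return 0;
}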
/* allocates chunk for the writer from free buffer space */
unsigned int comedi_buf_write_alloc(struct comedi_async *async,
unsigned int nbytes)
{
unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
if ((int)(async->buf_write_alloc_count + nbytes - free_end) > 0)
nbytes = free_end - async->buf_write_alloc_count;
async->buf_write_alloc_count += nbytes;
/* barrier ensures the read of buf_read_count above occurs before
* we write data to the write-alloc'ed buffer space */
smp_mb();
return nbytes;
}
EXPORT_SYMBOL(comedi_buf_write_alloc);
/* allocates nothing unless it can completely fulfill the request */
unsigned int comedi_buf_write_alloc_strict(struct comedi_async *async,
unsigned int nbytes)
{
unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
if ((int)(async->buf_write_alloc_count + nbytes - free_end) > 0)
nbytes = 0;
async->buf_write_alloc_count += nbytes;
/* barrier ensures the read of buf_read_count above occurs before
* we write data to the write-alloc'ed buffer space */
smp_mb();
return nbytes;
}
/* munging is applied to the data by the core as it passes between
* user and kernel space */
static unsigned int comedi_buf_munge(struct comedi_async *async,
unsigned int num_bytes)
{
struct comedi_subdevice *s = async->subdevice;
unsigned int count = 0;
const unsigned num_sample_bytes = bytes_per_sample(s);
if (s->munge == NULL || (async->cmd.flags & CMDF_RAWDATA)) {
async->munge_count += num_bytes;
BUG_ON((int)(async->munge_count - async->buf_write_count) > 0);
return num_bytes;
}
/* don't munge partial samples */
num_bytes -= num_bytes % num_sample_bytes;
while (count < num_bytes) {
int block_size;
block_size = num_bytes - count;
if (block_size < 0) {
dev_warn(s->device->class_dev,
"%s: %s: bug! block_size is negative\n",
__FILE__, __func__);
break;
}
if ((int)(async->munge_ptr + block_size -
async->prealloc_bufsz) > 0)
block_size = async->prealloc_bufsz - async->munge_ptr;
s->munge(s->device, s, async->prealloc_buf + async->munge_ptr,
block_size, async->munge_chan);
smp_wmb(); /* barrier ensures data is munged in buffer
* before munge_count is incremented */
async->munge_chan += block_size / num_sample_bytes;
async->munge_chan %= async->cmd.chanlist_len;
async->munge_count += block_size;
async->munge_ptr += block_size;
async->munge_ptr %= async->prealloc_bufsz;
count += block_size;
}
BUG_ON((int)(async->munge_count - async->buf_write_count) > 0);
return count;
}
/* transfers a chunk from writer to filled buffer space */
unsigned comedi_buf_write_free(struct comedi_async *async, unsigned int nbytes)
{
if ((int)(async->buf_write_count + nbytes -
async->buf_write_alloc_count) > 0) {
dev_info(async->subdevice->device->class_dev,
"attempted to write-free more bytes than have been write-allocated.\n");
nbytes = async->buf_write_alloc_count - async->buf_write_count;
}
async->buf_write_count += nbytes;
async->buf_write_ptr += nbytes;
comedi_buf_munge(async, async->buf_write_count - async->munge_count);
if (async->buf_write_ptr >= async->prealloc_bufsz)
async->buf_write_ptr %= async->prealloc_bufsz;
return nbytes;
}
EXPORT_SYMBOL(comedi_buf_write_free);
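Together, the write-side functions above are typically used by a driver in a block-transfer pattern roughly like the following sketch (illustrative only; skel_drain_fifo, bounce_buf and num_bytes are placeholder names, not part of this patch):

/* Illustrative write-side pattern only -- not part of this patch; bounce_buf
 * holds data the driver has just pulled off the hardware. */
#include "../comedidev.h"

static void skel_drain_fifo(struct comedi_device *dev,
			    struct comedi_subdevice *s,
			    const void *bounce_buf, unsigned int num_bytes)
{
	struct comedi_async *async = s->async;
	unsigned int n;

	/* reserve space; may return less than requested if the buffer is full */
	n = comedi_buf_write_alloc(async, num_bytes);
	if (n < num_bytes)
		async->events |= COMEDI_CB_OVERFLOW;	/* ring buffer overrun */

	/* copy into the reserved space (handles the wrap), then commit it */
	comedi_buf_memcpy_to(async, 0, bounce_buf, n);
	comedi_buf_write_free(async, n);

	async->events |= COMEDI_CB_BLOCK;
	comedi_event(dev, s);
}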
unsigned int comedi_buf_read_n_available(struct comedi_async *async)
{
unsigned num_bytes;
if (async == NULL)
return 0;
num_bytes = async->munge_count - async->buf_read_count;
/* barrier ensures the read of munge_count in this
* query occurs before any following reads of the buffer which
* might be based on the return value from this query.
*/
smp_rmb();
return num_bytes;
}
EXPORT_SYMBOL(comedi_buf_read_n_available);
/* allocates a chunk for the reader from filled (and munged) buffer space */
unsigned comedi_buf_read_alloc(struct comedi_async *async, unsigned nbytes)
{
if ((int)(async->buf_read_alloc_count + nbytes - async->munge_count) >
0) {
nbytes = async->munge_count - async->buf_read_alloc_count;
}
async->buf_read_alloc_count += nbytes;
/* barrier ensures the read of munge_count occurs before we actually
* read data out of the buffer */
smp_rmb();
return nbytes;
}
EXPORT_SYMBOL(comedi_buf_read_alloc);
/* transfers control of a chunk from reader to free buffer space */
unsigned comedi_buf_read_free(struct comedi_async *async, unsigned int nbytes)
{
/* barrier ensures data has been read out of
* buffer before read count is incremented */
smp_mb();
if ((int)(async->buf_read_count + nbytes -
async->buf_read_alloc_count) > 0) {
dev_info(async->subdevice->device->class_dev,
"attempted to read-free more bytes than have been read-allocated.\n");
nbytes = async->buf_read_alloc_count - async->buf_read_count;
}
async->buf_read_count += nbytes;
async->buf_read_ptr += nbytes;
async->buf_read_ptr %= async->prealloc_bufsz;
return nbytes;
}
EXPORT_SYMBOL(comedi_buf_read_free);
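The read-side functions follow the mirror-image pattern, for example when an analog output driver refills its hardware FIFO from already-munged buffer data (illustrative sketch only; the skel_* names, fifo_bounce and fifo_space_bytes are placeholders, not part of this patch):

/* Illustrative read-side pattern only -- not part of this patch; fifo_bounce
 * and fifo_space_bytes stand in for a driver's staging buffer and the room
 * left in its hardware FIFO. */
#include "../comedidev.h"

static void skel_refill_ao_fifo(struct comedi_device *dev,
				struct comedi_subdevice *s,
				void *fifo_bounce, unsigned int fifo_space_bytes)
{
	struct comedi_async *async = s->async;
	unsigned int nbytes = comedi_buf_read_n_available(async);

	if (nbytes > fifo_space_bytes)
		nbytes = fifo_space_bytes;

	/* claim the data, copy it out (handles the wrap), then release it */
	nbytes = comedi_buf_read_alloc(async, nbytes);
	comedi_buf_memcpy_from(async, 0, fifo_bounce, nbytes);
	comedi_buf_read_free(async, nbytes);

	async->events |= COMEDI_CB_BLOCK;
	comedi_event(dev, s);
}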
int comedi_buf_put(struct comedi_async *async, short x)
{
unsigned int n = comedi_buf_write_alloc_strict(async, sizeof(short));
if (n < sizeof(short)) {
async->events |= COMEDI_CB_ERROR;
return 0;
}
*(short *)(async->prealloc_buf + async->buf_write_ptr) = x;
comedi_buf_write_free(async, sizeof(short));
return 1;
}
EXPORT_SYMBOL(comedi_buf_put);
int comedi_buf_get(struct comedi_async *async, short *x)
{
unsigned int n = comedi_buf_read_n_available(async);
if (n < sizeof(short))
return 0;
comedi_buf_read_alloc(async, sizeof(short));
*x = *(short *)(async->prealloc_buf + async->buf_read_ptr);
comedi_buf_read_free(async, sizeof(short));
return 1;
}
EXPORT_SYMBOL(comedi_buf_get);
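comedi_buf_put() and comedi_buf_get() are the sample-at-a-time convenience wrappers around the same machinery; a hypothetical per-sample analog output interrupt might use comedi_buf_get() like this (SKEL_AO_DATA_REG and the skel_* name are made up, not part of this patch):

/* Illustrative only -- not part of this patch; SKEL_AO_DATA_REG is a
 * made-up register offset. */
#include <linux/io.h>
#include "../comedidev.h"

#define SKEL_AO_DATA_REG	0x04	/* hypothetical register offset */

static void skel_ao_next_sample(struct comedi_device *dev,
				struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	short data;

	if (comedi_buf_get(async, &data)) {
		outw(data, dev->iobase + SKEL_AO_DATA_REG);
	} else {
		/* no munged data available: report an underrun */
		async->events |= COMEDI_CB_OVERFLOW | COMEDI_CB_ERROR;
		comedi_event(dev, s);
	}
}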
void comedi_buf_memcpy_to(struct comedi_async *async, unsigned int offset,
const void *data, unsigned int num_bytes)
{
unsigned int write_ptr = async->buf_write_ptr + offset;
if (write_ptr >= async->prealloc_bufsz)
write_ptr %= async->prealloc_bufsz;
while (num_bytes) {
unsigned int block_size;
if (write_ptr + num_bytes > async->prealloc_bufsz)
block_size = async->prealloc_bufsz - write_ptr;
else
block_size = num_bytes;
memcpy(async->prealloc_buf + write_ptr, data, block_size);
data += block_size;
num_bytes -= block_size;
write_ptr = 0;
}
}
EXPORT_SYMBOL(comedi_buf_memcpy_to);
void comedi_buf_memcpy_from(struct comedi_async *async, unsigned int offset,
void *dest, unsigned int nbytes)
{
void *src;
unsigned int read_ptr = async->buf_read_ptr + offset;
if (read_ptr >= async->prealloc_bufsz)
read_ptr %= async->prealloc_bufsz;
while (nbytes) {
unsigned int block_size;
src = async->prealloc_buf + read_ptr;
if (nbytes >= async->prealloc_bufsz - read_ptr)
block_size = async->prealloc_bufsz - read_ptr;
else
block_size = nbytes;
memcpy(dest, src, block_size);
nbytes -= block_size;
dest += block_size;
read_ptr = 0;
}
}
EXPORT_SYMBOL(comedi_buf_memcpy_from);

drivers/staging/comedi/drivers.c

@@ -430,404 +430,6 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
return 1;
}
int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned long new_size)
{
struct comedi_async *async = s->async;
/* Round up new_size to multiple of PAGE_SIZE */
new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
/* if no change is required, do nothing */
if (async->prealloc_buf && async->prealloc_bufsz == new_size)
return 0;
/* deallocate old buffer */
if (async->prealloc_buf) {
vunmap(async->prealloc_buf);
async->prealloc_buf = NULL;
async->prealloc_bufsz = 0;
}
if (async->buf_page_list) {
unsigned i;
for (i = 0; i < async->n_buf_pages; ++i) {
if (async->buf_page_list[i].virt_addr) {
clear_bit(PG_reserved,
&(virt_to_page(async->buf_page_list[i].
virt_addr)->flags));
if (s->async_dma_dir != DMA_NONE) {
dma_free_coherent(dev->hw_dev,
PAGE_SIZE,
async->
buf_page_list
[i].virt_addr,
async->
buf_page_list
[i].dma_addr);
} else {
free_page((unsigned long)
async->buf_page_list[i].
virt_addr);
}
}
}
vfree(async->buf_page_list);
async->buf_page_list = NULL;
async->n_buf_pages = 0;
}
/* allocate new buffer */
if (new_size) {
unsigned i = 0;
unsigned n_pages = new_size >> PAGE_SHIFT;
struct page **pages = NULL;
async->buf_page_list =
vzalloc(sizeof(struct comedi_buf_page) * n_pages);
if (async->buf_page_list)
pages = vmalloc(sizeof(struct page *) * n_pages);
if (pages) {
for (i = 0; i < n_pages; i++) {
if (s->async_dma_dir != DMA_NONE) {
async->buf_page_list[i].virt_addr =
dma_alloc_coherent(dev->hw_dev,
PAGE_SIZE,
&async->
buf_page_list
[i].dma_addr,
GFP_KERNEL |
__GFP_COMP);
} else {
async->buf_page_list[i].virt_addr =
(void *)
get_zeroed_page(GFP_KERNEL);
}
if (async->buf_page_list[i].virt_addr == NULL)
break;
set_bit(PG_reserved,
&(virt_to_page(async->buf_page_list[i].
virt_addr)->flags));
pages[i] = virt_to_page(async->buf_page_list[i].
virt_addr);
}
}
if (i == n_pages) {
async->prealloc_buf =
#ifdef PAGE_KERNEL_NOCACHE
vmap(pages, n_pages, VM_MAP, PAGE_KERNEL_NOCACHE);
#else
vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
#endif
}
vfree(pages);
if (async->prealloc_buf == NULL) {
/* Some allocation failed above. */
if (async->buf_page_list) {
for (i = 0; i < n_pages; i++) {
if (async->buf_page_list[i].virt_addr ==
NULL) {
break;
}
clear_bit(PG_reserved,
&(virt_to_page(async->
buf_page_list[i].
virt_addr)->flags));
if (s->async_dma_dir != DMA_NONE) {
dma_free_coherent(dev->hw_dev,
PAGE_SIZE,
async->
buf_page_list
[i].virt_addr,
async->
buf_page_list
[i].dma_addr);
} else {
free_page((unsigned long)
async->buf_page_list
[i].virt_addr);
}
}
vfree(async->buf_page_list);
async->buf_page_list = NULL;
}
return -ENOMEM;
}
async->n_buf_pages = n_pages;
}
async->prealloc_bufsz = new_size;
return 0;
}
/* munging is applied to data by core as it passes between user
* and kernel space */
static unsigned int comedi_buf_munge(struct comedi_async *async,
unsigned int num_bytes)
{
struct comedi_subdevice *s = async->subdevice;
unsigned int count = 0;
const unsigned num_sample_bytes = bytes_per_sample(s);
if (s->munge == NULL || (async->cmd.flags & CMDF_RAWDATA)) {
async->munge_count += num_bytes;
BUG_ON((int)(async->munge_count - async->buf_write_count) > 0);
return num_bytes;
}
/* don't munge partial samples */
num_bytes -= num_bytes % num_sample_bytes;
while (count < num_bytes) {
int block_size;
block_size = num_bytes - count;
if (block_size < 0) {
dev_warn(s->device->class_dev,
"%s: %s: bug! block_size is negative\n",
__FILE__, __func__);
break;
}
if ((int)(async->munge_ptr + block_size -
async->prealloc_bufsz) > 0)
block_size = async->prealloc_bufsz - async->munge_ptr;
s->munge(s->device, s, async->prealloc_buf + async->munge_ptr,
block_size, async->munge_chan);
smp_wmb(); /* barrier insures data is munged in buffer
* before munge_count is incremented */
async->munge_chan += block_size / num_sample_bytes;
async->munge_chan %= async->cmd.chanlist_len;
async->munge_count += block_size;
async->munge_ptr += block_size;
async->munge_ptr %= async->prealloc_bufsz;
count += block_size;
}
BUG_ON((int)(async->munge_count - async->buf_write_count) > 0);
return count;
}
unsigned int comedi_buf_write_n_available(struct comedi_async *async)
{
unsigned int free_end;
unsigned int nbytes;
if (async == NULL)
return 0;
free_end = async->buf_read_count + async->prealloc_bufsz;
nbytes = free_end - async->buf_write_alloc_count;
nbytes -= nbytes % bytes_per_sample(async->subdevice);
/* barrier insures the read of buf_read_count in this
query occurs before any following writes to the buffer which
might be based on the return value from this query.
*/
smp_mb();
return nbytes;
}
/* allocates chunk for the writer from free buffer space */
unsigned int comedi_buf_write_alloc(struct comedi_async *async,
unsigned int nbytes)
{
unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
if ((int)(async->buf_write_alloc_count + nbytes - free_end) > 0)
nbytes = free_end - async->buf_write_alloc_count;
async->buf_write_alloc_count += nbytes;
/* barrier insures the read of buf_read_count above occurs before
we write data to the write-alloc'ed buffer space */
smp_mb();
return nbytes;
}
EXPORT_SYMBOL(comedi_buf_write_alloc);
/* allocates nothing unless it can completely fulfill the request */
unsigned int comedi_buf_write_alloc_strict(struct comedi_async *async,
unsigned int nbytes)
{
unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;
if ((int)(async->buf_write_alloc_count + nbytes - free_end) > 0)
nbytes = 0;
async->buf_write_alloc_count += nbytes;
/* barrier insures the read of buf_read_count above occurs before
we write data to the write-alloc'ed buffer space */
smp_mb();
return nbytes;
}
/* transfers a chunk from writer to filled buffer space */
unsigned comedi_buf_write_free(struct comedi_async *async, unsigned int nbytes)
{
if ((int)(async->buf_write_count + nbytes -
async->buf_write_alloc_count) > 0) {
dev_info(async->subdevice->device->class_dev,
"attempted to write-free more bytes than have been write-allocated.\n");
nbytes = async->buf_write_alloc_count - async->buf_write_count;
}
async->buf_write_count += nbytes;
async->buf_write_ptr += nbytes;
comedi_buf_munge(async, async->buf_write_count - async->munge_count);
if (async->buf_write_ptr >= async->prealloc_bufsz)
async->buf_write_ptr %= async->prealloc_bufsz;
return nbytes;
}
EXPORT_SYMBOL(comedi_buf_write_free);
/* allocates a chunk for the reader from filled (and munged) buffer space */
unsigned comedi_buf_read_alloc(struct comedi_async *async, unsigned nbytes)
{
if ((int)(async->buf_read_alloc_count + nbytes - async->munge_count) >
0) {
nbytes = async->munge_count - async->buf_read_alloc_count;
}
async->buf_read_alloc_count += nbytes;
/* barrier insures read of munge_count occurs before we actually read
data out of buffer */
smp_rmb();
return nbytes;
}
EXPORT_SYMBOL(comedi_buf_read_alloc);
/* transfers control of a chunk from reader to free buffer space */
unsigned comedi_buf_read_free(struct comedi_async *async, unsigned int nbytes)
{
/* barrier insures data has been read out of
* buffer before read count is incremented */
smp_mb();
if ((int)(async->buf_read_count + nbytes -
async->buf_read_alloc_count) > 0) {
dev_info(async->subdevice->device->class_dev,
"attempted to read-free more bytes than have been read-allocated.\n");
nbytes = async->buf_read_alloc_count - async->buf_read_count;
}
async->buf_read_count += nbytes;
async->buf_read_ptr += nbytes;
async->buf_read_ptr %= async->prealloc_bufsz;
return nbytes;
}
EXPORT_SYMBOL(comedi_buf_read_free);
void comedi_buf_memcpy_to(struct comedi_async *async, unsigned int offset,
const void *data, unsigned int num_bytes)
{
unsigned int write_ptr = async->buf_write_ptr + offset;
if (write_ptr >= async->prealloc_bufsz)
write_ptr %= async->prealloc_bufsz;
while (num_bytes) {
unsigned int block_size;
if (write_ptr + num_bytes > async->prealloc_bufsz)
block_size = async->prealloc_bufsz - write_ptr;
else
block_size = num_bytes;
memcpy(async->prealloc_buf + write_ptr, data, block_size);
data += block_size;
num_bytes -= block_size;
write_ptr = 0;
}
}
EXPORT_SYMBOL(comedi_buf_memcpy_to);
void comedi_buf_memcpy_from(struct comedi_async *async, unsigned int offset,
void *dest, unsigned int nbytes)
{
void *src;
unsigned int read_ptr = async->buf_read_ptr + offset;
if (read_ptr >= async->prealloc_bufsz)
read_ptr %= async->prealloc_bufsz;
while (nbytes) {
unsigned int block_size;
src = async->prealloc_buf + read_ptr;
if (nbytes >= async->prealloc_bufsz - read_ptr)
block_size = async->prealloc_bufsz - read_ptr;
else
block_size = nbytes;
memcpy(dest, src, block_size);
nbytes -= block_size;
dest += block_size;
read_ptr = 0;
}
}
EXPORT_SYMBOL(comedi_buf_memcpy_from);
unsigned int comedi_buf_read_n_available(struct comedi_async *async)
{
unsigned num_bytes;
if (async == NULL)
return 0;
num_bytes = async->munge_count - async->buf_read_count;
/* barrier insures the read of munge_count in this
query occurs before any following reads of the buffer which
might be based on the return value from this query.
*/
smp_rmb();
return num_bytes;
}
EXPORT_SYMBOL(comedi_buf_read_n_available);
int comedi_buf_get(struct comedi_async *async, short *x)
{
unsigned int n = comedi_buf_read_n_available(async);
if (n < sizeof(short))
return 0;
comedi_buf_read_alloc(async, sizeof(short));
*x = *(short *)(async->prealloc_buf + async->buf_read_ptr);
comedi_buf_read_free(async, sizeof(short));
return 1;
}
EXPORT_SYMBOL(comedi_buf_get);
int comedi_buf_put(struct comedi_async *async, short x)
{
unsigned int n = comedi_buf_write_alloc_strict(async, sizeof(short));
if (n < sizeof(short)) {
async->events |= COMEDI_CB_ERROR;
return 0;
}
*(short *)(async->prealloc_buf + async->buf_write_ptr) = x;
comedi_buf_write_free(async, sizeof(short));
return 1;
}
EXPORT_SYMBOL(comedi_buf_put);
void comedi_reset_async_buf(struct comedi_async *async)
{
async->buf_write_alloc_count = 0;
async->buf_write_count = 0;
async->buf_read_alloc_count = 0;
async->buf_read_count = 0;
async->buf_write_ptr = 0;
async->buf_read_ptr = 0;
async->cur_chan = 0;
async->scan_progress = 0;
async->munge_chan = 0;
async->munge_count = 0;
async->munge_ptr = 0;
async->events = 0;
}
int comedi_auto_config(struct device *hardware_device,
struct comedi_driver *driver, unsigned long context)
{