staging: tidspbridge: convert core to list_head

Convert the core module of the tidspbridge driver
to use struct list_head instead of struct lst_list.
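
The conversion follows the same pattern throughout the module. Roughly (an
illustrative sketch only, using a made-up struct item rather than the driver's
chnl_irp/msg_frame types): a heap-allocated struct lst_list becomes an embedded
or on-stack struct list_head, lst_get_head() becomes a list_empty() check
followed by list_first_entry() plus list_del(), lst_put_tail() becomes
list_add_tail(), and the teardown loops use list_for_each_entry_safe():

  #include <linux/list.h>
  #include <linux/slab.h>
  #include <linux/types.h>

  /* Hypothetical element type standing in for chnl_irp/msg_frame. */
  struct item {
          struct list_head link;
          u32 data;
  };

  static int list_pattern_sketch(void)
  {
          LIST_HEAD(queue);       /* replaces a kzalloc'd struct lst_list */
          struct item *it, *tmp;

          it = kzalloc(sizeof(*it), GFP_KERNEL);
          if (!it)
                  return -ENOMEM;

          /* old: lst_init_elem(&it->link); lst_put_tail(q, (struct list_head *)it); */
          list_add_tail(&it->link, &queue);

          /* old: it = (struct item *)lst_get_head(q); if (!it) ... */
          if (!list_empty(&queue)) {
                  it = list_first_entry(&queue, struct item, link);
                  list_del(&it->link);
                  /* ... use it, then e.g. requeue it ... */
                  list_add_tail(&it->link, &queue);
          }

          /* old: while (!LST_IS_EMPTY(q)) kfree(lst_get_head(q)); kfree(q); */
          list_for_each_entry_safe(it, tmp, &queue, link) {
                  list_del(&it->link);
                  kfree(it);
          }
          return 0;
  }

The per-channel and per-queue list heads below are likewise embedded directly
in their parent structures and initialized with INIT_LIST_HEAD(), so the
separate NULL checks on the old list pointers go away.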

Signed-off-by: Ionut Nicu <ionut.nicu@mindbit.ro>
Signed-off-by: Omar Ramirez Luna <omar.ramirez@ti.com>

@ -20,7 +20,7 @@
#ifndef _MSG_SM_
#define _MSG_SM_
#include <dspbridge/list.h>
#include <linux/list.h>
#include <dspbridge/msgdefs.h>
/*
@ -86,12 +86,12 @@ struct msg_mgr {
struct bridge_drv_interface *intf_fxns;
struct io_mgr *hio_mgr; /* IO manager */
struct lst_list *queue_list; /* List of MSG_QUEUEs */
struct list_head queue_list; /* List of MSG_QUEUEs */
spinlock_t msg_mgr_lock; /* For critical sections */
/* Signalled when MsgFrame is available */
struct sync_object *sync_event;
struct lst_list *msg_free_list; /* Free MsgFrames ready to be filled */
struct lst_list *msg_used_list; /* MsgFrames ready to go to DSP */
struct list_head msg_free_list; /* Free MsgFrames ready to be filled */
struct list_head msg_used_list; /* MsgFrames ready to go to DSP */
u32 msgs_pending; /* # of queued messages to go to DSP */
u32 max_msgs; /* Max # of msgs that fit in buffer */
msg_onexit on_exit; /* called when RMS_EXIT is received */
@ -111,9 +111,9 @@ struct msg_queue {
struct msg_mgr *hmsg_mgr;
u32 max_msgs; /* Node message depth */
u32 msgq_id; /* Node environment pointer */
struct lst_list *msg_free_list; /* Free MsgFrames ready to be filled */
struct list_head msg_free_list; /* Free MsgFrames ready to be filled */
/* Filled MsgFrames waiting to be read */
struct lst_list *msg_used_list;
struct list_head msg_used_list;
void *arg; /* Handle passed to mgr on_exit callback */
struct sync_object *sync_event; /* Signalled when message is ready */
struct sync_object *sync_done; /* For synchronizing cleanup */

@ -37,9 +37,9 @@
* which may cause timeouts and/or failure of function sync_wait_on_event.
* This invariant condition is:
*
* LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is reset
* list_empty(&pchnl->pio_completions) ==> pchnl->sync_event is reset
* and
* !LST_Empty(pchnl->pio_completions) ==> pchnl->sync_event is set.
* !list_empty(&pchnl->pio_completions) ==> pchnl->sync_event is set.
*/
#include <linux/types.h>
@ -73,11 +73,9 @@
#define MAILBOX_IRQ INT_MAIL_MPU_IRQ
/* ----------------------------------- Function Prototypes */
static struct lst_list *create_chirp_list(u32 chirps);
static int create_chirp_list(struct list_head *list, u32 chirps);
static void free_chirp_list(struct lst_list *chirp_list);
static struct chnl_irp *make_new_chirp(void);
static void free_chirp_list(struct list_head *list);
static int search_free_channel(struct chnl_mgr *chnl_mgr_obj,
u32 *chnl);
@ -179,10 +177,14 @@ func_cont:
}
if (!status) {
/* Get a free chirp: */
chnl_packet_obj =
(struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
if (chnl_packet_obj == NULL)
if (!list_empty(&pchnl->free_packets_list)) {
chnl_packet_obj = list_first_entry(
&pchnl->free_packets_list,
struct chnl_irp, link);
list_del(&chnl_packet_obj->link);
} else {
status = -EIO;
}
}
if (!status) {
@ -206,8 +208,7 @@ func_cont:
chnl_packet_obj->dw_arg = dw_arg;
chnl_packet_obj->status = (is_eos ? CHNL_IOCSTATEOS :
CHNL_IOCSTATCOMPLETE);
lst_put_tail(pchnl->pio_requests,
(struct list_head *)chnl_packet_obj);
list_add_tail(&chnl_packet_obj->link, &pchnl->pio_requests);
pchnl->cio_reqs++;
DBC_ASSERT(pchnl->cio_reqs <= pchnl->chnl_packets);
/*
@ -254,7 +255,7 @@ int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
struct chnl_object *pchnl = (struct chnl_object *)chnl_obj;
u32 chnl_id = -1;
s8 chnl_mode;
struct chnl_irp *chnl_packet_obj;
struct chnl_irp *chirp, *tmp;
struct chnl_mgr *chnl_mgr_obj = NULL;
/* Check args: */
@ -272,7 +273,7 @@ int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
* IORequests or dispatching. */
spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
pchnl->dw_state |= CHNL_STATECANCEL;
if (LST_IS_EMPTY(pchnl->pio_requests))
if (list_empty(&pchnl->pio_requests))
goto func_cont;
if (pchnl->chnl_type == CHNL_PCPY) {
@ -286,18 +287,14 @@ int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
}
}
/* Move all IOR's to IOC queue: */
while (!LST_IS_EMPTY(pchnl->pio_requests)) {
chnl_packet_obj =
(struct chnl_irp *)lst_get_head(pchnl->pio_requests);
if (chnl_packet_obj) {
chnl_packet_obj->byte_size = 0;
chnl_packet_obj->status |= CHNL_IOCSTATCANCEL;
lst_put_tail(pchnl->pio_completions,
(struct list_head *)chnl_packet_obj);
pchnl->cio_cs++;
pchnl->cio_reqs--;
DBC_ASSERT(pchnl->cio_reqs >= 0);
}
list_for_each_entry_safe(chirp, tmp, &pchnl->pio_requests, link) {
list_del(&chirp->link);
chirp->byte_size = 0;
chirp->status |= CHNL_IOCSTATCANCEL;
list_add_tail(&chirp->link, &pchnl->pio_completions);
pchnl->cio_cs++;
pchnl->cio_reqs--;
DBC_ASSERT(pchnl->cio_reqs >= 0);
}
func_cont:
spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
@ -353,20 +350,14 @@ func_cont:
pchnl->sync_event = NULL;
}
/* Free I/O request and I/O completion queues: */
if (pchnl->pio_completions) {
free_chirp_list(pchnl->pio_completions);
pchnl->pio_completions = NULL;
pchnl->cio_cs = 0;
}
if (pchnl->pio_requests) {
free_chirp_list(pchnl->pio_requests);
pchnl->pio_requests = NULL;
pchnl->cio_reqs = 0;
}
if (pchnl->free_packets_list) {
free_chirp_list(pchnl->free_packets_list);
pchnl->free_packets_list = NULL;
}
free_chirp_list(&pchnl->pio_completions);
pchnl->cio_cs = 0;
free_chirp_list(&pchnl->pio_requests);
pchnl->cio_reqs = 0;
free_chirp_list(&pchnl->free_packets_list);
/* Release channel object. */
kfree(pchnl);
pchnl = NULL;
@ -505,7 +496,7 @@ int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
&& (pchnl->chnl_type == CHNL_PCPY)) {
/* Wait for IO completions, up to the specified
* timeout: */
while (!LST_IS_EMPTY(pchnl->pio_requests) && !status) {
while (!list_empty(&pchnl->pio_requests) && !status) {
status = bridge_chnl_get_ioc(chnl_obj,
timeout, &chnl_ioc_obj);
if (status)
@ -521,7 +512,7 @@ int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
pchnl->dw_state &= ~CHNL_STATECANCEL;
}
}
DBC_ENSURE(status || LST_IS_EMPTY(pchnl->pio_requests));
DBC_ENSURE(status || list_empty(&pchnl->pio_requests));
return status;
}
@ -581,7 +572,7 @@ int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
if (!chan_ioc || !pchnl) {
status = -EFAULT;
} else if (timeout == CHNL_IOCNOWAIT) {
if (LST_IS_EMPTY(pchnl->pio_completions))
if (list_empty(&pchnl->pio_completions))
status = -EREMOTEIO;
}
@ -596,7 +587,7 @@ int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
ioc.status = CHNL_IOCSTATCOMPLETE;
if (timeout !=
CHNL_IOCNOWAIT && LST_IS_EMPTY(pchnl->pio_completions)) {
CHNL_IOCNOWAIT && list_empty(&pchnl->pio_completions)) {
if (timeout == CHNL_IOCINFINITE)
timeout = SYNC_INFINITE;
@ -611,7 +602,7 @@ int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
* fails due to unknown causes. */
/* Even though Wait failed, there may be something in
* the Q: */
if (LST_IS_EMPTY(pchnl->pio_completions)) {
if (list_empty(&pchnl->pio_completions)) {
ioc.status |= CHNL_IOCSTATCANCEL;
dequeue_ioc = false;
}
@ -622,30 +613,26 @@ int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
if (dequeue_ioc) {
/* Dequeue IOC and set chan_ioc; */
DBC_ASSERT(!LST_IS_EMPTY(pchnl->pio_completions));
chnl_packet_obj =
(struct chnl_irp *)lst_get_head(pchnl->pio_completions);
DBC_ASSERT(!list_empty(&pchnl->pio_completions));
chnl_packet_obj = list_first_entry(&pchnl->pio_completions,
struct chnl_irp, link);
list_del(&chnl_packet_obj->link);
/* Update chan_ioc from channel state and chirp: */
if (chnl_packet_obj) {
pchnl->cio_cs--;
/* If this is a zero-copy channel, then set IOC's pbuf
* to the DSP's address. This DSP address will get
* translated to user's virtual addr later. */
{
host_sys_buf = chnl_packet_obj->host_sys_buf;
ioc.pbuf = chnl_packet_obj->host_user_buf;
}
ioc.byte_size = chnl_packet_obj->byte_size;
ioc.buf_size = chnl_packet_obj->buf_size;
ioc.dw_arg = chnl_packet_obj->dw_arg;
ioc.status |= chnl_packet_obj->status;
/* Place the used chirp on the free list: */
lst_put_tail(pchnl->free_packets_list,
(struct list_head *)chnl_packet_obj);
} else {
ioc.pbuf = NULL;
ioc.byte_size = 0;
}
pchnl->cio_cs--;
/*
* If this is a zero-copy channel, then set IOC's pbuf
* to the DSP's address. This DSP address will get
* translated to user's virtual addr later.
*/
host_sys_buf = chnl_packet_obj->host_sys_buf;
ioc.pbuf = chnl_packet_obj->host_user_buf;
ioc.byte_size = chnl_packet_obj->byte_size;
ioc.buf_size = chnl_packet_obj->buf_size;
ioc.dw_arg = chnl_packet_obj->dw_arg;
ioc.status |= chnl_packet_obj->status;
/* Place the used chirp on the free list: */
list_add_tail(&chnl_packet_obj->link,
&pchnl->free_packets_list);
} else {
ioc.pbuf = NULL;
ioc.byte_size = 0;
@ -653,7 +640,7 @@ int bridge_chnl_get_ioc(struct chnl_object *chnl_obj, u32 timeout,
ioc.buf_size = 0;
}
/* Ensure invariant: If any IOC's are queued for this channel... */
if (!LST_IS_EMPTY(pchnl->pio_completions)) {
if (!list_empty(&pchnl->pio_completions)) {
/* Since DSPStream_Reclaim() does not take a timeout
* parameter, we pass the stream's timeout value to
* bridge_chnl_get_ioc. We cannot determine whether or not
@ -818,9 +805,16 @@ int bridge_chnl_open(struct chnl_object **chnl,
/* Protect queues from io_dpc: */
pchnl->dw_state = CHNL_STATECANCEL;
/* Allocate initial IOR and IOC queues: */
pchnl->free_packets_list = create_chirp_list(pattrs->uio_reqs);
pchnl->pio_requests = create_chirp_list(0);
pchnl->pio_completions = create_chirp_list(0);
status = create_chirp_list(&pchnl->free_packets_list,
pattrs->uio_reqs);
if (status) {
kfree(pchnl);
goto func_end;
}
INIT_LIST_HEAD(&pchnl->pio_requests);
INIT_LIST_HEAD(&pchnl->pio_completions);
pchnl->chnl_packets = pattrs->uio_reqs;
pchnl->cio_cs = 0;
pchnl->cio_reqs = 0;
@ -840,40 +834,26 @@ int bridge_chnl_open(struct chnl_object **chnl,
}
if (!status) {
if (pchnl->pio_completions && pchnl->pio_requests &&
pchnl->free_packets_list) {
/* Initialize CHNL object fields: */
pchnl->chnl_mgr_obj = chnl_mgr_obj;
pchnl->chnl_id = ch_id;
pchnl->chnl_mode = chnl_mode;
pchnl->user_event = sync_event;
pchnl->sync_event = sync_event;
/* Get the process handle */
pchnl->process = current->tgid;
pchnl->pcb_arg = 0;
pchnl->bytes_moved = 0;
/* Default to proc-copy */
pchnl->chnl_type = CHNL_PCPY;
} else {
status = -ENOMEM;
}
/* Initialize CHNL object fields: */
pchnl->chnl_mgr_obj = chnl_mgr_obj;
pchnl->chnl_id = ch_id;
pchnl->chnl_mode = chnl_mode;
pchnl->user_event = sync_event;
pchnl->sync_event = sync_event;
/* Get the process handle */
pchnl->process = current->tgid;
pchnl->pcb_arg = 0;
pchnl->bytes_moved = 0;
/* Default to proc-copy */
pchnl->chnl_type = CHNL_PCPY;
}
if (status) {
/* Free memory */
if (pchnl->pio_completions) {
free_chirp_list(pchnl->pio_completions);
pchnl->pio_completions = NULL;
pchnl->cio_cs = 0;
}
if (pchnl->pio_requests) {
free_chirp_list(pchnl->pio_requests);
pchnl->pio_requests = NULL;
}
if (pchnl->free_packets_list) {
free_chirp_list(pchnl->free_packets_list);
pchnl->free_packets_list = NULL;
}
free_chirp_list(&pchnl->pio_completions);
pchnl->cio_cs = 0;
free_chirp_list(&pchnl->pio_requests);
free_chirp_list(&pchnl->free_packets_list);
kfree(sync_event);
sync_event = NULL;
@ -924,37 +904,35 @@ int bridge_chnl_register_notify(struct chnl_object *chnl_obj,
* Purpose:
* Initialize a queue of channel I/O Request/Completion packets.
* Parameters:
* list: Pointer to a list_head
* chirps: Number of Chirps to allocate.
* Returns:
* Pointer to queue of IRPs, or NULL.
* 0 if successful, error code otherwise.
* Requires:
* Ensures:
*/
static struct lst_list *create_chirp_list(u32 chirps)
static int create_chirp_list(struct list_head *list, u32 chirps)
{
struct lst_list *chirp_list;
struct chnl_irp *chnl_packet_obj;
struct chnl_irp *chirp;
u32 i;
chirp_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
INIT_LIST_HEAD(list);
if (chirp_list) {
INIT_LIST_HEAD(&chirp_list->head);
/* Make N chirps and place on queue. */
for (i = 0; (i < chirps)
&& ((chnl_packet_obj = make_new_chirp()) != NULL); i++) {
lst_put_tail(chirp_list,
(struct list_head *)chnl_packet_obj);
}
/* If we couldn't allocate all chirps, free those allocated: */
if (i != chirps) {
free_chirp_list(chirp_list);
chirp_list = NULL;
}
/* Make N chirps and place on queue. */
for (i = 0; i < chirps; i++) {
chirp = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
if (!chirp)
break;
list_add_tail(&chirp->link, list);
}
return chirp_list;
/* If we couldn't allocate all chirps, free those allocated: */
if (i != chirps) {
free_chirp_list(list);
return -ENOMEM;
}
return 0;
}
/*
@ -962,31 +940,16 @@ static struct lst_list *create_chirp_list(u32 chirps)
* Purpose:
* Free the queue of Chirps.
*/
static void free_chirp_list(struct lst_list *chirp_list)
static void free_chirp_list(struct list_head *chirp_list)
{
struct chnl_irp *chirp, *tmp;
DBC_REQUIRE(chirp_list != NULL);
while (!LST_IS_EMPTY(chirp_list))
kfree(lst_get_head(chirp_list));
kfree(chirp_list);
}
/*
* ======== make_new_chirp ========
* Allocate the memory for a new channel IRP.
*/
static struct chnl_irp *make_new_chirp(void)
{
struct chnl_irp *chnl_packet_obj;
chnl_packet_obj = kzalloc(sizeof(struct chnl_irp), GFP_KERNEL);
if (chnl_packet_obj != NULL) {
/* lst_init_elem only resets the list's member values. */
lst_init_elem(&chnl_packet_obj->link);
list_for_each_entry_safe(chirp, tmp, chirp_list, link) {
list_del(&chirp->link);
kfree(chirp);
}
return chnl_packet_obj;
}
/*

@ -24,6 +24,7 @@
* function.
*/
#include <linux/types.h>
#include <linux/list.h>
/* Host OS */
#include <dspbridge/host_os.h>
@ -1092,15 +1093,17 @@ static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
pchnl = chnl_mgr_obj->ap_channel[chnl_id];
if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) {
if ((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY) {
if (!pchnl->pio_requests)
goto func_end;
/* Get the I/O request, and attempt a transfer */
chnl_packet_obj = (struct chnl_irp *)
lst_get_head(pchnl->pio_requests);
if (chnl_packet_obj) {
pchnl->cio_reqs--;
if (pchnl->cio_reqs < 0)
if (!list_empty(&pchnl->pio_requests)) {
if (!pchnl->cio_reqs)
goto func_end;
chnl_packet_obj = list_first_entry(
&pchnl->pio_requests,
struct chnl_irp, link);
list_del(&chnl_packet_obj->link);
pchnl->cio_reqs--;
/*
* Ensure we don't overflow the client's
* buffer.
@ -1127,21 +1130,18 @@ static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
* the channel state.
*/
chnl_packet_obj->status |=
CHNL_IOCSTATEOS;
CHNL_IOCSTATEOS;
pchnl->dw_state |= CHNL_STATEEOS;
/*
* Notify that end of stream has
* occurred.
*/
ntfy_notify(pchnl->ntfy_obj,
DSP_STREAMDONE);
DSP_STREAMDONE);
}
/* Tell DSP if no more I/O buffers available */
if (!pchnl->pio_requests)
goto func_end;
if (LST_IS_EMPTY(pchnl->pio_requests)) {
if (list_empty(&pchnl->pio_requests))
set_chnl_free(sm, pchnl->chnl_id);
}
clear_chnl = true;
notify_client = true;
} else {
@ -1213,21 +1213,18 @@ static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
msg.msgq_id =
read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
msg_input += sizeof(struct msg_dspmsg);
if (!hmsg_mgr->queue_list)
goto func_end;
/* Determine which queue to put the message in */
msg_queue_obj =
(struct msg_queue *)lst_first(hmsg_mgr->queue_list);
dev_dbg(bridge, "input msg: dw_cmd=0x%x dw_arg1=0x%x "
"dw_arg2=0x%x msgq_id=0x%x \n", msg.msg.dw_cmd,
"dw_arg2=0x%x msgq_id=0x%x\n", msg.msg.dw_cmd,
msg.msg.dw_arg1, msg.msg.dw_arg2, msg.msgq_id);
/*
* Interrupt may occur before shared memory and message
* input locations have been set up. If all nodes were
* cleaned up, hmsg_mgr->max_msgs should be 0.
*/
while (msg_queue_obj != NULL) {
list_for_each_entry(msg_queue_obj, &hmsg_mgr->queue_list,
list_elem) {
if (msg.msgq_id == msg_queue_obj->msgq_id) {
/* Found it */
if (msg.msg.dw_cmd == RMS_EXITACK) {
@ -1237,47 +1234,39 @@ static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
* queued.
*/
(*hmsg_mgr->on_exit) ((void *)
msg_queue_obj->arg,
msg.msg.dw_arg1);
msg_queue_obj->arg,
msg.msg.dw_arg1);
break;
}
/*
* Not an exit acknowledgement, queue
* the message.
*/
if (!list_empty(&msg_queue_obj->
msg_free_list)) {
pmsg = list_first_entry(
&msg_queue_obj->msg_free_list,
struct msg_frame, list_elem);
list_del(&pmsg->list_elem);
pmsg->msg_data = msg;
list_add_tail(&pmsg->list_elem,
&msg_queue_obj->msg_used_list);
ntfy_notify
(msg_queue_obj->ntfy_obj,
DSP_NODEMESSAGEREADY);
sync_set_event
(msg_queue_obj->sync_event);
} else {
/*
* Not an exit acknowledgement, queue
* the message.
* No free frame to copy the
* message into.
*/
if (!msg_queue_obj->msg_free_list)
goto func_end;
pmsg = (struct msg_frame *)lst_get_head
(msg_queue_obj->msg_free_list);
if (msg_queue_obj->msg_used_list
&& pmsg) {
pmsg->msg_data = msg;
lst_put_tail
(msg_queue_obj->msg_used_list,
(struct list_head *)pmsg);
ntfy_notify
(msg_queue_obj->ntfy_obj,
DSP_NODEMESSAGEREADY);
sync_set_event
(msg_queue_obj->sync_event);
} else {
/*
* No free frame to copy the
* message into.
*/
pr_err("%s: no free msg frames,"
" discarding msg\n",
__func__);
}
pr_err("%s: no free msg frames,"
" discarding msg\n",
__func__);
}
break;
}
if (!hmsg_mgr->queue_list || !msg_queue_obj)
goto func_end;
msg_queue_obj =
(struct msg_queue *)lst_next(hmsg_mgr->queue_list,
(struct list_head *)
msg_queue_obj);
}
}
/* Set the post SWI flag */
@ -1301,8 +1290,7 @@ static void notify_chnl_complete(struct chnl_object *pchnl,
{
bool signal_event;
if (!pchnl || !pchnl->sync_event ||
!pchnl->pio_completions || !chnl_packet_obj)
if (!pchnl || !pchnl->sync_event || !chnl_packet_obj)
goto func_end;
/*
@ -1311,10 +1299,9 @@ static void notify_chnl_complete(struct chnl_object *pchnl,
* signalled by the only IO completion list consumer:
* bridge_chnl_get_ioc().
*/
signal_event = LST_IS_EMPTY(pchnl->pio_completions);
signal_event = list_empty(&pchnl->pio_completions);
/* Enqueue the IO completion info for the client */
lst_put_tail(pchnl->pio_completions,
(struct list_head *)chnl_packet_obj);
list_add_tail(&chnl_packet_obj->link, &pchnl->pio_completions);
pchnl->cio_cs++;
if (pchnl->cio_cs > pchnl->chnl_packets)
@ -1361,21 +1348,23 @@ static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
goto func_end;
pchnl = chnl_mgr_obj->ap_channel[chnl_id];
if (!pchnl || !pchnl->pio_requests) {
if (!pchnl || list_empty(&pchnl->pio_requests)) {
/* Shouldn't get here */
goto func_end;
}
/* Get the I/O request, and attempt a transfer */
chnl_packet_obj = (struct chnl_irp *)lst_get_head(pchnl->pio_requests);
if (!chnl_packet_obj)
if (!pchnl->cio_reqs)
goto func_end;
/* Get the I/O request, and attempt a transfer */
chnl_packet_obj = list_first_entry(&pchnl->pio_requests,
struct chnl_irp, link);
list_del(&chnl_packet_obj->link);
pchnl->cio_reqs--;
if (pchnl->cio_reqs < 0 || !pchnl->pio_requests)
goto func_end;
/* Record fact that no more I/O buffers available */
if (LST_IS_EMPTY(pchnl->pio_requests))
if (list_empty(&pchnl->pio_requests))
chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
/* Transfer buffer to DSP side */
@ -1436,14 +1425,11 @@ static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
msg_output = pio_mgr->msg_output;
/* Copy num_msgs messages into shared memory */
for (i = 0; i < num_msgs; i++) {
if (!hmsg_mgr->msg_used_list) {
pmsg = NULL;
goto func_end;
} else {
pmsg = (struct msg_frame *)
lst_get_head(hmsg_mgr->msg_used_list);
}
if (pmsg != NULL) {
if (!list_empty(&hmsg_mgr->msg_used_list)) {
pmsg = list_first_entry(
&hmsg_mgr->msg_used_list,
struct msg_frame, list_elem);
list_del(&pmsg->list_elem);
val = (pmsg->msg_data).msgq_id;
addr = (u32) &(((struct msg_dspmsg *)
msg_output)->msgq_id);
@ -1465,10 +1451,8 @@ static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
write_ext32_bit_dsp_data(
pio_mgr->hbridge_context, addr, val);
msg_output += sizeof(struct msg_dspmsg);
if (!hmsg_mgr->msg_free_list)
goto func_end;
lst_put_tail(hmsg_mgr->msg_free_list,
(struct list_head *)pmsg);
list_add_tail(&pmsg->list_elem,
&hmsg_mgr->msg_free_list);
sync_set_event(hmsg_mgr->sync_event);
}
}
@ -1492,8 +1476,6 @@ static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
MBX_PCPY_CLASS);
}
}
func_end:
return;
}
/*

@ -24,7 +24,6 @@
#include <dspbridge/dbc.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/list.h>
#include <dspbridge/sync.h>
/* ----------------------------------- Platform Manager */
@ -38,10 +37,10 @@
#include <dspbridge/dspmsg.h>
/* ----------------------------------- Function Prototypes */
static int add_new_msg(struct lst_list *msg_list);
static int add_new_msg(struct list_head *msg_list);
static void delete_msg_mgr(struct msg_mgr *hmsg_mgr);
static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp);
static void free_msg_list(struct lst_list *msg_list);
static void free_msg_list(struct list_head *msg_list);
/*
* ======== bridge_msg_create ========
@ -73,25 +72,13 @@ int bridge_msg_create(struct msg_mgr **msg_man,
msg_mgr_obj->on_exit = msg_callback;
msg_mgr_obj->hio_mgr = hio_mgr;
/* List of MSG_QUEUEs */
msg_mgr_obj->queue_list = kzalloc(sizeof(struct lst_list),
GFP_KERNEL);
INIT_LIST_HEAD(&msg_mgr_obj->queue_list);
/* Queues of message frames for messages to the DSP. Message
* frames will only be added to the free queue when a
* msg_queue object is created. */
msg_mgr_obj->msg_free_list = kzalloc(sizeof(struct lst_list),
GFP_KERNEL);
msg_mgr_obj->msg_used_list = kzalloc(sizeof(struct lst_list),
GFP_KERNEL);
if (msg_mgr_obj->queue_list == NULL ||
msg_mgr_obj->msg_free_list == NULL ||
msg_mgr_obj->msg_used_list == NULL) {
status = -ENOMEM;
} else {
INIT_LIST_HEAD(&msg_mgr_obj->queue_list->head);
INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list->head);
INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list->head);
spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
}
INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list);
INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list);
spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
/* Create an event to be used by bridge_msg_put() in waiting
* for an available free frame from the message manager. */
@ -128,7 +115,7 @@ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
struct msg_queue *msg_q;
int status = 0;
if (!hmsg_mgr || msgq == NULL || !hmsg_mgr->msg_free_list) {
if (!hmsg_mgr || msgq == NULL) {
status = -EFAULT;
goto func_end;
}
@ -140,20 +127,13 @@ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
status = -ENOMEM;
goto func_end;
}
lst_init_elem((struct list_head *)msg_q);
msg_q->max_msgs = max_msgs;
msg_q->hmsg_mgr = hmsg_mgr;
msg_q->arg = arg; /* Node handle */
msg_q->msgq_id = msgq_id; /* Node env (not valid yet) */
/* Queues of Message frames for messages from the DSP */
msg_q->msg_free_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
msg_q->msg_used_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
if (msg_q->msg_free_list == NULL || msg_q->msg_used_list == NULL)
status = -ENOMEM;
else {
INIT_LIST_HEAD(&msg_q->msg_free_list->head);
INIT_LIST_HEAD(&msg_q->msg_used_list->head);
}
INIT_LIST_HEAD(&msg_q->msg_free_list);
INIT_LIST_HEAD(&msg_q->msg_used_list);
/* Create event that will be signalled when a message from
* the DSP is available. */
@ -204,10 +184,10 @@ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
/* Initialize message frames and put in appropriate queues */
for (i = 0; i < max_msgs && !status; i++) {
status = add_new_msg(hmsg_mgr->msg_free_list);
status = add_new_msg(&hmsg_mgr->msg_free_list);
if (!status) {
num_allocated++;
status = add_new_msg(msg_q->msg_free_list);
status = add_new_msg(&msg_q->msg_free_list);
}
}
if (status) {
@ -215,11 +195,11 @@ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
* of the newly allocated message frames. */
delete_msg_queue(msg_q, num_allocated);
} else {
lst_put_tail(hmsg_mgr->queue_list,
(struct list_head *)msg_q);
list_add_tail(&msg_q->list_elem,
&hmsg_mgr->queue_list);
*msgq = msg_q;
/* Signal that free frames are now available */
if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
if (!list_empty(&hmsg_mgr->msg_free_list))
sync_set_event(hmsg_mgr->sync_event);
}
@ -267,15 +247,12 @@ void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
}
/* Remove message queue from hmsg_mgr->queue_list */
spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
lst_remove_elem(hmsg_mgr->queue_list,
(struct list_head *)msg_queue_obj);
list_del(&msg_queue_obj->list_elem);
/* Free the message queue object */
delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
if (!hmsg_mgr->msg_free_list)
goto func_cont;
if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
if (list_empty(&hmsg_mgr->msg_free_list))
sync_reset_event(hmsg_mgr->sync_event);
func_cont:
spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
func_end:
return;
@ -301,26 +278,21 @@ int bridge_msg_get(struct msg_queue *msg_queue_obj,
}
hmsg_mgr = msg_queue_obj->hmsg_mgr;
if (!msg_queue_obj->msg_used_list) {
status = -EFAULT;
goto func_end;
}
/* Enter critical section */
spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
/* If a message is already there, get it */
if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list)) {
msg_frame_obj = (struct msg_frame *)
lst_get_head(msg_queue_obj->msg_used_list);
if (msg_frame_obj != NULL) {
*pmsg = msg_frame_obj->msg_data.msg;
lst_put_tail(msg_queue_obj->msg_free_list,
(struct list_head *)msg_frame_obj);
if (LST_IS_EMPTY(msg_queue_obj->msg_used_list))
sync_reset_event(msg_queue_obj->sync_event);
if (!list_empty(&msg_queue_obj->msg_used_list)) {
msg_frame_obj = list_first_entry(&msg_queue_obj->msg_used_list,
struct msg_frame, list_elem);
list_del(&msg_frame_obj->list_elem);
*pmsg = msg_frame_obj->msg_data.msg;
list_add_tail(&msg_frame_obj->list_elem,
&msg_queue_obj->msg_free_list);
if (list_empty(&msg_queue_obj->msg_used_list))
sync_reset_event(msg_queue_obj->sync_event);
got_msg = true;
}
got_msg = true;
} else {
if (msg_queue_obj->done)
status = -EPERM;
@ -349,25 +321,22 @@ int bridge_msg_get(struct msg_queue *msg_queue_obj,
(void)sync_set_event(msg_queue_obj->sync_done_ack);
status = -EPERM;
} else {
if (!status) {
DBC_ASSERT(!LST_IS_EMPTY
(msg_queue_obj->msg_used_list));
if (!status && !list_empty(&msg_queue_obj->
msg_used_list)) {
/* Get msg from used list */
msg_frame_obj = (struct msg_frame *)
lst_get_head(msg_queue_obj->msg_used_list);
msg_frame_obj = list_first_entry(
&msg_queue_obj->msg_used_list,
struct msg_frame, list_elem);
list_del(&msg_frame_obj->list_elem);
/* Copy message into pmsg and put frame on the
* free list */
if (msg_frame_obj != NULL) {
*pmsg = msg_frame_obj->msg_data.msg;
lst_put_tail
(msg_queue_obj->msg_free_list,
(struct list_head *)
msg_frame_obj);
}
*pmsg = msg_frame_obj->msg_data.msg;
list_add_tail(&msg_frame_obj->list_elem,
&msg_queue_obj->msg_free_list);
}
msg_queue_obj->io_msg_pend--;
/* Reset the event if there are still queued messages */
if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list))
if (!list_empty(&msg_queue_obj->msg_used_list))
sync_set_event(msg_queue_obj->sync_event);
/* Exit critical section */
@ -397,27 +366,22 @@ int bridge_msg_put(struct msg_queue *msg_queue_obj,
goto func_end;
}
hmsg_mgr = msg_queue_obj->hmsg_mgr;
if (!hmsg_mgr->msg_free_list) {
status = -EFAULT;
goto func_end;
}
spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
/* If a message frame is available, use it */
if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
msg_frame_obj =
(struct msg_frame *)lst_get_head(hmsg_mgr->msg_free_list);
if (msg_frame_obj != NULL) {
msg_frame_obj->msg_data.msg = *pmsg;
msg_frame_obj->msg_data.msgq_id =
msg_queue_obj->msgq_id;
lst_put_tail(hmsg_mgr->msg_used_list,
(struct list_head *)msg_frame_obj);
hmsg_mgr->msgs_pending++;
put_msg = true;
}
if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
if (!list_empty(&hmsg_mgr->msg_free_list)) {
msg_frame_obj = list_first_entry(&hmsg_mgr->msg_free_list,
struct msg_frame, list_elem);
list_del(&msg_frame_obj->list_elem);
msg_frame_obj->msg_data.msg = *pmsg;
msg_frame_obj->msg_data.msgq_id =
msg_queue_obj->msgq_id;
list_add_tail(&msg_frame_obj->list_elem,
&hmsg_mgr->msg_used_list);
hmsg_mgr->msgs_pending++;
put_msg = true;
if (list_empty(&hmsg_mgr->msg_free_list))
sync_reset_event(hmsg_mgr->sync_event);
/* Release critical section before scheduling DPC */
@ -452,34 +416,34 @@ int bridge_msg_put(struct msg_queue *msg_queue_obj,
(void)sync_set_event(msg_queue_obj->sync_done_ack);
status = -EPERM;
} else {
if (LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
if (list_empty(&hmsg_mgr->msg_free_list)) {
status = -EFAULT;
goto func_cont;
}
/* Get msg from free list */
msg_frame_obj = (struct msg_frame *)
lst_get_head(hmsg_mgr->msg_free_list);
msg_frame_obj = list_first_entry(
&hmsg_mgr->msg_free_list,
struct msg_frame, list_elem);
list_del(&msg_frame_obj->list_elem);
/*
* Copy message into pmsg and put frame on the
* used list.
*/
if (msg_frame_obj) {
msg_frame_obj->msg_data.msg = *pmsg;
msg_frame_obj->msg_data.msgq_id =
msg_queue_obj->msgq_id;
lst_put_tail(hmsg_mgr->msg_used_list,
(struct list_head *)msg_frame_obj);
hmsg_mgr->msgs_pending++;
/*
* Schedule a DPC, to do the actual
* data transfer.
*/
iosm_schedule(hmsg_mgr->hio_mgr);
}
msg_frame_obj->msg_data.msg = *pmsg;
msg_frame_obj->msg_data.msgq_id =
msg_queue_obj->msgq_id;
list_add_tail(&msg_frame_obj->list_elem,
&hmsg_mgr->msg_used_list);
hmsg_mgr->msgs_pending++;
/*
* Schedule a DPC, to do the actual
* data transfer.
*/
iosm_schedule(hmsg_mgr->hio_mgr);
msg_queue_obj->io_msg_pend--;
/* Reset event if there are still frames available */
if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
if (!list_empty(&hmsg_mgr->msg_free_list))
sync_set_event(hmsg_mgr->sync_event);
func_cont:
/* Exit critical section */
@ -551,15 +515,14 @@ void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id)
* ======== add_new_msg ========
* Must be called in message manager critical section.
*/
static int add_new_msg(struct lst_list *msg_list)
static int add_new_msg(struct list_head *msg_list)
{
struct msg_frame *pmsg;
int status = 0;
pmsg = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
if (pmsg != NULL) {
lst_init_elem((struct list_head *)pmsg);
lst_put_tail(msg_list, (struct list_head *)pmsg);
list_add_tail(&pmsg->list_elem, msg_list);
} else {
status = -ENOMEM;
}
@ -575,22 +538,9 @@ static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
if (!hmsg_mgr)
goto func_end;
if (hmsg_mgr->queue_list) {
if (LST_IS_EMPTY(hmsg_mgr->queue_list)) {
kfree(hmsg_mgr->queue_list);
hmsg_mgr->queue_list = NULL;
}
}
if (hmsg_mgr->msg_free_list) {
free_msg_list(hmsg_mgr->msg_free_list);
hmsg_mgr->msg_free_list = NULL;
}
if (hmsg_mgr->msg_used_list) {
free_msg_list(hmsg_mgr->msg_used_list);
hmsg_mgr->msg_used_list = NULL;
}
/* FIXME: free elements from queue_list? */
free_msg_list(&hmsg_mgr->msg_free_list);
free_msg_list(&hmsg_mgr->msg_used_list);
kfree(hmsg_mgr->sync_event);
@ -605,37 +555,26 @@ func_end:
static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 num_to_dsp)
{
struct msg_mgr *hmsg_mgr;
struct msg_frame *pmsg;
struct msg_frame *pmsg, *tmp;
u32 i;
if (!msg_queue_obj ||
!msg_queue_obj->hmsg_mgr || !msg_queue_obj->hmsg_mgr->msg_free_list)
if (!msg_queue_obj || !msg_queue_obj->hmsg_mgr)
goto func_end;
hmsg_mgr = msg_queue_obj->hmsg_mgr;
/* Pull off num_to_dsp message frames from Msg manager and free */
for (i = 0; i < num_to_dsp; i++) {
if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
pmsg = (struct msg_frame *)
lst_get_head(hmsg_mgr->msg_free_list);
kfree(pmsg);
} else {
/* Cannot free all of the message frames */
i = 0;
list_for_each_entry_safe(pmsg, tmp, &hmsg_mgr->msg_free_list,
list_elem) {
list_del(&pmsg->list_elem);
kfree(pmsg);
if (i++ >= num_to_dsp)
break;
}
}
if (msg_queue_obj->msg_free_list) {
free_msg_list(msg_queue_obj->msg_free_list);
msg_queue_obj->msg_free_list = NULL;
}
if (msg_queue_obj->msg_used_list) {
free_msg_list(msg_queue_obj->msg_used_list);
msg_queue_obj->msg_used_list = NULL;
}
free_msg_list(&msg_queue_obj->msg_free_list);
free_msg_list(&msg_queue_obj->msg_used_list);
if (msg_queue_obj->ntfy_obj) {
ntfy_delete(msg_queue_obj->ntfy_obj);
@ -655,19 +594,18 @@ func_end:
/*
* ======== free_msg_list ========
*/
static void free_msg_list(struct lst_list *msg_list)
static void free_msg_list(struct list_head *msg_list)
{
struct msg_frame *pmsg;
struct msg_frame *pmsg, *tmp;
if (!msg_list)
goto func_end;
while ((pmsg = (struct msg_frame *)lst_get_head(msg_list)) != NULL)
list_for_each_entry_safe(pmsg, tmp, msg_list, list_elem) {
list_del(&pmsg->list_elem);
kfree(pmsg);
}
DBC_ASSERT(LST_IS_EMPTY(msg_list));
kfree(msg_list);
func_end:
return;
}

@ -26,7 +26,7 @@
#include <dspbridge/dspapi.h>
#include <dspbridge/dspdefs.h>
#include <dspbridge/list.h>
#include <linux/list.h>
#include <dspbridge/ntfy.h>
/*
@ -148,13 +148,13 @@ struct chnl_object {
struct sync_object *sync_event;
u32 process; /* Process which created this channel */
u32 pcb_arg; /* Argument to use with callback */
struct lst_list *pio_requests; /* List of IOR's to driver */
struct list_head pio_requests; /* List of IOR's to driver */
s32 cio_cs; /* Number of IOC's in queue */
s32 cio_reqs; /* Number of IORequests in queue */
s32 chnl_packets; /* Initial number of free Irps */
/* List of IOC's from driver */
struct lst_list *pio_completions;
struct lst_list *free_packets_list; /* List of free Irps */
struct list_head pio_completions;
struct list_head free_packets_list; /* List of free Irps */
struct ntfy_object *ntfy_obj;
u32 bytes_moved; /* Total number of bytes transferred */

@ -19,7 +19,6 @@
#ifndef CMMDEFS_
#define CMMDEFS_
#include <dspbridge/list.h>
/* Cmm attributes used in cmm_create() */
struct cmm_mgrattrs {

@ -20,6 +20,7 @@
#define _SYNC_H
#include <dspbridge/dbdefs.h>
#include <dspbridge/host_os.h>
/* Special timeout value indicating an infinite wait: */