staging: tidspbridge: set3 remove hungarian from structs

Hungarian notation will be removed from the elements inside
structures; the following variables will be renamed:

Original:               Replacement:
dw_seg_base_va          seg_base_va
dw_self_loop            self_loop
dw_state                state
dw_tc_endianism         tc_endianism
dw_test_base            test_base
dw_type                 type
dw_val1                 val1
dw_val2                 val2
dw_val3                 val3
dw_va                   va
dw_virt_base            virt_base
dw_vm_base              vm_base
dw_vm_size              vm_size
pfn_allocate            allocate
pfn_brd_mem_copy        brd_mem_copy
pfn_brd_mem_map         brd_mem_map
pfn_brd_mem_un_map      brd_mem_un_map
pfn_brd_mem_write       brd_mem_write
pfn_brd_monitor         brd_monitor
pfn_brd_read            brd_read

Signed-off-by: Rene Sapiens <rene.sapiens@ti.com>
Signed-off-by: Omar Ramirez Luna <omar.ramirez@ti.com>
Author: Rene Sapiens, 2011-01-18 03:19:05 +00:00 (committed by Omar Ramirez Luna)
Parent: 5108de0ae0
Commit: 3c882de542
15 changed files with 86 additions and 86 deletions
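For illustration only (not part of this commit): a minimal, self-contained C sketch of the rename pattern applied in the hunks below, using the chnl_object "state" member as the example. The u8 typedef and the CHNL_STATEEOS value here are stand-ins, not the driver's real definitions.

#include <stdio.h>

typedef unsigned char u8;	/* stand-in for the kernel typedef */
#define CHNL_STATEEOS 0x2	/* illustrative value, not the driver's */

struct chnl_object {
	u8 state;		/* was: u8 dw_state; */
};

int main(void)
{
	struct chnl_object chnl = { 0 };

	/* call sites change the same way: pchnl->dw_state becomes pchnl->state */
	chnl.state |= CHNL_STATEEOS;
	printf("state = 0x%x\n", (unsigned int)chnl.state);
	return 0;
}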


@@ -332,9 +332,9 @@ struct bridge_dev_context {
 	u32 dsp_clk_m2_base; /* DSP Clock Module m2 */
 	u32 public_rhea; /* Pub Rhea */
 	u32 int_addr; /* MB INTR reg */
-	u32 dw_tc_endianism; /* TC Endianism register */
-	u32 dw_test_base; /* DSP MMU Mapped registers */
-	u32 dw_self_loop; /* Pointer to the selfloop */
+	u32 tc_endianism; /* TC Endianism register */
+	u32 test_base; /* DSP MMU Mapped registers */
+	u32 self_loop; /* Pointer to the selfloop */
 	u32 dsp_start_add; /* API Boot vector */
 	u32 internal_size; /* Internal memory size */


@@ -115,7 +115,7 @@ int bridge_chnl_add_io_req(struct chnl_object *chnl_obj, void *host_buf,
 	 * Check the channel state: only queue chirp if channel state
 	 * allows it.
 	 */
-	dw_state = pchnl->dw_state;
+	dw_state = pchnl->state;
 	if (dw_state != CHNL_STATEREADY) {
 		if (dw_state & CHNL_STATECANCEL)
 			return -ECANCELED;
@@ -207,7 +207,7 @@ func_cont:
 	 * more IOR's.
 	 */
 	if (is_eos)
-		pchnl->dw_state |= CHNL_STATEEOS;
+		pchnl->state |= CHNL_STATEEOS;
 	/* Legacy DSM Processor-Copy */
 	DBC_ASSERT(pchnl->chnl_type == CHNL_PCPY);
@@ -258,7 +258,7 @@ int bridge_chnl_cancel_io(struct chnl_object *chnl_obj)
 	 * IORequests or dispatching. */
 	spin_lock_bh(&chnl_mgr_obj->chnl_mgr_lock);
-	pchnl->dw_state |= CHNL_STATECANCEL;
+	pchnl->state |= CHNL_STATECANCEL;
 	if (list_empty(&pchnl->pio_requests)) {
 		spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
@@ -312,7 +312,7 @@ int bridge_chnl_close(struct chnl_object *chnl_obj)
 	if (status)
 		return status;
 	/* Assert I/O on this channel is now cancelled: Protects from io_dpc */
-	DBC_ASSERT((pchnl->dw_state & CHNL_STATECANCEL));
+	DBC_ASSERT((pchnl->state & CHNL_STATECANCEL));
 	/* Invalidate channel object: Protects from CHNL_GetIOCompletion() */
 	/* Free the slot in the channel manager: */
 	pchnl->chnl_mgr_obj->ap_channel[pchnl->chnl_id] = NULL;
@@ -381,7 +381,7 @@ int bridge_chnl_create(struct chnl_mgr **channel_mgr,
 		* max_channels, GFP_KERNEL);
 	if (chnl_mgr_obj->ap_channel) {
 		/* Initialize chnl_mgr object */
-		chnl_mgr_obj->dw_type = CHNL_TYPESM;
+		chnl_mgr_obj->type = CHNL_TYPESM;
 		chnl_mgr_obj->word_size = mgr_attrts->word_size;
 		/* Total # chnls supported */
 		chnl_mgr_obj->max_channels = max_channels;
@@ -488,7 +488,7 @@ int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout)
 	} else {
 		status = bridge_chnl_cancel_io(chnl_obj);
 		/* Now, leave the channel in the ready state: */
-		pchnl->dw_state &= ~CHNL_STATECANCEL;
+		pchnl->state &= ~CHNL_STATECANCEL;
 	}
 	}
 	DBC_ENSURE(status || list_empty(&pchnl->pio_requests));
@@ -517,7 +517,7 @@ int bridge_chnl_get_info(struct chnl_object *chnl_obj,
 		channel_info->sync_event = pchnl->sync_event;
 		channel_info->cio_cs = pchnl->cio_cs;
 		channel_info->cio_reqs = pchnl->cio_reqs;
-		channel_info->dw_state = pchnl->dw_state;
+		channel_info->state = pchnl->state;
 	} else {
 		status = -EFAULT;
 	}
@@ -687,7 +687,7 @@ int bridge_chnl_get_mgr_info(struct chnl_mgr *hchnl_mgr, u32 ch_id,
 	/* Return the requested information: */
 	mgr_info->chnl_obj = chnl_mgr_obj->ap_channel[ch_id];
 	mgr_info->open_channels = chnl_mgr_obj->open_channels;
-	mgr_info->dw_type = chnl_mgr_obj->dw_type;
+	mgr_info->type = chnl_mgr_obj->type;
 	/* total # of chnls */
 	mgr_info->max_channels = chnl_mgr_obj->max_channels;
@@ -718,7 +718,7 @@ int bridge_chnl_idle(struct chnl_object *chnl_obj, u32 timeout,
 		/* Reset the byte count and put channel back in ready state. */
 		chnl_obj->bytes_moved = 0;
-		chnl_obj->dw_state &= ~CHNL_STATECANCEL;
+		chnl_obj->state &= ~CHNL_STATECANCEL;
 	}
 	return status;
@@ -769,7 +769,7 @@ int bridge_chnl_open(struct chnl_object **chnl,
 		return -ENOMEM;
 	/* Protect queues from io_dpc: */
-	pchnl->dw_state = CHNL_STATECANCEL;
+	pchnl->state = CHNL_STATECANCEL;
 	/* Allocate initial IOR and IOC queues: */
 	status = create_chirp_list(&pchnl->free_packets_list,
@@ -817,7 +817,7 @@ int bridge_chnl_open(struct chnl_object **chnl,
 	chnl_mgr_obj->open_channels++;
 	spin_unlock_bh(&chnl_mgr_obj->chnl_mgr_lock);
 	/* Return result... */
-	pchnl->dw_state = CHNL_STATEREADY;
+	pchnl->state = CHNL_STATEREADY;
 	*chnl = pchnl;
 	return status;


@@ -483,7 +483,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 		1)) == 0)) {
 		status =
 			hio_mgr->intf_fxns->
-			pfn_brd_mem_map(hio_mgr->hbridge_context,
+			brd_mem_map(hio_mgr->hbridge_context,
 				pa_curr, va_curr,
 				page_size[i], map_attrs,
 				NULL);
@@ -549,7 +549,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 	} else {
 		status =
 			hio_mgr->intf_fxns->
-			pfn_brd_mem_map(hio_mgr->hbridge_context,
+			brd_mem_map(hio_mgr->hbridge_context,
 				pa_curr, va_curr,
 				page_size[i], map_attrs,
 				NULL);
@@ -615,7 +615,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 			ae_proc[ndx].ul_dsp_va);
 		ndx++;
 	} else {
-		status = hio_mgr->intf_fxns->pfn_brd_mem_map
+		status = hio_mgr->intf_fxns->brd_mem_map
 			(hio_mgr->hbridge_context,
 			hio_mgr->ext_proc_info.ty_tlb[i].
 			ul_gpp_phys,
@@ -637,7 +637,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
 	/* Map the L4 peripherals */
 	i = 0;
 	while (l4_peripheral_table[i].phys_addr) {
-		status = hio_mgr->intf_fxns->pfn_brd_mem_map
+		status = hio_mgr->intf_fxns->brd_mem_map
 			(hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr,
 			l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
 			map_attrs, NULL);
@@ -977,8 +977,8 @@ void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
 	 * Assertion fires if CHNL_AddIOReq() called on a stream
 	 * which was cancelled, or attached to a dead board.
 	 */
-	DBC_ASSERT((pchnl->dw_state == CHNL_STATEREADY) ||
-		(pchnl->dw_state == CHNL_STATEEOS));
+	DBC_ASSERT((pchnl->state == CHNL_STATEREADY) ||
+		(pchnl->state == CHNL_STATEEOS));
 	/* Indicate to the DSP we have a buffer available for input */
 	set_chnl_busy(sm, pchnl->chnl_id);
 	*mbx_val = MBX_PCPY_CLASS;
@@ -987,7 +987,7 @@
 	 * This assertion fails if CHNL_AddIOReq() was called on a
 	 * stream which was cancelled, or attached to a dead board.
 	 */
-	DBC_ASSERT((pchnl->dw_state & ~CHNL_STATEEOS) ==
+	DBC_ASSERT((pchnl->state & ~CHNL_STATEEOS) ==
 		CHNL_STATEREADY);
 	/*
 	 * Record the fact that we have a buffer available for
@@ -1092,7 +1092,7 @@ static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
 	}
 	pchnl = chnl_mgr_obj->ap_channel[chnl_id];
 	if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) {
-		if ((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY) {
+		if ((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY) {
 			/* Get the I/O request, and attempt a transfer */
 			if (!list_empty(&pchnl->pio_requests)) {
 				if (!pchnl->cio_reqs)
@@ -1122,7 +1122,7 @@ static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
 	 * sends EOS more than once on this
 	 * channel.
 	 */
-	if (pchnl->dw_state & CHNL_STATEEOS)
+	if (pchnl->state & CHNL_STATEEOS)
 		goto func_end;
 	/*
 	 * Zero bytes indicates EOS. Update
@@ -1131,7 +1131,7 @@ static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
 	 */
 	chnl_packet_obj->status |=
 		CHNL_IOCSTATEOS;
-	pchnl->dw_state |= CHNL_STATEEOS;
+	pchnl->state |= CHNL_STATEEOS;
 	/*
 	 * Notify that end of stream has
 	 * occurred.
@@ -1329,7 +1329,7 @@ static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
 	if (sm->output_full)
 		goto func_end;
-	if (pchnl && !((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY))
+	if (pchnl && !((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY))
 		goto func_end;
 	/* Look to see if both a PC and DSP output channel are ready */
@@ -1810,7 +1810,7 @@ int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
 	psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC);
 	if (psz_buf != NULL) {
 		/* Read trace buffer data */
-		status = (*intf_fxns->pfn_brd_read)(pbridge_context,
+		status = (*intf_fxns->brd_read)(pbridge_context,
 			(u8 *)psz_buf, (u32)ul_trace_begin,
 			ul_num_bytes, 0);
@@ -1825,7 +1825,7 @@ int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
 		__func__, psz_buf);
 	/* Read the value at the DSP address in trace_cur_pos. */
-	status = (*intf_fxns->pfn_brd_read)(pbridge_context,
+	status = (*intf_fxns->brd_read)(pbridge_context,
 		(u8 *)&trace_cur_pos, (u32)trace_cur_pos,
 		4, 0);
 	if (status)
@@ -1992,7 +1992,7 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context)
 		poll_cnt < POLL_MAX) {
 		/* Read DSP dump size from the DSP trace buffer... */
-		status = (*intf_fxns->pfn_brd_read)(bridge_context,
+		status = (*intf_fxns->brd_read)(bridge_context,
 			(u8 *)&mmu_fault_dbg_info, (u32)trace_begin,
 			sizeof(mmu_fault_dbg_info), 0);
@@ -2028,7 +2028,7 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context)
 	buffer_end = buffer + total_size / 4;
 	/* Read bytes from the DSP trace buffer... */
-	status = (*intf_fxns->pfn_brd_read)(bridge_context,
+	status = (*intf_fxns->brd_read)(bridge_context,
 		(u8 *)buffer, (u32)trace_begin,
 		total_size, 0);
 	if (status) {
@@ -2189,7 +2189,7 @@ void dump_dl_modules(struct bridge_dev_context *bridge_context)
 	pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr);
 	/* Copy the modules_header structure from DSP memory. */
-	status = (*intf_fxns->pfn_brd_read)(bridge_context, (u8 *) &modules_hdr,
+	status = (*intf_fxns->brd_read)(bridge_context, (u8 *) &modules_hdr,
 		(u32) module_dsp_addr, sizeof(modules_hdr), 0);
 	if (status) {
@@ -2224,7 +2224,7 @@ void dump_dl_modules(struct bridge_dev_context *bridge_context)
 		goto func_end;
 	}
 	/* Copy the dll_module structure from DSP memory */
-	status = (*intf_fxns->pfn_brd_read)(bridge_context,
+	status = (*intf_fxns->brd_read)(bridge_context,
 		(u8 *)module_struct, module_dsp_addr, module_size, 0);
 	if (status) {


@@ -765,7 +765,7 @@ static int bridge_dev_create(struct bridge_dev_context
 	}
 	dev_context->dsp_start_add = (u32) OMAP_GEM_BASE;
-	dev_context->dw_self_loop = (u32) NULL;
+	dev_context->self_loop = (u32) NULL;
 	dev_context->dsp_per_clks = 0;
 	dev_context->internal_size = OMAP_DSP_SIZE;
 	/* Clear dev context MMU table entries.


@@ -127,7 +127,7 @@ struct chnl_mgr {
 	u8 max_channels; /* Total number of channels */
 	u8 open_channels; /* Total number of open channels */
 	struct chnl_object **ap_channel; /* Array of channels */
-	u8 dw_type; /* Type of channel class library */
+	u8 type; /* Type of channel class library */
 	/* If no shm syms, return for CHNL_Open */
 	int chnl_open_status;
 };
@@ -140,7 +140,7 @@ struct chnl_object {
 	/* Pointer back to channel manager */
 	struct chnl_mgr *chnl_mgr_obj;
 	u32 chnl_id; /* Channel id */
-	u8 dw_state; /* Current channel state */
+	u8 state; /* Current channel state */
 	s8 chnl_mode; /* Chnl mode and attributes */
 	/* Chnl I/O completion event (user mode) */
 	void *user_event;


@@ -59,7 +59,7 @@ struct chnl_info {
 	/*Abstraction of I/O completion event. */
 	struct sync_object *sync_event;
 	s8 mode; /* Channel mode. */
-	u8 dw_state; /* Current channel state. */
+	u8 state; /* Current channel state. */
 	u32 bytes_tx; /* Total bytes transferred. */
 	u32 cio_cs; /* Number of IOCs in queue. */
 	u32 cio_reqs; /* Number of IO Requests in queue. */
@@ -68,7 +68,7 @@ struct chnl_info {
 /* Channel manager info: */
 struct chnl_mgrinfo {
-	u8 dw_type; /* Type of channel class library. */
+	u8 type; /* Type of channel class library. */
 	/* Channel handle, given the channel id. */
 	struct chnl_object *chnl_obj;
 	u8 open_channels; /* Number of open channels. */


@@ -60,7 +60,7 @@ struct cmm_seginfo {
 	u32 ul_dsp_size; /* DSP seg size in bytes */
 	/* # of current GPP allocations from this segment */
 	u32 ul_in_use_cnt;
-	u32 dw_seg_base_va; /* Start Virt address of SM seg */
+	u32 seg_base_va; /* Start Virt address of SM seg */
 };
@@ -83,8 +83,8 @@ struct cmm_xlatorattrs {
 	u32 dsp_buf_size; /* size of DSP-side bufs in GPP bytes */
 	/* Vm base address alloc'd in client process context */
 	void *vm_base;
-	/* dw_vm_size must be >= (dwMaxNumBufs * dwMaxSize) */
-	u32 dw_vm_size;
+	/* vm_size must be >= (dwMaxNumBufs * dwMaxSize) */
+	u32 vm_size;
 };
 /*


@@ -369,9 +369,9 @@ struct dsp_processorinfo {
 /* Error information of last DSP exception signalled to the GPP */
 struct dsp_errorinfo {
 	u32 err_mask;
-	u32 dw_val1;
-	u32 dw_val2;
-	u32 dw_val3;
+	u32 val1;
+	u32 val2;
+	u32 val3;
 };
 /* The dsp_processorstate structure describes the state of a DSP processor */


@@ -978,17 +978,17 @@ struct bridge_drv_interface {
 	fxn_dev_create pfn_dev_create; /* Create device context */
 	fxn_dev_destroy pfn_dev_destroy; /* Destroy device context */
 	fxn_dev_ctrl pfn_dev_cntrl; /* Optional vendor interface */
-	fxn_brd_monitor pfn_brd_monitor; /* Load and/or start monitor */
+	fxn_brd_monitor brd_monitor; /* Load and/or start monitor */
 	fxn_brd_start pfn_brd_start; /* Start DSP program. */
 	fxn_brd_stop pfn_brd_stop; /* Stop/reset board. */
 	fxn_brd_status pfn_brd_status; /* Get current board status. */
-	fxn_brd_read pfn_brd_read; /* Read board memory */
+	fxn_brd_read brd_read; /* Read board memory */
 	fxn_brd_write pfn_brd_write; /* Write board memory. */
 	fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */
-	fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */
-	fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */
-	fxn_brd_memmap pfn_brd_mem_map; /* Maps MPU mem to DSP mem */
-	fxn_brd_memunmap pfn_brd_mem_un_map; /* Unmaps MPU mem to DSP mem */
+	fxn_brd_memcopy brd_mem_copy; /* Copies DSP Memory */
+	fxn_brd_memwrite brd_mem_write; /* Write DSP Memory w/o halt */
+	fxn_brd_memmap brd_mem_map; /* Maps MPU mem to DSP mem */
+	fxn_brd_memunmap brd_mem_un_map; /* Unmaps MPU mem to DSP mem */
 	fxn_chnl_create pfn_chnl_create; /* Create channel manager. */
 	fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */
 	fxn_chnl_open pfn_chnl_open; /* Create a new channel. */


@@ -280,7 +280,7 @@ typedef int(*nldr_unloadfxn) (struct nldr_nodeobject *nldr_node_obj,
  * ======== node_ldr_fxns ========
  */
 struct node_ldr_fxns {
-	nldr_allocatefxn pfn_allocate;
+	nldr_allocatefxn allocate;
 	nldr_createfxn pfn_create;
 	nldr_deletefxn pfn_delete;
 	nldr_exitfxn pfn_exit;


@@ -64,7 +64,7 @@
 struct cmm_allocator { /* sma */
 	unsigned int shm_base; /* Start of physical SM block */
 	u32 ul_sm_size; /* Size of SM block in bytes */
-	unsigned int dw_vm_base; /* Start of VM block. (Dev driver
+	unsigned int vm_base; /* Start of VM block. (Dev driver
		 * context for 'sma') */
 	u32 dsp_phys_addr_offset; /* DSP PA to GPP PA offset for this
		 * SM space */
@@ -86,7 +86,7 @@ struct cmm_xlator { /* Pa<->Va translator object */
 	 * base address for translator's ul_seg_id.
 	 * Only 1 segment ID currently supported.
 	 */
-	unsigned int dw_virt_base; /* virtual base address */
+	unsigned int virt_base; /* virtual base address */
 	u32 ul_virt_size; /* size of virt space in bytes */
 	u32 ul_seg_id; /* Segment Id */
 };
@@ -122,14 +122,14 @@ static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
 	0, /* dsp_bufs */
 	0, /* dsp_buf_size */
 	NULL, /* vm_base */
-	0, /* dw_vm_size */
+	0, /* vm_size */
 };
 /* SM node representing a block of memory. */
 struct cmm_mnode {
 	struct list_head link; /* must be 1st element */
 	u32 pa; /* Phys addr */
-	u32 dw_va; /* Virtual address in device process context */
+	u32 va; /* Virtual address in device process context */
 	u32 ul_size; /* SM block size in bytes */
 	u32 client_proc; /* Process that allocated this mem block */
 };
@@ -200,7 +200,7 @@ void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
 	 * add to freelist */
 	new_node =
 		get_node(cmm_mgr_obj, pnode->pa + usize,
-			pnode->dw_va + usize,
+			pnode->va + usize,
			(u32) delta_size);
 	/* leftovers go free */
 	add_to_free_list(allocator, new_node);
@@ -218,13 +218,13 @@
 	list_add_tail(&pnode->link, &allocator->in_use_list);
 	buf_pa = (void *)pnode->pa; /* physical address */
 	/* clear mem */
-	pbyte = (u8 *) pnode->dw_va;
+	pbyte = (u8 *) pnode->va;
 	for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
 		*pbyte = 0;
 	if (pp_buf_va != NULL) {
 		/* Virtual address */
-		*pp_buf_va = (void *)pnode->dw_va;
+		*pp_buf_va = (void *)pnode->va;
 	}
 	}
 	mutex_unlock(&cmm_mgr_obj->cmm_lock);
@@ -450,8 +450,8 @@ int cmm_get_info(struct cmm_object *hcmm_mgr,
 		altr->dsp_base;
 	cmm_info_obj->seg_info[ul_seg - 1].ul_dsp_size =
 		altr->ul_dsp_size;
-	cmm_info_obj->seg_info[ul_seg - 1].dw_seg_base_va =
-		altr->dw_vm_base - altr->ul_dsp_size;
+	cmm_info_obj->seg_info[ul_seg - 1].seg_base_va =
+		altr->vm_base - altr->ul_dsp_size;
 	cmm_info_obj->seg_info[ul_seg - 1].ul_in_use_cnt = 0;
 	list_for_each_entry(curr, &altr->in_use_list, link) {
@@ -539,12 +539,12 @@ int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
 	psma->hcmm_mgr = hcmm_mgr; /* ref to parent */
 	psma->shm_base = dw_gpp_base_pa; /* SM Base phys */
 	psma->ul_sm_size = ul_size; /* SM segment size in bytes */
-	psma->dw_vm_base = gpp_base_va;
+	psma->vm_base = gpp_base_va;
 	psma->dsp_phys_addr_offset = dsp_addr_offset;
 	psma->c_factor = c_factor;
 	psma->dsp_base = dw_dsp_base;
 	psma->ul_dsp_size = ul_dsp_size;
-	if (psma->dw_vm_base == 0) {
+	if (psma->vm_base == 0) {
 		status = -EPERM;
 		goto func_end;
 	}
@@ -556,7 +556,7 @@ int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
 	/* Get a mem node for this hunk-o-memory */
 	new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
-		psma->dw_vm_base, ul_size);
+		psma->vm_base, ul_size);
 	/* Place node on the SM allocator's free list */
 	if (new_node) {
 		list_add_tail(&new_node->link, &psma->free_list);
@@ -649,8 +649,8 @@ static void un_register_gppsm_seg(struct cmm_allocator *psma)
 		kfree(curr);
 	}
-	if ((void *)psma->dw_vm_base != NULL)
-		MEM_UNMAP_LINEAR_ADDRESS((void *)psma->dw_vm_base);
+	if ((void *)psma->vm_base != NULL)
+		MEM_UNMAP_LINEAR_ADDRESS((void *)psma->vm_base);
 	/* Free allocator itself */
 	kfree(psma);
@@ -705,7 +705,7 @@ static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
 	}
 	pnode->pa = dw_pa;
-	pnode->dw_va = dw_va;
+	pnode->va = dw_va;
 	pnode->ul_size = ul_size;
 	return pnode;
@@ -770,7 +770,7 @@ static void add_to_free_list(struct cmm_allocator *allocator,
 	}
 	if (curr->pa == NEXT_PA(node)) {
 		curr->pa = node->pa;
-		curr->dw_va = node->dw_va;
+		curr->va = node->va;
 		curr->ul_size += node->ul_size;
 		delete_node(allocator->hcmm_mgr, node);
 		return;
@@ -925,10 +925,10 @@ int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 ** paddr,
 	if (xlator_obj) {
 		if (set_info) {
 			/* set translators virtual address range */
-			xlator_obj->dw_virt_base = (u32) *paddr;
+			xlator_obj->virt_base = (u32) *paddr;
 			xlator_obj->ul_virt_size = ul_size;
 		} else { /* return virt base address */
-			*paddr = (u8 *) xlator_obj->dw_virt_base;
+			*paddr = (u8 *) xlator_obj->virt_base;
 		}
 	} else {
 		status = -EFAULT;
@@ -969,18 +969,18 @@ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
 	dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base -
 		allocator->
 		ul_dsp_size);
-	dw_addr_xlate = xlator_obj->dw_virt_base + dw_offset;
+	dw_addr_xlate = xlator_obj->virt_base + dw_offset;
 	/* Check if translated Va base is in range */
-	if ((dw_addr_xlate < xlator_obj->dw_virt_base) ||
+	if ((dw_addr_xlate < xlator_obj->virt_base) ||
		(dw_addr_xlate >=
-		(xlator_obj->dw_virt_base +
+		(xlator_obj->virt_base +
		xlator_obj->ul_virt_size))) {
 		dw_addr_xlate = 0; /* bad address */
 	}
 	} else {
 		/* Gpp PA = Gpp Base + offset */
 		dw_offset =
-			(u8 *) paddr - (u8 *) xlator_obj->dw_virt_base;
+			(u8 *) paddr - (u8 *) xlator_obj->virt_base;
 		dw_addr_xlate =
 			allocator->shm_base - allocator->ul_dsp_size +
 			dw_offset;


@@ -1082,17 +1082,17 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
 	STORE_FXN(fxn_dev_create, pfn_dev_create);
 	STORE_FXN(fxn_dev_destroy, pfn_dev_destroy);
 	STORE_FXN(fxn_dev_ctrl, pfn_dev_cntrl);
-	STORE_FXN(fxn_brd_monitor, pfn_brd_monitor);
+	STORE_FXN(fxn_brd_monitor, brd_monitor);
 	STORE_FXN(fxn_brd_start, pfn_brd_start);
 	STORE_FXN(fxn_brd_stop, pfn_brd_stop);
 	STORE_FXN(fxn_brd_status, pfn_brd_status);
-	STORE_FXN(fxn_brd_read, pfn_brd_read);
+	STORE_FXN(fxn_brd_read, brd_read);
 	STORE_FXN(fxn_brd_write, pfn_brd_write);
 	STORE_FXN(fxn_brd_setstate, pfn_brd_set_state);
-	STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy);
-	STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write);
-	STORE_FXN(fxn_brd_memmap, pfn_brd_mem_map);
-	STORE_FXN(fxn_brd_memunmap, pfn_brd_mem_un_map);
+	STORE_FXN(fxn_brd_memcopy, brd_mem_copy);
+	STORE_FXN(fxn_brd_memwrite, brd_mem_write);
+	STORE_FXN(fxn_brd_memmap, brd_mem_map);
+	STORE_FXN(fxn_brd_memunmap, brd_mem_un_map);
 	STORE_FXN(fxn_chnl_create, pfn_chnl_create);
 	STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy);
 	STORE_FXN(fxn_chnl_open, pfn_chnl_open);
@@ -1123,11 +1123,11 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
 	DBC_ENSURE(intf_fxns->pfn_dev_create != NULL);
 	DBC_ENSURE(intf_fxns->pfn_dev_destroy != NULL);
 	DBC_ENSURE(intf_fxns->pfn_dev_cntrl != NULL);
-	DBC_ENSURE(intf_fxns->pfn_brd_monitor != NULL);
+	DBC_ENSURE(intf_fxns->brd_monitor != NULL);
 	DBC_ENSURE(intf_fxns->pfn_brd_start != NULL);
 	DBC_ENSURE(intf_fxns->pfn_brd_stop != NULL);
 	DBC_ENSURE(intf_fxns->pfn_brd_status != NULL);
-	DBC_ENSURE(intf_fxns->pfn_brd_read != NULL);
+	DBC_ENSURE(intf_fxns->brd_read != NULL);
 	DBC_ENSURE(intf_fxns->pfn_brd_write != NULL);
 	DBC_ENSURE(intf_fxns->pfn_chnl_create != NULL);
 	DBC_ENSURE(intf_fxns->pfn_chnl_destroy != NULL);


@@ -575,7 +575,7 @@ func_cont:
 	if (!status) {
 		/* Create object for dynamic loading */
-		status = hnode_mgr->nldr_fxns.pfn_allocate(hnode_mgr->nldr_obj,
+		status = hnode_mgr->nldr_fxns.allocate(hnode_mgr->nldr_obj,
			(void *)pnode,
			&pnode->dcd_props.
			obj_data.node_obj,
@@ -3075,7 +3075,7 @@ static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
 	status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
 	if (!status) {
 		status =
-			(*intf_fxns->pfn_brd_mem_copy) (hbridge_context,
+			(*intf_fxns->brd_mem_copy) (hbridge_context,
			dsp_run_addr, dsp_load_addr,
			ul_num_bytes, (u32) mem_space);
 		if (!status)
@@ -3117,7 +3117,7 @@ static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
 	/* Call new MemWrite function */
 	intf_fxns = hnode_mgr->intf_fxns;
 	status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
-	status = (*intf_fxns->pfn_brd_mem_write) (hbridge_context, pbuf,
+	status = (*intf_fxns->brd_mem_write) (hbridge_context, pbuf,
		dsp_add, ul_num_bytes, mem_sect_type);
 	return ul_num_bytes;


@@ -1397,7 +1397,7 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
 	if (!map_obj)
 		status = -ENOMEM;
 	else
-		status = (*p_proc_object->intf_fxns->pfn_brd_mem_map)
+		status = (*p_proc_object->intf_fxns->brd_mem_map)
			(p_proc_object->hbridge_context, pa_align, va_align,
			size_align, ul_map_attr, map_obj->pages);
 	}
@@ -1720,7 +1720,7 @@ int proc_un_map(void *hprocessor, void *map_addr,
 	status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
 	/* Remove mapping from the page tables. */
 	if (!status) {
-		status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map)
+		status = (*p_proc_object->intf_fxns->brd_mem_un_map)
			(p_proc_object->hbridge_context, va_align, size_align);
 	}
@@ -1828,7 +1828,7 @@ static int proc_monitor(struct proc_object *proc_obj)
 		}
 	}
 	/* Place the Board in the Monitor State */
-	if (!((*proc_obj->intf_fxns->pfn_brd_monitor)
+	if (!((*proc_obj->intf_fxns->brd_monitor)
		(proc_obj->hbridge_context))) {
 		status = 0;
 		if (!((*proc_obj->intf_fxns->pfn_brd_status)


@@ -344,7 +344,7 @@ int strm_get_info(struct strm_object *stream_obj,
 	stream_info->user_strm->ul_number_bytes = chnl_info_obj.bytes_tx;
 	stream_info->user_strm->sync_object_handle = chnl_info_obj.event_obj;
 	/* Determine stream state based on channel state and info */
-	if (chnl_info_obj.dw_state & CHNL_STATEEOS) {
+	if (chnl_info_obj.state & CHNL_STATEEOS) {
 		stream_info->user_strm->ss_stream_state = STREAM_DONE;
 	} else {
 		if (chnl_info_obj.cio_cs > 0)