
dmaengine: hsu: speed up residue calculation

There is no need to calculate the overall length of the descriptor each time
the DMA transfer status is queried. Instead, calculate it once at the
descriptor allocation stage and keep the stored length for later use.
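
To make the change concrete, below is a minimal userspace sketch of the
pattern applied here. It is not the driver code itself; the struct and
function names are illustrative, and the kernel's scatterlist and
descriptor types are reduced to toy equivalents.

	#include <stddef.h>
	#include <stdio.h>

	struct sg_entry {
		size_t len;
	};

	struct desc {
		struct sg_entry *sg;
		unsigned int nents;
		size_t length;		/* cached sum of all sg[i].len */
	};

	/* Before: O(nents) walk on every status query. */
	static size_t desc_size_slow(const struct desc *d)
	{
		size_t bytes = 0;
		unsigned int i;

		for (i = 0; i < d->nents; i++)
			bytes += d->sg[i].len;
		return bytes;
	}

	/* After: pay for the walk once, when the descriptor is built. */
	static void desc_init(struct desc *d, struct sg_entry *sg,
			      unsigned int nents)
	{
		unsigned int i;

		d->sg = sg;
		d->nents = nents;
		d->length = 0;
		for (i = 0; i < nents; i++)
			d->length += sg[i].len;
	}

	/* Status queries now just read the cached total: O(1). */
	static size_t desc_size_fast(const struct desc *d)
	{
		return d->length;
	}

	int main(void)
	{
		struct sg_entry sg[] = { { 4096 }, { 4096 }, { 2048 } };
		struct desc d;

		desc_init(&d, sg, 3);
		printf("slow=%zu fast=%zu\n",
		       desc_size_slow(&d), desc_size_fast(&d));
		return 0;
	}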

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Andy Shevchenko 2015-11-17 18:00:30 +02:00 committed by Vinod Koul
parent f94cf9f4c5
commit f0579c8cea
2 changed files with 5 additions and 13 deletions

drivers/dma/hsu/hsu.c

@@ -228,6 +228,8 @@ static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
 	for_each_sg(sgl, sg, sg_len, i) {
 		desc->sg[i].addr = sg_dma_address(sg);
 		desc->sg[i].len = sg_dma_len(sg);
+
+		desc->length += sg_dma_len(sg);
 	}
 
 	desc->nents = sg_len;
@@ -249,21 +251,10 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 }
 
-static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc)
-{
-	size_t bytes = 0;
-	unsigned int i;
-
-	for (i = desc->active; i < desc->nents; i++)
-		bytes += desc->sg[i].len;
-
-	return bytes;
-}
-
 static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
 {
 	struct hsu_dma_desc *desc = hsuc->desc;
-	size_t bytes = hsu_dma_desc_size(desc);
+	size_t bytes = desc->length;
 	int i;
 
 	i = desc->active % HSU_DMA_CHAN_NR_DESC;
@@ -294,7 +285,7 @@ static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
 		dma_set_residue(state, bytes);
 		status = hsuc->desc->status;
 	} else if (vdesc) {
-		bytes = hsu_dma_desc_size(to_hsu_dma_desc(vdesc));
+		bytes = to_hsu_dma_desc(vdesc)->length;
 		dma_set_residue(state, bytes);
 	}
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

drivers/dma/hsu/hsu.h

@@ -65,6 +65,7 @@ struct hsu_dma_desc {
 	enum dma_transfer_direction direction;
 	struct hsu_dma_sg *sg;
 	unsigned int nents;
+	size_t length;
 	unsigned int active;
 	enum dma_status status;
 };