
dmaengine: ioatdma: PQ err descriptors should callback with err results

The err completion callback is missing from the error handler. There are
two reasons we have never hit this. On Xeon, because of the hardware
error workaround, completion happens on a NULL descriptor, so we never
issue a callback on the PQ descriptor. On Atom, we have DWBES support,
so the callback has either already happened or we don't halt on error;
that case is already taken care of. But this code needs to be corrected
for future error handlers.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Dave Jiang 2014-12-11 09:13:42 -07:00 committed by Vinod Koul
parent 681d15ecd7
commit abf538ae03
1 changed file with 11 additions and 0 deletions


@@ -489,6 +489,7 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat)
 	struct ioat_chan_common *chan = &ioat->base;
 	struct pci_dev *pdev = to_pdev(chan);
 	struct ioat_dma_descriptor *hw;
+	struct dma_async_tx_descriptor *tx;
 	u64 phys_complete;
 	struct ioat_ring_ent *desc;
 	u32 err_handled = 0;
@@ -534,6 +535,16 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat)
 		dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
 			__func__, chanerr, err_handled);
 		BUG();
+	} else { /* cleanup the faulty descriptor */
+		tx = &desc->txd;
+		if (tx->cookie) {
+			dma_cookie_complete(tx);
+			dma_descriptor_unmap(tx);
+			if (tx->callback) {
+				tx->callback(tx->callback_param);
+				tx->callback = NULL;
+			}
+		}
 	}
 
 	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
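
For context, the cleanup added in the else branch mirrors the standard
dmaengine completion sequence. A minimal sketch of that sequence follows;
ioat_complete_faulty() is a hypothetical helper name for illustration, not
part of the driver, but the calls inside it are exactly those used in the
diff above.

/*
 * Minimal sketch of the dmaengine completion sequence applied to the
 * faulty descriptor; ioat_complete_faulty() is a hypothetical name.
 */
static void ioat_complete_faulty(struct dma_async_tx_descriptor *tx)
{
	/* A zero cookie means the descriptor was never issued to a client. */
	if (!tx->cookie)
		return;

	dma_cookie_complete(tx);	/* advance the channel's completed cookie */
	dma_descriptor_unmap(tx);	/* release DMA mappings for this transfer */

	if (tx->callback) {
		tx->callback(tx->callback_param);	/* notify the client */
		tx->callback = NULL;	/* prevent a second invocation */
	}
}

Without this step, a client waiting on the cookie of the faulty PQ
descriptor would never see it complete, which is the gap the patch closes.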