parisc: DMA API: return error instead of BUG_ON for dma ops on non dma devs

Enabling the parport_pc driver on a B2600 (and probably other 64-bit PARISC
systems) produced the following BUG:

CPU: 0 PID: 1 Comm: swapper Not tainted 4.12.0-rc5-30198-g1132d5e #156
task: 000000009e050000 task.stack: 000000009e04c000

     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI
PSW: 00001000000001101111111100001111 Not tainted
r00-03  000000ff0806ff0f 000000009e04c990 0000000040871b78 000000009e04cac0
r04-07  0000000040c14de0 ffffffffffffffff 000000009e07f098 000000009d82d200
r08-11  000000009d82d210 0000000000000378 0000000000000000 0000000040c345e0
r12-15  0000000000000005 0000000040c345e0 0000000000000000 0000000040c9d5e0
r16-19  0000000040c345e0 00000000f00001c4 00000000f00001bc 0000000000000061
r20-23  000000009e04ce28 0000000000000010 0000000000000010 0000000040b89e40
r24-27  0000000000000003 0000000000ffffff 000000009d82d210 0000000040c14de0
r28-31  0000000000000000 000000009e04ca90 000000009e04cb40 0000000000000000
sr00-03  0000000000000000 0000000000000000 0000000000000000 0000000000000000
sr04-07  0000000000000000 0000000000000000 0000000000000000 0000000000000000

IASQ: 0000000000000000 0000000000000000 IAOQ: 00000000404aece0 00000000404aece4
 IIR: 03ffe01f    ISR: 0000000010340000  IOR: 000001781304cac8
 CPU:        0   CR30: 000000009e04c000 CR31: 00000000e2976de2
 ORIG_R28: 0000000000000200
 IAOQ[0]: sba_dma_supported+0x80/0xd0
 IAOQ[1]: sba_dma_supported+0x84/0xd0
 RP(r2): parport_pc_probe_port+0x178/0x1200

The cause is a call to dma_coerce_mask_and_coherent() in parport_pc_probe_port(),
which the PARISC DMA API doesn't handle very nicely. This commit makes the
DMA API calls return DMA_ERROR_CODE if the device isn't capable of DMA
transactions.
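
To illustrate the intended behaviour, here is a minimal, hypothetical
caller-side sketch (not part of this patch; my_probe, buf, len and the
32-bit mask are made-up examples) of how a probe path can now fail
gracefully instead of hitting a BUG() inside the IOMMU code:

#include <linux/dma-mapping.h>

/* Hypothetical example only: probe a device that may have no SBA/CCIO
 * ioc behind it at all. */
static int my_probe(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* With this patch sba_dma_supported() returns 0 for such a
	 * device instead of crashing, so setting the mask fails cleanly. */
	if (dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;	/* mapping returned DMA_ERROR_CODE */

	/* ... do the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}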

Cc: <stable@vger.kernel.org> # v3.13+
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: Helge Deller <deller@gmx.de>
Thomas Bogendoerfer 2017-07-03 10:38:05 +02:00 committed by Helge Deller
parent 247462316f
commit 33f9e02495
5 changed files with 41 additions and 7 deletions

arch/parisc/include/asm/dma-mapping.h

@@ -20,6 +20,8 @@
 ** flush/purge and allocate "regular" cacheable pages for everything.
 */
+#define DMA_ERROR_CODE (~(dma_addr_t)0)
 #ifdef CONFIG_PA11
 extern const struct dma_map_ops pcxl_dma_ops;
 extern const struct dma_map_ops pcx_dma_ops;
@@ -54,12 +56,13 @@ parisc_walk_tree(struct device *dev)
 			break;
 		}
 	}
-	BUG_ON(!dev->platform_data);
 	return dev->platform_data;
 }
-#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)
+#define GET_IOC(dev) ({ \
+	void *__pdata = parisc_walk_tree(dev); \
+	__pdata ? HBA_DATA(__pdata)->iommu : NULL; \
+})
 #ifdef CONFIG_IOMMU_CCIO
 struct parisc_device;

drivers/parisc/ccio-dma.c

@@ -741,6 +741,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
 	BUG_ON(!dev);
 	ioc = GET_IOC(dev);
+	if (!ioc)
+		return DMA_ERROR_CODE;
 	BUG_ON(size <= 0);
@@ -814,6 +816,10 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
 	BUG_ON(!dev);
 	ioc = GET_IOC(dev);
+	if (!ioc) {
+		WARN_ON(!ioc);
+		return;
+	}
 	DBG_RUN("%s() iovp 0x%lx/%x\n",
 		__func__, (long)iova, size);
@@ -918,6 +924,8 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	BUG_ON(!dev);
 	ioc = GET_IOC(dev);
+	if (!ioc)
+		return 0;
 	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
@@ -990,6 +998,10 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	BUG_ON(!dev);
 	ioc = GET_IOC(dev);
+	if (!ioc) {
+		WARN_ON(!ioc);
+		return;
+	}
 	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
 		__func__, nents, sg_virt(sglist), sglist->length);

drivers/parisc/dino.c

@@ -154,7 +154,10 @@ struct dino_device
 };
 /* Looks nice and keeps the compiler happy */
-#define DINO_DEV(d) ((struct dino_device *) d)
+#define DINO_DEV(d) ({ \
+	void *__pdata = d; \
+	BUG_ON(!__pdata); \
+	(struct dino_device *)__pdata; })
/*

drivers/parisc/lba_pci.c

@@ -111,8 +111,10 @@ static u32 lba_t32;
 /* Looks nice and keeps the compiler happy */
-#define LBA_DEV(d) ((struct lba_device *) (d))
+#define LBA_DEV(d) ({ \
+	void *__pdata = d; \
+	BUG_ON(!__pdata); \
+	(struct lba_device *)__pdata; })
 /*
 ** Only allow 8 subsidiary busses per LBA

drivers/parisc/sba_iommu.c

@@ -691,6 +691,8 @@ static int sba_dma_supported( struct device *dev, u64 mask)
 		return 0;
 	ioc = GET_IOC(dev);
+	if (!ioc)
+		return 0;
 	/*
 	 * check if mask is >= than the current max IO Virt Address
@@ -722,6 +724,8 @@ sba_map_single(struct device *dev, void *addr, size_t size,
 	int pide;
 	ioc = GET_IOC(dev);
+	if (!ioc)
+		return DMA_ERROR_CODE;
 	/* save offset bits */
 	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
@@ -813,6 +817,10 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
 	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
 	ioc = GET_IOC(dev);
+	if (!ioc) {
+		WARN_ON(!ioc);
+		return;
+	}
 	offset = iova & ~IOVP_MASK;
 	iova ^= offset;	/* clear offset bits */
 	size += offset;
@@ -952,6 +960,8 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
 	ioc = GET_IOC(dev);
+	if (!ioc)
+		return 0;
 	/* Fast path single entry scatterlists. */
 	if (nents == 1) {
@@ -1037,6 +1047,10 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
 		__func__, nents, sg_virt(sglist), sglist->length);
 	ioc = GET_IOC(dev);
+	if (!ioc) {
+		WARN_ON(!ioc);
+		return;
+	}
 #ifdef SBA_COLLECT_STATS
 	ioc->usg_calls++;