
[SPARC64]: Temporarily remove IOMMU merging code.

Changeset fde6a3c82d ("iommu sg merging:
sparc64: make iommu respect the segment size limits") broke sparc64
because, whilst it added the segment limiting code to the first pass
of SG mapping (in prepare_sg()), it did not add matching code to the
second-pass handling (in fill_sg()).

As a result the two passes disagree about where the segment
boundaries should be, resulting in OOPSes, DMA corruption, and
corrupted superblocks.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2008-02-06 03:50:26 -08:00 committed by David S. Miller
parent b3ff81dd8a
commit 38192d52f1
6 changed files with 95 additions and 499 deletions
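To see why the two passes must agree, here is a minimal standalone sketch of the broken contract. It is not kernel code: MAX_SEG, pass1(), pass2(), and the 48K buffer sizes are invented for illustration. Pass 1 splits merged segments at a size limit; pass 2 merges with no limit, so the two emit different boundaries for the same list.

#include <stdio.h>

#define MAX_SEG 0x10000UL       /* assumed max segment size, for illustration */

struct seg { unsigned long start, len; };

/* Pass 1: merge physically adjacent entries, but respect MAX_SEG
 * (the check prepare_sg() gained in fde6a3c82d). */
static int pass1(const struct seg *in, int n, struct seg *out)
{
        int i, m = 0;

        out[m] = in[0];
        for (i = 1; i < n; i++) {
                int contig = (out[m].start + out[m].len == in[i].start);

                if (contig && out[m].len + in[i].len <= MAX_SEG)
                        out[m].len += in[i].len;        /* merge */
                else
                        out[++m] = in[i];               /* start new segment */
        }
        return m + 1;
}

/* Pass 2: the same merge but with no MAX_SEG check (fill_sg() was
 * never taught the limit, hence the disagreement). */
static int pass2(const struct seg *in, int n, struct seg *out)
{
        int i, m = 0;

        out[m] = in[0];
        for (i = 1; i < n; i++) {
                if (out[m].start + out[m].len == in[i].start)
                        out[m].len += in[i].len;        /* merges past the limit */
                else
                        out[++m] = in[i];
        }
        return m + 1;
}

int main(void)
{
        /* Two adjacent 48K buffers: 96K total, above the 64K limit. */
        struct seg in[2] = { { 0x0UL, 0xc000UL }, { 0xc000UL, 0xc000UL } };
        struct seg a[2], b[2];

        printf("pass1: %d segments, pass2: %d segments\n",
               pass1(in, 2, a), pass2(in, 2, b));
        return 0;
}

Built with any C compiler this prints "pass1: 2 segments, pass2: 1 segments": the same kind of boundary disagreement the changeset introduced between prepare_sg() and fill_sg().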

arch/sparc64/kernel/Makefile

@@ -11,7 +11,7 @@ obj-y := process.o setup.o cpu.o idprom.o \
traps.o auxio.o una_asm.o sysfs.o iommu.o \
irq.o ptrace.o time.o sys_sparc.o signal.o \
unaligned.o central.o pci.o starfire.o semaphore.o \
power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \
power.o sbus.o sparc64_ksyms.o chmc.o \
visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o

arch/sparc64/kernel/iommu.c

@@ -472,94 +472,15 @@ static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
spin_unlock_irqrestore(&iommu->lock, flags);
}
#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
static void fill_sg(iopte_t *iopte, struct scatterlist *sg,
int nused, int nelems,
unsigned long iopte_protection)
{
struct scatterlist *dma_sg = sg;
int i;
for (i = 0; i < nused; i++) {
unsigned long pteval = ~0UL;
u32 dma_npages;
dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
dma_sg->dma_length +
((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
do {
unsigned long offset;
signed int len;
/* If we are here, we know we have at least one
* more page to map. So walk forward until we
* hit a page crossing, and begin creating new
* mappings from that spot.
*/
for (;;) {
unsigned long tmp;
tmp = SG_ENT_PHYS_ADDRESS(sg);
len = sg->length;
if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
pteval = tmp & IO_PAGE_MASK;
offset = tmp & (IO_PAGE_SIZE - 1UL);
break;
}
if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
offset = 0UL;
len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
break;
}
sg = sg_next(sg);
nelems--;
}
pteval = iopte_protection | (pteval & IOPTE_PAGE);
while (len > 0) {
*iopte++ = __iopte(pteval);
pteval += IO_PAGE_SIZE;
len -= (IO_PAGE_SIZE - offset);
offset = 0;
dma_npages--;
}
pteval = (pteval & IOPTE_PAGE) + len;
sg = sg_next(sg);
nelems--;
/* Skip over any tail mappings we've fully mapped,
* adjusting pteval along the way. Stop when we
* detect a page crossing event.
*/
while (nelems &&
(pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
(pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
((pteval ^
(SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
pteval += sg->length;
sg = sg_next(sg);
nelems--;
}
if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
pteval = ~0UL;
} while (dma_npages != 0);
dma_sg = sg_next(dma_sg);
}
}
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
struct iommu *iommu;
unsigned long flags, ctx, i, npages, iopte_protection;
struct scatterlist *sg;
struct strbuf *strbuf;
unsigned long flags, ctx, npages, iopte_protection;
struct iommu *iommu;
iopte_t *base;
u32 dma_base;
struct scatterlist *sgtmp;
int used;
/* Fast path single entry scatterlists. */
if (nelems == 1) {
@@ -578,11 +499,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
if (unlikely(direction == DMA_NONE))
goto bad_no_ctx;
/* Step 1: Prepare scatter list. */
npages = prepare_sg(dev, sglist, nelems);
/* Step 2: Allocate a cluster and context, if necessary. */
npages = calc_npages(sglist, nelems);
spin_lock_irqsave(&iommu->lock, flags);
@@ -599,18 +516,6 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
dma_base = iommu->page_table_map_base +
((base - iommu->page_table) << IO_PAGE_SHIFT);
/* Step 3: Normalize DMA addresses. */
used = nelems;
sgtmp = sglist;
while (used && sgtmp->dma_length) {
sgtmp->dma_address += dma_base;
sgtmp = sg_next(sgtmp);
used--;
}
used = nelems - used;
/* Step 4: Create the mappings. */
if (strbuf->strbuf_enabled)
iopte_protection = IOPTE_STREAMING(ctx);
else
@@ -618,13 +523,27 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
if (direction != DMA_TO_DEVICE)
iopte_protection |= IOPTE_WRITE;
fill_sg(base, sglist, used, nelems, iopte_protection);
for_each_sg(sglist, sg, nelems, i) {
unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
unsigned long slen = sg->length;
unsigned long this_npages;
#ifdef VERIFY_SG
verify_sglist(sglist, nelems, base, npages);
#endif
this_npages = iommu_num_pages(paddr, slen);
return used;
sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
sg->dma_length = slen;
paddr &= IO_PAGE_MASK;
while (this_npages--) {
iopte_val(*base) = iopte_protection | paddr;
base++;
paddr += IO_PAGE_SIZE;
dma_base += IO_PAGE_SIZE;
}
}
return nelems;
bad:
iommu_free_ctx(iommu, ctx);
@@ -637,11 +556,10 @@ bad_no_ctx:
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
struct iommu *iommu;
struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, ctx, i, npages;
struct scatterlist *sg, *sgprv;
struct strbuf *strbuf;
struct iommu *iommu;
iopte_t *base;
u32 bus_addr;
if (unlikely(direction == DMA_NONE)) {
@@ -654,15 +572,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
bus_addr = sglist->dma_address & IO_PAGE_MASK;
sgprv = NULL;
for_each_sg(sglist, sg, nelems, i) {
if (sg->dma_length == 0)
break;
sgprv = sg;
}
npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
bus_addr) >> IO_PAGE_SHIFT;
npages = calc_npages(sglist, nelems);
base = iommu->page_table +
((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

arch/sparc64/kernel/iommu_common.c (deleted)

@@ -1,248 +0,0 @@
/* $Id: iommu_common.c,v 1.9 2001/12/17 07:05:09 davem Exp $
* iommu_common.c: UltraSparc SBUS/PCI common iommu code.
*
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*/
#include <linux/dma-mapping.h>
#include "iommu_common.h"
/* You are _strongly_ advised to enable the following debugging code
* any time you make changes to the sg code below, run it for a while
* with filesystems mounted read-only before buying the farm... -DaveM
*/
#ifdef VERIFY_SG
static int verify_lengths(struct scatterlist *sglist, int nents, int npages)
{
int sg_len, dma_len;
int i, pgcount;
struct scatterlist *sg;
sg_len = 0;
for_each_sg(sglist, sg, nents, i)
sg_len += sg->length;
dma_len = 0;
for_each_sg(sglist, sg, nents, i) {
if (!sg->dma_length)
break;
dma_len += sg->dma_length;
}
if (sg_len != dma_len) {
printk("verify_lengths: Error, different, sg[%d] dma[%d]\n",
sg_len, dma_len);
return -1;
}
pgcount = 0;
for_each_sg(sglist, sg, nents, i) {
unsigned long start, end;
if (!sg->dma_length)
break;
start = sg->dma_address;
start = start & IO_PAGE_MASK;
end = sg->dma_address + sg->dma_length;
end = (end + (IO_PAGE_SIZE - 1)) & IO_PAGE_MASK;
pgcount += ((end - start) >> IO_PAGE_SHIFT);
}
if (pgcount != npages) {
printk("verify_lengths: Error, page count wrong, "
"npages[%d] pgcount[%d]\n",
npages, pgcount);
return -1;
}
/* This test passes... */
return 0;
}
static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg, int nents, iopte_t **__iopte)
{
struct scatterlist *sg = *__sg;
iopte_t *iopte = *__iopte;
u32 dlen = dma_sg->dma_length;
u32 daddr;
unsigned int sglen;
unsigned long sgaddr;
daddr = dma_sg->dma_address;
sglen = sg->length;
sgaddr = (unsigned long) sg_virt(sg);
while (dlen > 0) {
unsigned long paddr;
/* SG and DMA_SG must begin at the same sub-page boundary. */
if ((sgaddr & ~IO_PAGE_MASK) != (daddr & ~IO_PAGE_MASK)) {
printk("verify_one_map: Wrong start offset "
"sg[%08lx] dma[%08x]\n",
sgaddr, daddr);
nents = -1;
goto out;
}
/* Verify the IOPTE points to the right page. */
paddr = iopte_val(*iopte) & IOPTE_PAGE;
if ((paddr + PAGE_OFFSET) != (sgaddr & IO_PAGE_MASK)) {
printk("verify_one_map: IOPTE[%08lx] maps the "
"wrong page, should be [%08lx]\n",
iopte_val(*iopte), (sgaddr & IO_PAGE_MASK) - PAGE_OFFSET);
nents = -1;
goto out;
}
/* If this SG crosses a page, adjust to that next page
* boundary and loop.
*/
if ((sgaddr & IO_PAGE_MASK) ^ ((sgaddr + sglen - 1) & IO_PAGE_MASK)) {
unsigned long next_page, diff;
next_page = (sgaddr + IO_PAGE_SIZE) & IO_PAGE_MASK;
diff = next_page - sgaddr;
sgaddr += diff;
daddr += diff;
sglen -= diff;
dlen -= diff;
if (dlen > 0)
iopte++;
continue;
}
/* SG wholly consumed within this page. */
daddr += sglen;
dlen -= sglen;
if (dlen > 0 && ((daddr & ~IO_PAGE_MASK) == 0))
iopte++;
sg = sg_next(sg);
if (--nents <= 0)
break;
sgaddr = (unsigned long) sg_virt(sg);
sglen = sg->length;
}
if (dlen < 0) {
/* Transfer overrun, big problems. */
printk("verify_one_map: Transfer overrun by %d bytes.\n",
-dlen);
nents = -1;
} else {
/* Advance to next dma_sg implies that the next iopte will
* begin it.
*/
iopte++;
}
out:
*__sg = sg;
*__iopte = iopte;
return nents;
}
static int verify_maps(struct scatterlist *sg, int nents, iopte_t *iopte)
{
struct scatterlist *dma_sg = sg;
struct scatterlist *orig_dma_sg = dma_sg;
int orig_nents = nents;
for (;;) {
nents = verify_one_map(dma_sg, &sg, nents, &iopte);
if (nents <= 0)
break;
dma_sg = sg_next(dma_sg);
if (dma_sg->dma_length == 0)
break;
}
if (nents > 0) {
printk("verify_maps: dma maps consumed by some sgs remain (%d)\n",
nents);
return -1;
}
if (nents < 0) {
printk("verify_maps: Error, messed up mappings, "
"at sg %d dma_sg %d\n",
(int) (orig_nents + nents), (int) (dma_sg - orig_dma_sg));
return -1;
}
/* This test passes... */
return 0;
}
void verify_sglist(struct scatterlist *sglist, int nents, iopte_t *iopte, int npages)
{
struct scatterlist *sg;
if (verify_lengths(sglist, nents, npages) < 0 ||
verify_maps(sglist, nents, iopte) < 0) {
int i;
printk("verify_sglist: Crap, messed up mappings, dumping, iodma at ");
printk("%016lx.\n", sglist->dma_address & IO_PAGE_MASK);
for_each_sg(sglist, sg, nents, i) {
printk("sg(%d): page_addr(%p) off(%x) length(%x) "
"dma_address[%016x] dma_length[%016x]\n",
i,
page_address(sg_page(sg)), sg->offset,
sg->length,
sg->dma_address, sg->dma_length);
}
}
/* Seems to be ok */
}
#endif
unsigned long prepare_sg(struct device *dev, struct scatterlist *sg, int nents)
{
struct scatterlist *dma_sg = sg;
unsigned long prev;
u32 dent_addr, dent_len;
unsigned int max_seg_size;
prev = (unsigned long) sg_virt(sg);
prev += (unsigned long) (dent_len = sg->length);
dent_addr = (u32) ((unsigned long)(sg_virt(sg)) & (IO_PAGE_SIZE - 1UL));
max_seg_size = dma_get_max_seg_size(dev);
while (--nents) {
unsigned long addr;
sg = sg_next(sg);
addr = (unsigned long) sg_virt(sg);
if (! VCONTIG(prev, addr) ||
dent_len + sg->length > max_seg_size) {
dma_sg->dma_address = dent_addr;
dma_sg->dma_length = dent_len;
dma_sg = sg_next(dma_sg);
dent_addr = ((dent_addr +
dent_len +
(IO_PAGE_SIZE - 1UL)) >> IO_PAGE_SHIFT);
dent_addr <<= IO_PAGE_SHIFT;
dent_addr += addr & (IO_PAGE_SIZE - 1UL);
dent_len = 0;
}
dent_len += sg->length;
prev = addr + sg->length;
}
dma_sg->dma_address = dent_addr;
dma_sg->dma_length = dent_len;
if (dma_sg != sg) {
dma_sg = sg_next(dma_sg);
dma_sg->dma_length = 0;
}
return ((unsigned long) dent_addr +
(unsigned long) dent_len +
(IO_PAGE_SIZE - 1UL)) >> IO_PAGE_SHIFT;
}

arch/sparc64/kernel/iommu_common.h

@@ -30,6 +30,32 @@
*/
#define IOMMU_PAGE_SHIFT 13
#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
static inline unsigned long iommu_num_pages(unsigned long vaddr,
unsigned long slen)
{
unsigned long npages;
npages = IO_PAGE_ALIGN(vaddr + slen) - (vaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
return npages;
}
static inline unsigned long calc_npages(struct scatterlist *sglist, int nelems)
{
unsigned long i, npages = 0;
struct scatterlist *sg;
for_each_sg(sglist, sg, nelems, i) {
unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
npages += iommu_num_pages(paddr, sg->length);
}
return npages;
}
/* You are _strongly_ advised to enable the following debugging code
* any time you make changes to the sg code below, run it for a while
* with filesystems mounted read-only before buying the farm... -DaveM
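The iommu_num_pages() helper added above rounds the buffer's end up and its start down before counting IO pages. Below is a hypothetical userspace check of that arithmetic, assuming sparc64's 8KB IO page size (shift of 13, matching the IOMMU_PAGE_SHIFT definition above); the macros are re-derived for the sketch rather than copied from kernel headers.

#include <stdio.h>

#define IO_PAGE_SHIFT   13                      /* 8KB IO pages on sparc64 */
#define IO_PAGE_SIZE    (1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK    (~(IO_PAGE_SIZE - 1UL))
#define IO_PAGE_ALIGN(x) (((x) + IO_PAGE_SIZE - 1UL) & IO_PAGE_MASK)

/* Same arithmetic as iommu_num_pages() above: round the end up, the
 * start down, and count whole IO pages in between. */
static unsigned long num_pages(unsigned long vaddr, unsigned long slen)
{
        return (IO_PAGE_ALIGN(vaddr + slen) -
                (vaddr & IO_PAGE_MASK)) >> IO_PAGE_SHIFT;
}

int main(void)
{
        /* 100 bytes straddling the first 8K boundary: 2 IOPTEs. */
        printf("%lu\n", num_pages(0x1ff0UL, 100UL));
        /* A page-aligned 16K buffer: exactly 2 IOPTEs. */
        printf("%lu\n", num_pages(0x4000UL, 0x4000UL));
        return 0;
}

Both calls print 2: a tiny buffer that straddles a page boundary costs as many IOPTEs as a page-aligned 16K one.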

arch/sparc64/kernel/pci_sun4v.c

@ -365,113 +365,14 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
spin_unlock_irqrestore(&iommu->lock, flags);
}
#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
static long fill_sg(long entry, struct device *dev,
struct scatterlist *sg,
int nused, int nelems, unsigned long prot)
{
struct scatterlist *dma_sg = sg;
unsigned long flags;
int i;
local_irq_save(flags);
iommu_batch_start(dev, prot, entry);
for (i = 0; i < nused; i++) {
unsigned long pteval = ~0UL;
u32 dma_npages;
dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
dma_sg->dma_length +
((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
do {
unsigned long offset;
signed int len;
/* If we are here, we know we have at least one
* more page to map. So walk forward until we
* hit a page crossing, and begin creating new
* mappings from that spot.
*/
for (;;) {
unsigned long tmp;
tmp = SG_ENT_PHYS_ADDRESS(sg);
len = sg->length;
if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
pteval = tmp & IO_PAGE_MASK;
offset = tmp & (IO_PAGE_SIZE - 1UL);
break;
}
if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
offset = 0UL;
len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
break;
}
sg = sg_next(sg);
nelems--;
}
pteval = (pteval & IOPTE_PAGE);
while (len > 0) {
long err;
err = iommu_batch_add(pteval);
if (unlikely(err < 0L))
goto iommu_map_failed;
pteval += IO_PAGE_SIZE;
len -= (IO_PAGE_SIZE - offset);
offset = 0;
dma_npages--;
}
pteval = (pteval & IOPTE_PAGE) + len;
sg = sg_next(sg);
nelems--;
/* Skip over any tail mappings we've fully mapped,
* adjusting pteval along the way. Stop when we
* detect a page crossing event.
*/
while (nelems &&
(pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
(pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
((pteval ^
(SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
pteval += sg->length;
sg = sg_next(sg);
nelems--;
}
if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
pteval = ~0UL;
} while (dma_npages != 0);
dma_sg = sg_next(dma_sg);
}
if (unlikely(iommu_batch_end() < 0L))
goto iommu_map_failed;
local_irq_restore(flags);
return 0;
iommu_map_failed:
local_irq_restore(flags);
return -1L;
}
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
unsigned long flags, npages, i, prot;
struct scatterlist *sg;
struct iommu *iommu;
unsigned long flags, npages, prot;
u32 dma_base;
struct scatterlist *sgtmp;
long entry, err;
int used;
u32 dma_base;
/* Fast path single entry scatterlists. */
if (nelems == 1) {
@@ -489,10 +390,8 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
if (unlikely(direction == DMA_NONE))
goto bad;
/* Step 1: Prepare scatter list. */
npages = prepare_sg(dev, sglist, nelems);
npages = calc_npages(sglist, nelems);
/* Step 2: Allocate a cluster and context, if necessary. */
spin_lock_irqsave(&iommu->lock, flags);
entry = arena_alloc(&iommu->arena, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
@@ -503,27 +402,45 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
dma_base = iommu->page_table_map_base +
(entry << IO_PAGE_SHIFT);
/* Step 3: Normalize DMA addresses. */
used = nelems;
sgtmp = sglist;
while (used && sgtmp->dma_length) {
sgtmp->dma_address += dma_base;
sgtmp = sg_next(sgtmp);
used--;
}
used = nelems - used;
/* Step 4: Create the mappings. */
prot = HV_PCI_MAP_ATTR_READ;
if (direction != DMA_TO_DEVICE)
prot |= HV_PCI_MAP_ATTR_WRITE;
err = fill_sg(entry, dev, sglist, used, nelems, prot);
local_irq_save(flags);
iommu_batch_start(dev, prot, entry);
for_each_sg(sglist, sg, nelems, i) {
unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
unsigned long slen = sg->length;
unsigned long this_npages;
this_npages = iommu_num_pages(paddr, slen);
sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
sg->dma_length = slen;
paddr &= IO_PAGE_MASK;
while (this_npages--) {
err = iommu_batch_add(paddr);
if (unlikely(err < 0L)) {
local_irq_restore(flags);
goto iommu_map_failed;
}
paddr += IO_PAGE_SIZE;
dma_base += IO_PAGE_SIZE;
}
}
err = iommu_batch_end();
local_irq_restore(flags);
if (unlikely(err < 0L))
goto iommu_map_failed;
return used;
return nelems;
bad:
if (printk_ratelimit())
@@ -541,12 +458,11 @@ iommu_map_failed:
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
unsigned long flags, npages;
struct pci_pbm_info *pbm;
struct iommu *iommu;
unsigned long flags, i, npages;
struct scatterlist *sg, *sgprv;
long entry;
u32 devhandle, bus_addr;
struct iommu *iommu;
long entry;
if (unlikely(direction == DMA_NONE)) {
if (printk_ratelimit())
@@ -558,16 +474,8 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
devhandle = pbm->devhandle;
bus_addr = sglist->dma_address & IO_PAGE_MASK;
sgprv = NULL;
for_each_sg(sglist, sg, nelems, i) {
if (sg->dma_length == 0)
break;
sgprv = sg;
}
npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
bus_addr) >> IO_PAGE_SHIFT;
npages = calc_npages(sglist, nelems);
entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

include/asm-sparc64/io.h

@@ -16,7 +16,7 @@
/* BIO layer definitions. */
extern unsigned long kern_base, kern_size;
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define BIO_VMERGE_BOUNDARY 8192
#define BIO_VMERGE_BOUNDARY 0
static inline u8 _inb(unsigned long addr)
{
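A note on that last hunk: in kernels of this era, BIO_VMERGE_BOUNDARY advertises the IOMMU's virtual-merge granularity to the block layer. Defining it as 0 marks virtual merging as unavailable, so the BIO layer stops building requests that assume the IOMMU will merge discontiguous pages, which is consistent with the merging code being removed above.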