IA64: sg chaining support

This updates the ia64 iommu/pci dma mappers to support sg chaining.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Jens Axboe 2007-10-16 11:27:26 +02:00
parent 46856afa01
commit 9b6eccfccb
4 changed files with 16 additions and 13 deletions
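
Background for the change: with sg chaining, one logical scatterlist can be assembled from several smaller struct scatterlist arrays, the last slot of each array holding a link to the next piece. Plain pointer arithmetic (sg++) steps into that link slot instead of following it, which is why every open-coded advance below becomes sg_next(). A minimal sketch of the safe pattern (illustrative only; walk_sglist is a made-up name, not part of this patch):

#include <linux/kernel.h>
#include <linux/scatterlist.h>

/* Walk every entry of a possibly-chained scatterlist. With chaining,
 * 'sg + 1' may be a chain link rather than a data entry, so the list
 * must be advanced with sg_next(), which follows and skips links. */
static void walk_sglist(struct scatterlist *sglist, int nents)
{
	struct scatterlist *sg = sglist;
	int i;

	for (i = 0; i < nents; i++, sg = sg_next(sg))
		pr_debug("entry %d: length %u\n", i, sg->length);
}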

arch/ia64/hp/common/sba_iommu.c

@@ -396,7 +396,7 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
 		printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
 		       startsg->dma_address, startsg->dma_length,
 		       sba_sg_address(startsg));
-		startsg++;
+		startsg = sg_next(startsg);
 	}
 }
 
@@ -409,7 +409,7 @@ sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
 	while (the_nents-- > 0) {
 		if (sba_sg_address(the_sg) == 0x0UL)
 			sba_dump_sg(NULL, startsg, nents);
-		the_sg++;
+		the_sg = sg_next(the_sg);
 	}
 }
 
@@ -1201,7 +1201,7 @@ sba_fill_pdir(
 			u32 pide = startsg->dma_address & ~PIDE_FLAG;
 			dma_offset = (unsigned long) pide & ~iovp_mask;
 			startsg->dma_address = 0;
-			dma_sg++;
+			dma_sg = sg_next(dma_sg);
 			dma_sg->dma_address = pide | ioc->ibase;
 			pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
 			n_mappings++;
@@ -1228,7 +1228,7 @@ sba_fill_pdir(
 				pdirp++;
 			} while (cnt > 0);
 		}
-		startsg++;
+		startsg = sg_next(startsg);
 	}
 	/* force pdir update */
 	wmb();
@@ -1297,7 +1297,7 @@ sba_coalesce_chunks( struct ioc *ioc,
 	while (--nents > 0) {
 		unsigned long vaddr;	/* tmp */
 
-		startsg++;
+		startsg = sg_next(startsg);
 
 		/* PARANOID */
 		startsg->dma_address = startsg->dma_length = 0;
@@ -1407,7 +1407,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 #ifdef ALLOW_IOV_BYPASS_SG
 	ASSERT(to_pci_dev(dev)->dma_mask);
 	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
-		for (sg = sglist ; filled < nents ; filled++, sg++){
+		for_each_sg(sglist, sg, nents, filled) {
 			sg->dma_length = sg->length;
 			sg->dma_address = virt_to_phys(sba_sg_address(sg));
 		}
@@ -1501,7 +1501,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 	while (nents && sglist->dma_length) {
 
 		sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
-		sglist++;
+		sglist = sg_next(sglist);
 		nents--;
 	}
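
Where the loop shape allows it, the patch uses for_each_sg() instead of an open-coded loop. In <linux/scatterlist.h> the helper is essentially the macro below, i.e. the same counted iteration as before, but advancing through chain links via sg_next():

#define for_each_sg(sglist, sg, nr, __i)	\
	for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))

So the converted bypass loop in sba_map_sg() above fills dma_length and dma_address for the same nents entries; only the pointer advance changes.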

arch/ia64/sn/pci/pci_dma.c

@@ -218,16 +218,17 @@ EXPORT_SYMBOL(sn_dma_unmap_single);
  *
  * Unmap a set of streaming mode DMA translations.
  */
-void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		     int nhwentries, int direction)
 {
 	int i;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+	struct scatterlist *sg;
 
 	BUG_ON(dev->bus != &pci_bus_type);
 
-	for (i = 0; i < nhwentries; i++, sg++) {
+	for_each_sg(sgl, sg, nhwentries, i) {
 		provider->dma_unmap(pdev, sg->dma_address, direction);
 		sg->dma_address = (dma_addr_t) NULL;
 		sg->dma_length = 0;
@@ -244,11 +245,11 @@ EXPORT_SYMBOL(sn_dma_unmap_sg);
  *
  * Maps each entry of @sg for DMA.
  */
-int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
 		  int direction)
 {
 	unsigned long phys_addr;
-	struct scatterlist *saved_sg = sg;
+	struct scatterlist *saved_sg = sgl, *sg;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
 	int i;
@@ -258,7 +259,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 	/*
 	 * Setup a DMA address for each entry in the scatterlist.
 	 */
-	for (i = 0; i < nhwentries; i++, sg++) {
+	for_each_sg(sgl, sg, nhwentries, i) {
 		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
 		sg->dma_address = provider->dma_map(pdev,
 						    phys_addr, sg->length,
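
Note the conversion pattern in both sn_dma_map_sg() and sn_dma_unmap_sg(): the parameter is renamed to sgl (the list head) and a separate local cursor sg is introduced, because for_each_sg() needs a variable it can advance while the head stays usable (e.g. for saved_sg). A schematic of the pattern (map_all and its body are invented for illustration):

#include <linux/device.h>
#include <linux/scatterlist.h>

/* 'sgl' stays pointing at the list head; 'sg' is the moving cursor. */
static int map_all(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		/* set up sg->dma_address / sg->dma_length here */
	}
	return nents;
}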

include/asm-ia64/dma-mapping.h

@@ -6,7 +6,7 @@
  * David Mosberger-Tang <davidm@hpl.hp.com>
  */
 #include <asm/machvec.h>
-#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
 
 #define dma_alloc_coherent	platform_dma_alloc_coherent
 #define dma_alloc_noncoherent	platform_dma_alloc_coherent	/* coherent mem. is cheap */

include/asm-ia64/scatterlist.h

@@ -30,4 +30,6 @@ struct scatterlist {
 #define sg_dma_len(sg)		((sg)->dma_length)
 #define sg_dma_address(sg)	((sg)->dma_address)
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* _ASM_IA64_SCATTERLIST_H */
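
Defining ARCH_HAS_SG_CHAIN advertises that this architecture's DMA mappers now tolerate chained scatterlists, so the block layer may hand them lists built from linked pieces. A rough sketch of how such a list is assembled (array names and sizes invented; sg_init_table() comes from the same patch series, so this uses the eventual <linux/scatterlist.h> API rather than only what existed at this exact commit):

#include <linux/scatterlist.h>

static struct scatterlist first[8], second[8];

/* Present two fixed-size arrays as one logical scatterlist. */
static void build_chained_list(void)
{
	sg_init_table(first, 8);
	sg_init_table(second, 8);

	/* The last slot of 'first' becomes a link entry pointing at
	 * 'second', leaving 7 usable data entries in 'first'; this is
	 * exactly why consumers may no longer advance with sg++. */
	sg_chain(first, 8, second);
}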