Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/agpgart

* master.kernel.org:/pub/scm/linux/kernel/git/davej/agpgart:
  [AGPGART] x86_64: Enable VIA AGP driver on x86-64 for VIA P4 chipsets
  [AGPGART] x86_64: Fix wrong PCI ID for ALI M1695 AGP bridge
  [AGPGART] ATI RS350 support.
  [AGPGART] Lots of CodingStyle/whitespace cleanups.
commit 2e9abdd9ba
Linus Torvalds, 2006-03-25 09:17:32 -08:00
19 changed files with 103 additions and 106 deletions

drivers/char/agp/Kconfig

@@ -103,7 +103,7 @@ config AGP_SWORKS
 config AGP_VIA
 	tristate "VIA chipset support"
-	depends on AGP && X86_32
+	depends on AGP
 	help
 	  This option gives you AGP support for the GLX component of
 	  X on VIA MVP3/Apollo Pro chipsets.

drivers/char/agp/agp.h

@@ -104,8 +104,7 @@ struct agp_bridge_driver {
 	void (*agp_enable)(struct agp_bridge_data *, u32);
 	void (*cleanup)(void);
 	void (*tlb_flush)(struct agp_memory *);
-	unsigned long (*mask_memory)(struct agp_bridge_data *,
-		unsigned long, int);
+	unsigned long (*mask_memory)(struct agp_bridge_data *, unsigned long, int);
 	void (*cache_flush)(void);
 	int (*create_gatt_table)(struct agp_bridge_data *);
 	int (*free_gatt_table)(struct agp_bridge_data *);

drivers/char/agp/alpha-agp.c

@@ -23,7 +23,8 @@ static struct page *alpha_core_agp_vm_nopage(struct vm_area_struct *vma,
 	dma_addr = address - vma->vm_start + agp->aperture.bus_base;
 	pa = agp->ops->translate(agp, dma_addr);
-	if (pa == (unsigned long)-EINVAL) return NULL; /* no translation */
+	if (pa == (unsigned long)-EINVAL)
+		return NULL; /* no translation */
 	/*
 	 * Get the page, inc the use count, and return it
@@ -98,7 +99,8 @@ static int alpha_core_agp_insert_memory(struct agp_memory *mem, off_t pg_start,
 	temp = agp_bridge->current_size;
 	num_entries = A_SIZE_FIX(temp)->num_entries;
-	if ((pg_start + mem->page_count) > num_entries) return -EINVAL;
+	if ((pg_start + mem->page_count) > num_entries)
+		return -EINVAL;
 	status = agp->ops->bind(agp, pg_start, mem);
 	mb();

drivers/char/agp/amd64-agp.c

@@ -216,7 +216,7 @@ static struct aper_size_info_32 amd_8151_sizes[7] =
 	{256, 65536, 6, 0x00000700 }, /* 1 1 1 0 0 0 */
 	{128, 32768, 5, 0x00000720 }, /* 1 1 1 1 0 0 */
 	{64, 16384, 4, 0x00000730 }, /* 1 1 1 1 1 0 */
-	{32, 8192, 3, 0x00000738 } /* 1 1 1 1 1 1 */
+	{32, 8192, 3, 0x00000738 } /* 1 1 1 1 1 1 */
 };
 static int amd_8151_configure(void)

drivers/char/agp/ati-agp.c

@@ -74,7 +74,7 @@ static int ati_create_page_map(ati_page_map *page_map)
 	/*CACHE_FLUSH();*/
 	global_cache_flush();
-	for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
+	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
 		writel(agp_bridge->scratch_page, page_map->remapped+i);
 		readl(page_map->remapped+i); /* PCI Posting. */
 	}
@@ -99,7 +99,7 @@ static void ati_free_gatt_pages(void)
 	ati_page_map *entry;
 	tables = ati_generic_private.gatt_pages;
-	for(i = 0; i < ati_generic_private.num_tables; i++) {
+	for (i = 0; i < ati_generic_private.num_tables; i++) {
 		entry = tables[i];
 		if (entry != NULL) {
 			if (entry->real != NULL)
@@ -387,7 +387,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
 	agp_bridge->gart_bus_addr = addr;
 	/* Calculate the agp offset */
-	for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
+	for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
 		writel(virt_to_gart(ati_generic_private.gatt_pages[i]->real) | 1,
 			page_dir.remapped+GET_PAGE_DIR_OFF(addr));
 		readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
@@ -466,6 +466,10 @@ static struct agp_device_ids ati_agp_device_ids[] __devinitdata =
 		.device_id = PCI_DEVICE_ID_ATI_RS300_200,
 		.chipset_name = "IGP9100/M",
 	},
+	{
+		.device_id = PCI_DEVICE_ID_ATI_RS350_200,
+		.chipset_name = "IGP9100/M",
+	},
 	{ }, /* dummy final entry, always present */
 };

drivers/char/agp/efficeon-agp.c

@@ -373,7 +373,7 @@ static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
 	 */
 	r = &pdev->resource[0];
 	if (!r->start && r->end) {
-		if(pci_assign_resource(pdev, 0)) {
+		if (pci_assign_resource(pdev, 0)) {
 			printk(KERN_ERR PFX "could not assign resource 0\n");
 			return -ENODEV;
 		}

drivers/char/agp/frontend.c

@@ -781,7 +781,7 @@ static int agpioc_acquire_wrap(struct agp_file_private *priv)
 	if (agp_fe.current_controller != NULL)
 		return -EBUSY;
-	if(!agp_bridge)
+	if (!agp_bridge)
 		return -ENODEV;
 	if (atomic_read(&agp_bridge->agp_in_use))
drivers/char/agp/hp-agp.c

@@ -85,8 +85,8 @@ static int __init hp_zx1_ioc_shared(void)
 	/*
 	 * IOC already configured by sba_iommu module; just use
 	 * its setup. We assume:
-	 * - IOVA space is 1Gb in size
-	 * - first 512Mb is IOMMU, second 512Mb is GART
+	 * - IOVA space is 1Gb in size
+	 * - first 512Mb is IOMMU, second 512Mb is GART
 	 */
 	hp->io_tlb_ps = readq(hp->ioc_regs+HP_ZX1_TCNFG);
 	switch (hp->io_tlb_ps) {
@@ -115,7 +115,7 @@ static int __init hp_zx1_ioc_shared(void)
 	if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
 		/* Normal case when no AGP device in system */
-		hp->gatt = NULL;
+		hp->gatt = NULL;
 		hp->gatt_entries = 0;
 		printk(KERN_ERR PFX "No reserved IO PDIR entry found; "
 			"GART disabled\n");

drivers/char/agp/i460-agp.c

@@ -400,10 +400,10 @@ static int i460_insert_memory_large_io_page (struct agp_memory *mem,
 	num_entries = A_SIZE_8(temp)->num_entries;
 	/* Figure out what pg_start means in terms of our large GART pages */
-	start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
-	end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
-	start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
-	end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
+	start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
+	end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
+	start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
+	end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
 	if (end > i460.lp_desc + num_entries) {
 		printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
@@ -458,10 +458,10 @@ static int i460_remove_memory_large_io_page (struct agp_memory *mem,
 	num_entries = A_SIZE_8(temp)->num_entries;
 	/* Figure out what pg_start means in terms of our large GART pages */
-	start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
-	end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
-	start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
-	end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
+	start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
+	end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
+	start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
+	end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
 	for (i = 0, lp = start; lp <= end; ++lp) {
 		for (idx = ((lp == start) ? start_offset : 0);
drivers/char/agp/intel-agp.c

@@ -201,9 +201,9 @@ static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
 	temp = agp_bridge->current_size;
 	num_entries = A_SIZE_FIX(temp)->num_entries;
-	if ((pg_start + mem->page_count) > num_entries) {
+	if ((pg_start + mem->page_count) > num_entries)
 		return -EINVAL;
-	}
 	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
 		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
 			return -EBUSY;
@@ -221,7 +221,7 @@ static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
 		agp_bridge->driver->tlb_flush(mem);
 		return 0;
 	}
-	if((type == AGP_PHYS_MEMORY) && (mem->type == AGP_PHYS_MEMORY))
+	if ((type == AGP_PHYS_MEMORY) && (mem->type == AGP_PHYS_MEMORY))
 		goto insert;
 	return -EINVAL;
 }
@@ -328,7 +328,7 @@ static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
 static void intel_i810_free_by_type(struct agp_memory *curr)
 {
 	agp_free_key(curr->key);
-	if(curr->type == AGP_PHYS_MEMORY) {
+	if (curr->type == AGP_PHYS_MEMORY) {
 		if (curr->page_count == 4)
 			i8xx_destroy_pages(gart_to_virt(curr->memory[0]));
 		else {
@@ -1603,11 +1603,10 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 		name = "i820";
 		break;
 	case PCI_DEVICE_ID_INTEL_82830_HB:
-		if (find_i830(PCI_DEVICE_ID_INTEL_82830_CGC)) {
+		if (find_i830(PCI_DEVICE_ID_INTEL_82830_CGC))
 			bridge->driver = &intel_830_driver;
-		} else {
+		else
 			bridge->driver = &intel_830mp_driver;
-		}
 		name = "830M";
 		break;
 	case PCI_DEVICE_ID_INTEL_82840_HB:
@@ -1619,11 +1618,10 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 		name = "i845";
 		break;
 	case PCI_DEVICE_ID_INTEL_82845G_HB:
-		if (find_i830(PCI_DEVICE_ID_INTEL_82845G_IG)) {
+		if (find_i830(PCI_DEVICE_ID_INTEL_82845G_IG))
 			bridge->driver = &intel_830_driver;
-		} else {
+		else
 			bridge->driver = &intel_845_driver;
-		}
 		name = "845G";
 		break;
 	case PCI_DEVICE_ID_INTEL_82850_HB:
@@ -1648,11 +1646,10 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 		name = "i860";
 		break;
 	case PCI_DEVICE_ID_INTEL_82865_HB:
-		if (find_i830(PCI_DEVICE_ID_INTEL_82865_IG)) {
+		if (find_i830(PCI_DEVICE_ID_INTEL_82865_IG))
 			bridge->driver = &intel_830_driver;
-		} else {
+		else
 			bridge->driver = &intel_845_driver;
-		}
 		name = "865";
 		break;
 	case PCI_DEVICE_ID_INTEL_82875_HB:
@@ -1660,35 +1657,31 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 		name = "i875";
 		break;
 	case PCI_DEVICE_ID_INTEL_82915G_HB:
-		if (find_i830(PCI_DEVICE_ID_INTEL_82915G_IG)) {
+		if (find_i830(PCI_DEVICE_ID_INTEL_82915G_IG))
 			bridge->driver = &intel_915_driver;
-		} else {
+		else
 			bridge->driver = &intel_845_driver;
-		}
 		name = "915G";
 		break;
 	case PCI_DEVICE_ID_INTEL_82915GM_HB:
-		if (find_i830(PCI_DEVICE_ID_INTEL_82915GM_IG)) {
+		if (find_i830(PCI_DEVICE_ID_INTEL_82915GM_IG))
 			bridge->driver = &intel_915_driver;
-		} else {
+		else
 			bridge->driver = &intel_845_driver;
-		}
 		name = "915GM";
 		break;
 	case PCI_DEVICE_ID_INTEL_82945G_HB:
-		if (find_i830(PCI_DEVICE_ID_INTEL_82945G_IG)) {
+		if (find_i830(PCI_DEVICE_ID_INTEL_82945G_IG))
 			bridge->driver = &intel_915_driver;
-		} else {
+		else
 			bridge->driver = &intel_845_driver;
-		}
 		name = "945G";
 		break;
 	case PCI_DEVICE_ID_INTEL_82945GM_HB:
-		if (find_i830(PCI_DEVICE_ID_INTEL_82945GM_IG)) {
+		if (find_i830(PCI_DEVICE_ID_INTEL_82945GM_IG))
 			bridge->driver = &intel_915_driver;
-		} else {
+		else
 			bridge->driver = &intel_845_driver;
-		}
 		name = "945GM";
 		break;
 	case PCI_DEVICE_ID_INTEL_7505_0:
@@ -1724,7 +1717,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 	 */
 	r = &pdev->resource[0];
 	if (!r->start && r->end) {
-		if(pci_assign_resource(pdev, 0)) {
+		if (pci_assign_resource(pdev, 0)) {
 			printk(KERN_ERR PFX "could not assign resource 0\n");
 			agp_put_bridge(bridge);
 			return -ENODEV;

drivers/char/agp/isoch.c

@@ -26,7 +26,7 @@ static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new)
 	list_for_each(pos, head) {
 		cur = list_entry(pos, struct agp_3_5_dev, list);
-		if(cur->maxbw > n->maxbw)
+		if (cur->maxbw > n->maxbw)
 			break;
 	}
 	list_add_tail(new, pos);
@@ -214,7 +214,7 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
 		 * many writes on the AGP bus).
 		 */
 		master[cdev].rq = master[cdev].n;
-		if(master[cdev].y > 0x1)
+		if (master[cdev].y > 0x1)
 			master[cdev].rq *= (1 << (master[cdev].y - 1));
 		tot_rq += master[cdev].rq;
@@ -366,7 +366,7 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
 		case 0x0300: /* Display controller */
 		case 0x0400: /* Multimedia controller */
-			if((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) {
+			if ((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) {
 				ret = -ENOMEM;
 				goto free_and_exit;
 			}

drivers/char/agp/nvidia-agp.c

@@ -72,7 +72,7 @@ static int nvidia_init_iorr(u32 base, u32 size)
 	/* Find the iorr that is already used for the base */
 	/* If not found, determine the uppermost available iorr */
 	free_iorr_addr = AMD_K7_NUM_IORR;
-	for(iorr_addr = 0; iorr_addr < AMD_K7_NUM_IORR; iorr_addr++) {
+	for (iorr_addr = 0; iorr_addr < AMD_K7_NUM_IORR; iorr_addr++) {
 		rdmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi);
 		rdmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi);
@@ -139,7 +139,7 @@ static int nvidia_configure(void)
 	}
 	/* attbase */
-	for(i = 0; i < 8; i++) {
+	for (i = 0; i < 8; i++) {
 		pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_ATTBASE(i),
 			(agp_bridge->gatt_bus_addr + (i % num_dirs) * 64 * 1024) | 1);
 	}
@@ -205,7 +205,7 @@ static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 		(nvidia_private.num_active_entries - agp_memory_reserved/PAGE_SIZE))
 		return -EINVAL;
-	for(j = pg_start; j < (pg_start + mem->page_count); j++) {
+	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
 		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j)))
 			return -EBUSY;
 	}
@@ -264,9 +264,9 @@ static void nvidia_tlbflush(struct agp_memory *mem)
 	}
 	/* flush TLB entries */
-	for(i = 0; i < 32 + 1; i++)
+	for (i = 0; i < 32 + 1; i++)
 		temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
-	for(i = 0; i < 32 + 1; i++)
+	for (i = 0; i < 32 + 1; i++)
 		temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
 }

drivers/char/agp/sgi-agp.c

@@ -329,7 +329,7 @@ static int __devinit agp_sgi_init(void)
 static void __devexit agp_sgi_cleanup(void)
 {
-	if(sgi_tioca_agp_bridges)
+	if (sgi_tioca_agp_bridges)
 		kfree(sgi_tioca_agp_bridges);
 	sgi_tioca_agp_bridges=NULL;
 }

drivers/char/agp/sis-agp.c

@@ -121,7 +121,7 @@ static struct aper_size_info_8 sis_generic_sizes[7] =
 static struct agp_bridge_driver sis_driver = {
 	.owner = THIS_MODULE,
-	.aperture_sizes = sis_generic_sizes,
+	.aperture_sizes = sis_generic_sizes,
 	.size_type = U8_APER_SIZE,
 	.num_aperture_sizes = 7,
 	.configure = sis_configure,
@@ -243,11 +243,11 @@ static void __devinit sis_get_driver(struct agp_bridge_data *bridge)
 {
 	int i;
-	for(i=0; sis_broken_chipsets[i]!=0; ++i)
-		if(bridge->dev->device==sis_broken_chipsets[i])
+	for (i=0; sis_broken_chipsets[i]!=0; ++i)
+		if (bridge->dev->device==sis_broken_chipsets[i])
 			break;
-	if(sis_broken_chipsets[i] || agp_sis_force_delay)
+	if (sis_broken_chipsets[i] || agp_sis_force_delay)
 		sis_driver.agp_enable=sis_delayed_enable;
 	// sis chipsets that indicate less than agp3.5

drivers/char/agp/sworks-agp.c

@@ -64,7 +64,7 @@ static int serverworks_create_page_map(struct serverworks_page_map *page_map)
 	}
 	global_cache_flush();
-	for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
+	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
 		writel(agp_bridge->scratch_page, page_map->remapped+i);
 	return 0;
@@ -84,7 +84,7 @@ static void serverworks_free_gatt_pages(void)
 	struct serverworks_page_map *entry;
 	tables = serverworks_private.gatt_pages;
-	for(i = 0; i < serverworks_private.num_tables; i++) {
+	for (i = 0; i < serverworks_private.num_tables; i++) {
 		entry = tables[i];
 		if (entry != NULL) {
 			if (entry->real != NULL) {
@@ -161,7 +161,7 @@ static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
 		return retval;
 	}
 	/* Create a fake scratch directory */
-	for(i = 0; i < 1024; i++) {
+	for (i = 0; i < 1024; i++) {
 		writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
 		writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
 	}
@@ -186,8 +186,7 @@ static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
 	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
 	/* Calculate the agp offset */
-	for(i = 0; i < value->num_entries / 1024; i++)
+	for (i = 0; i < value->num_entries / 1024; i++)
 		writel(virt_to_gart(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
 	return 0;