Merge branch 'pci/resource-mmap' into next

* pci/resource-mmap:
  ia64: Use generic pci_mmap_resource_range()
  ia64: Remove redundant checks for WC in pci_mmap_page_range()
  ia64: Remove redundant valid_mmap_phys_addr_range() from pci_mmap_page_range()
  PCI: Add I/O BAR support to generic pci_mmap_resource_range()
  x86/PCI: Use generic pci_mmap_resource_range()
  unicore32/PCI: Use generic pci_mmap_resource_range()
  sh/PCI: Use generic pci_mmap_resource_range()
  parisc: Use generic pci_mmap_resource_range()
  mn10300/PCI: Use generic pci_mmap_resource_range()
  MIPS: PCI: Use generic pci_mmap_resource_range()
  cris/PCI: Use generic pci_mmap_resource_range()
  ARM/PCI: Use generic pci_mmap_resource_range()
  PCI: Add pci_mmap_resource_range() and use it for ARM64
  PCI: Add BAR index argument to pci_mmap_page_range()
  PCI: Use BAR index in sysfs attr->private instead of resource pointer
  PCI: Add arch_can_pci_mmap_io() on architectures which can mmap() I/O space
  PCI: Move multiple declarations of pci_mmap_page_range() to <linux/pci.h>
  PCI: Add arch_can_pci_mmap_wc() macro
  xtensa/PCI: Do not mmap PCI BARs to userspace as write-through
  PCI: Only allow WC mmap on prefetchable resources
  PCI: Fix another sanity check bug in /proc/pci mmap
  PCI: Fix pci_mmap_fits() for HAVE_PCI_RESOURCE_TO_USER platforms
commit 889e4dd916
Author: Bjorn Helgaas
Date: 2017-04-28 10:34:34 -05:00
34 changed files with 238 additions and 385 deletions

@@ -113,9 +113,18 @@ Supporting PCI access on new platforms
--------------------------------------
In order to support PCI resource mapping as described above, Linux platform
code must define HAVE_PCI_MMAP and provide a pci_mmap_page_range function.
Platforms are free to only support subsets of the mmap functionality, but
useful return codes should be provided.
code should ideally define ARCH_GENERIC_PCI_MMAP_RESOURCE and use the generic
implementation of that functionality. To support the historical interface of
mmap() through files in /proc/bus/pci, platforms may also set HAVE_PCI_MMAP.
Alternatively, platforms which set HAVE_PCI_MMAP may provide their own
implementation of pci_mmap_page_range() instead of defining
ARCH_GENERIC_PCI_MMAP_RESOURCE.
Platforms which support write-combining maps of PCI resources must define
arch_can_pci_mmap_wc() which shall evaluate to non-zero at runtime when
write-combining is permitted. Platforms which support maps of I/O resources
define arch_can_pci_mmap_io() similarly.
Legacy resources are protected by the HAVE_PCI_LEGACY define. Platforms
wishing to support legacy functionality should define it and provide
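
As an illustration of the scheme described above, a minimal <asm/pci.h> fragment for a hypothetical architecture opting into the generic implementation might look like the sketch below (not taken from any in-tree port; note that defining arch_can_pci_mmap_io() also obliges the platform to implement pci_iobar_pfn(), declared in <linux/pci.h>):

/* Hypothetical arch/<arch>/include/asm/pci.h fragment (sketch only) */

/* Opt in to the generic pci_mmap_resource_range() implementation */
#define ARCH_GENERIC_PCI_MMAP_RESOURCE	1

/* Also support the historical mmap() through files in /proc/bus/pci */
#define HAVE_PCI_MMAP			1

/* This imaginary platform can always map write-combined and can also
 * mmap I/O port space; both macros are evaluated at runtime.
 */
#define arch_can_pci_mmap_wc()		1
#define arch_can_pci_mmap_io()		1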

@@ -29,8 +29,7 @@ static inline int pci_proc_domain(struct pci_bus *bus)
#define PCI_DMA_BUS_IS_PHYS (1)
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
#define ARCH_GENERIC_PCI_MMAP_RESOURCE
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{

@@ -597,25 +597,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
return start;
}
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
if (mmap_state == pci_mmap_io)
return -EINVAL;
/*
* Mark this as IO
*/
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
return 0;
}
void __init pci_map_io_early(unsigned long pfn)
{
struct map_desc pci_io_desc = {

@@ -22,6 +22,8 @@
*/
#define PCI_DMA_BUS_IS_PHYS (0)
#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
extern int isa_dma_bridge_buggy;
#ifdef CONFIG_PCI

@@ -14,28 +14,6 @@ void pcibios_set_master(struct pci_dev *dev)
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
unsigned long prot;
/* Leave vm_pgoff as-is, the PCI space address is the physical
* address on this platform.
*/
prot = pgprot_val(vma->vm_page_prot);
vma->vm_page_prot = __pgprot(prot);
/* Write-combine setting is ignored, it is changed via the mtrr
* interfaces on this platform.
*/
if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
return 0;
}
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)

@@ -42,9 +42,7 @@ struct pci_dev;
#define PCI_DMA_BUS_IS_PHYS (1)
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#endif /* __KERNEL__ */

@@ -51,8 +51,9 @@ extern unsigned long ia64_max_iommu_merge_mask;
#define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL)
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#define arch_can_pci_mmap_wc() 1
#define HAVE_PCI_LEGACY
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
struct vm_area_struct *vma,

@@ -418,52 +418,6 @@ pcibios_align_resource (void *data, const struct resource *res,
return res->start;
}
int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
unsigned long size = vma->vm_end - vma->vm_start;
pgprot_t prot;
/*
* I/O space cannot be accessed via normal processor loads and
* stores on this platform.
*/
if (mmap_state == pci_mmap_io)
/*
* XXX we could relax this for I/O spaces for which ACPI
* indicates that the space is 1-to-1 mapped. But at the
* moment, we don't support multiple PCI address spaces and
* the legacy I/O space is not 1-to-1 mapped, so this is moot.
*/
return -EINVAL;
if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
return -EINVAL;
prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
vma->vm_page_prot);
/*
* If the user requested WC, the kernel uses UC or WC for this region,
* and the chipset supports WC, we can use WC. Otherwise, we have to
* use the same attribute the kernel uses.
*/
if (write_combine &&
((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC ||
(pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) &&
efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
else
vma->vm_page_prot = prot;
if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
/**
* ia64_pci_get_legacy_mem - generic legacy mem routine
* @bus: bus to get legacy memory base address for

@@ -46,12 +46,10 @@ extern int pci_domain_nr(struct pci_bus *bus);
extern int pci_proc_domain(struct pci_bus *bus);
struct vm_area_struct;
/* Map a range of PCI memory or I/O space for a device into user space */
int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP 1
#define HAVE_PCI_MMAP 1
#define arch_can_pci_mmap_io() 1
extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
size_t count);

@@ -278,7 +278,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
*
* Returns a negative error code on failure, zero on success.
*/
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
int pci_mmap_page_range(struct pci_dev *dev, int bar, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
resource_size_t offset =

@@ -110,10 +110,7 @@ extern unsigned long PCIBIOS_MIN_MEM;
extern void pcibios_set_master(struct pci_dev *dev);
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
/*

@@ -57,27 +57,3 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
*start = fixup_bigphys_addr(rsrc->start, size);
*end = rsrc->start + size;
}
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
unsigned long prot;
/*
* I/O space can be accessed via normal processor loads and stores on
* this platform but for now we elect not to do this and portable
* drivers should not do this anyway.
*/
if (mmap_state == pci_mmap_io)
return -EINVAL;
/*
* Ignore write-combine; for now only return uncached mappings.
*/
prot = pgprot_val(vma->vm_page_prot);
prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
vma->vm_page_prot = __pgprot(prot);
return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}

@@ -74,9 +74,7 @@ static inline int pci_controller_num(struct pci_dev *dev)
}
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state,
int write_combine);
#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#endif /* __KERNEL__ */

@@ -210,26 +210,3 @@ void __init pcibios_resource_survey(void)
pcibios_allocate_resources(0);
pcibios_allocate_resources(1);
}
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
unsigned long prot;
/* Leave vm_pgoff as-is, the PCI space address is the physical
* address on this platform.
*/
vma->vm_flags |= VM_LOCKED;
prot = pgprot_val(vma->vm_page_prot);
prot &= ~_PAGE_CACHE;
vma->vm_page_prot = __pgprot(prot);
/* Write-combine setting is ignored */
if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
return 0;
}

@@ -200,8 +200,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
}
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#endif /* __ASM_PARISC_PCI_H */

@@ -227,34 +227,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
return start;
}
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
unsigned long prot;
/*
* I/O space can be accessed via normal processor loads and stores on
* this platform but for now we elect not to do this and portable
* drivers should not do this anyway.
*/
if (mmap_state == pci_mmap_io)
return -EINVAL;
if (write_combine)
return -EINVAL;
/*
* Ignore write-combine; for now only return uncached mappings.
*/
prot = pgprot_val(vma->vm_page_prot);
prot |= _PAGE_NO_CACHE;
vma->vm_page_prot = __pgprot(prot);
return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
/*
* A driver is enabling the device. We make sure that all the appropriate
* bits are set to allow the device to operate as the driver is expecting.

@@ -77,12 +77,11 @@ extern int pci_domain_nr(struct pci_bus *bus);
extern int pci_proc_domain(struct pci_bus *bus);
struct vm_area_struct;
/* Map a range of PCI memory or I/O space for a device into user space */
int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP 1
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() and it does WC */
#define HAVE_PCI_MMAP 1
#define arch_can_pci_mmap_io() 1
#define arch_can_pci_mmap_wc() 1
extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
size_t count);

@@ -521,7 +521,8 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
*
* Returns a negative error code on failure, zero on success.
*/
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
int pci_mmap_page_range(struct pci_dev *dev, int bar,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
resource_size_t offset =

@@ -269,27 +269,6 @@ void __ref pcibios_report_status(unsigned int status_mask, int warn)
}
}
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
/*
* I/O space can be accessed via normal processor loads and stores on
* this platform but for now we elect not to do this and portable
* drivers should not do this anyway.
*/
if (mmap_state == pci_mmap_io)
return -EINVAL;
/*
* Ignore write-combine; for now only return uncached mappings.
*/
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
#ifndef CONFIG_GENERIC_IOMAP
void __iomem *__pci_ioport_map(struct pci_dev *dev,

@@ -66,8 +66,8 @@ extern unsigned long PCIBIOS_MIN_IO, PCIBIOS_MIN_MEM;
struct pci_dev;
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
#define ARCH_GENERIC_PCI_MMAP_RESOURCE
extern void pcibios_set_master(struct pci_dev *dev);
/* Dynamic DMA mapping stuff.

@@ -42,13 +42,10 @@ static inline int pci_proc_domain(struct pci_bus *bus)
/* Platform support for /proc/bus/pci/X/Y mmap()s. */
#define HAVE_PCI_MMAP
#define arch_can_pci_mmap_io() 1
#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
#define get_pci_unmapped_area get_fb_unmapped_area
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state,
int write_combine);
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return PCI_IRQ_NONE;

@@ -862,9 +862,9 @@ static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vm
*
* Returns a negative error code on failure, zero on success.
*/
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state,
int write_combine)
int pci_mmap_page_range(struct pci_dev *dev, int bar,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
int ret;

@@ -17,8 +17,7 @@
#include <mach/hardware.h> /* for PCIBIOS_MIN_* */
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#endif /* __KERNEL__ */
#endif

@@ -356,26 +356,3 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
}
return 0;
}
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
unsigned long phys;
if (mmap_state == pci_mmap_io)
return -EINVAL;
phys = vma->vm_pgoff;
/*
* Mark this as IO
*/
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (remap_pfn_range(vma, vma->vm_start, phys,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
return 0;
}

@@ -7,6 +7,7 @@
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/pat.h>
#include <asm/x86_init.h>
#ifdef __KERNEL__
@@ -102,10 +103,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state,
int write_combine);
#define arch_can_pci_mmap_wc() pat_enabled()
#define ARCH_GENERIC_PCI_MMAP_RESOURCE
#ifdef CONFIG_PCI
extern void early_quirks(void);

@@ -406,50 +406,3 @@ void __init pcibios_resource_survey(void)
*/
ioapic_insert_resources();
}
static const struct vm_operations_struct pci_mmap_ops = {
.access = generic_access_phys,
};
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
unsigned long prot;
/* I/O space cannot be accessed via normal processor loads and
* stores on this platform.
*/
if (mmap_state == pci_mmap_io)
return -EINVAL;
prot = pgprot_val(vma->vm_page_prot);
/*
* Return error if pat is not enabled and write_combine is requested.
* Caller can followup with UC MINUS request and add a WC mtrr if there
* is a free mtrr slot.
*/
if (!pat_enabled() && write_combine)
return -EINVAL;
if (pat_enabled() && write_combine)
prot |= cachemode2protval(_PAGE_CACHE_MODE_WC);
else if (pat_enabled() || boot_cpu_data.x86 > 3)
/*
* ioremap() and ioremap_nocache() defaults to UC MINUS for now.
* To avoid attribute conflicts, request UC MINUS here
* as well.
*/
prot |= cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS);
vma->vm_page_prot = __pgprot(prot);
if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
vma->vm_ops = &pci_mmap_ops;
return 0;
}

@@ -46,12 +46,9 @@ struct pci_dev;
#define PCI_DMA_BUS_IS_PHYS (1)
/* Map a range of PCI memory or I/O space for a device into user space */
int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP 1
#define HAVE_PCI_MMAP 1
#define arch_can_pci_mmap_io() 1
#endif /* __KERNEL__ */

@@ -333,25 +333,6 @@ __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
return -EINVAL;
}
/*
* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
* device mapping.
*/
static __inline__ void
__pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
int prot = pgprot_val(vma->vm_page_prot);
/* Set to write-through */
prot = (prot & _PAGE_CA_MASK) | _PAGE_CA_WT;
#if 0
if (!write_combine)
prot |= _PAGE_WRITETHRU;
#endif
vma->vm_page_prot = __pgprot(prot);
}
/*
* Perform the actual remap of the pages for a PCI device mapping, as
* appropriate for this architecture. The region in the process to map
@@ -362,7 +343,8 @@ __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
*
* Returns a negative error code on failure, zero on success.
*/
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
int pci_mmap_page_range(struct pci_dev *dev, int bar,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state,
int write_combine)
{
@@ -372,7 +354,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
if (ret < 0)
return ret;
__pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,vma->vm_page_prot);

@@ -4,7 +4,7 @@
obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \
pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
irq.o vpd.o setup-bus.o vc.o
irq.o vpd.o setup-bus.o vc.o mmap.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o

drivers/pci/mmap.c (new file, 99 lines)
@@ -0,0 +1,99 @@
/*
* mmap.c generic PCI resource mmap helper
*
* Copyright © 2017 Amazon.com, Inc. or its affiliates.
*
* Author: David Woodhouse <dwmw2@infradead.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE
/*
* Modern setup: generic pci_mmap_resource_range(), and implement the legacy
* pci_mmap_page_range() (if needed) as a wrapper round it.
*/
#ifdef HAVE_PCI_MMAP
int pci_mmap_page_range(struct pci_dev *pdev, int bar,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
resource_size_t start, end;
pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
/* Adjust vm_pgoff to be the offset within the resource */
vma->vm_pgoff -= start >> PAGE_SHIFT;
return pci_mmap_resource_range(pdev, bar, vma, mmap_state,
write_combine);
}
#endif
static const struct vm_operations_struct pci_phys_vm_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
.access = generic_access_phys,
#endif
};
int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
unsigned long size;
int ret;
size = ((pci_resource_len(pdev, bar) - 1) >> PAGE_SHIFT) + 1;
if (vma->vm_pgoff + vma_pages(vma) > size)
return -EINVAL;
if (write_combine)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
else
vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
if (mmap_state == pci_mmap_io) {
ret = pci_iobar_pfn(pdev, bar, vma);
if (ret)
return ret;
} else
vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT);
vma->vm_ops = &pci_phys_vm_ops;
return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
#elif defined(HAVE_PCI_MMAP) /* && !ARCH_GENERIC_PCI_MMAP_RESOURCE */
/*
* Legacy setup: Implement pci_mmap_resource_range() as a wrapper around
* the architecture's pci_mmap_page_range(), converting to "user visible"
* addresses as necessary.
*/
int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
resource_size_t start, end;
/*
* pci_mmap_page_range() expects the same kind of entry as coming
* from /proc/bus/pci/ which is a "user visible" value. If this is
* different from the resource itself, arch will do necessary fixup.
*/
pci_resource_to_user(pdev, bar, &pdev->resource[bar], &start, &end);
vma->vm_pgoff += start >> PAGE_SHIFT;
return pci_mmap_page_range(pdev, bar, vma, mmap_state, write_combine);
}
#endif
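
For context, a short userspace sketch of how this generic path is ultimately exercised (not part of this commit; the device address 0000:00:00.0 and the page-sized BAR 0 are placeholders):

/* Map BAR 0 through sysfs; the mmap() below ends up in
 * pci_mmap_resource_range() above, with vm_pgoff interpreted as an
 * offset within the BAR.
 */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/bus/pci/devices/0000:00:00.0/resource0", O_RDWR);
	if (fd < 0)
		return 1;

	void *bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
			 fd, 0);
	close(fd);
	if (bar == MAP_FAILED)
		return 1;

	/* ... access device registers through 'bar' ... */
	munmap(bar, 4096);
	return 0;
}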

@@ -980,20 +980,24 @@ void pci_remove_legacy_files(struct pci_bus *b)
}
#endif /* HAVE_PCI_LEGACY */
#ifdef HAVE_PCI_MMAP
#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
enum pci_mmap_api mmap_api)
{
unsigned long nr, start, size, pci_start;
unsigned long nr, start, size;
resource_size_t pci_start = 0, pci_end;
if (pci_resource_len(pdev, resno) == 0)
return 0;
nr = vma_pages(vma);
start = vma->vm_pgoff;
size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
if (mmap_api == PCI_MMAP_PROCFS) {
pci_resource_to_user(pdev, resno, &pdev->resource[resno],
&pci_start, &pci_end);
pci_start >>= PAGE_SHIFT;
}
if (start >= pci_start && start < pci_start + size &&
start + nr <= pci_start + size)
return 1;
@@ -1013,37 +1017,24 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
struct vm_area_struct *vma, int write_combine)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
struct resource *res = attr->private;
int bar = (unsigned long)attr->private;
enum pci_mmap_state mmap_type;
resource_size_t start, end;
int i;
for (i = 0; i < PCI_ROM_RESOURCE; i++)
if (res == &pdev->resource[i])
break;
if (i >= PCI_ROM_RESOURCE)
return -ENODEV;
struct resource *res = &pdev->resource[bar];
if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
return -EINVAL;
if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS)) {
WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
pci_name(pdev), i,
(u64)pci_resource_start(pdev, i),
(u64)pci_resource_len(pdev, i));
pci_name(pdev), bar,
(u64)pci_resource_start(pdev, bar),
(u64)pci_resource_len(pdev, bar));
return -EINVAL;
}
/* pci_mmap_page_range() expects the same kind of entry as coming
* from /proc/bus/pci/ which is a "user visible" value. If this is
* different from the resource itself, arch will do necessary fixup.
*/
pci_resource_to_user(pdev, i, res, &start, &end);
vma->vm_pgoff += start >> PAGE_SHIFT;
mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
return pci_mmap_page_range(pdev, vma, mmap_type, write_combine);
return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine);
}
static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
@@ -1065,22 +1056,18 @@ static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
loff_t off, size_t count, bool write)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
struct resource *res = attr->private;
int bar = (unsigned long)attr->private;
struct resource *res;
unsigned long port = off;
int i;
for (i = 0; i < PCI_ROM_RESOURCE; i++)
if (res == &pdev->resource[i])
break;
if (i >= PCI_ROM_RESOURCE)
return -ENODEV;
res = &pdev->resource[bar];
port += pci_resource_start(pdev, i);
port += pci_resource_start(pdev, bar);
if (port > pci_resource_end(pdev, i))
if (port > pci_resource_end(pdev, bar))
return 0;
if (port + count - 1 > pci_resource_end(pdev, i))
if (port + count - 1 > pci_resource_end(pdev, bar))
return -EINVAL;
switch (count) {
@@ -1170,16 +1157,19 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
} else {
pdev->res_attr[num] = res_attr;
sprintf(res_attr_name, "resource%d", num);
res_attr->mmap = pci_mmap_resource_uc;
}
if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
res_attr->read = pci_read_resource_io;
res_attr->write = pci_write_resource_io;
if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
res_attr->read = pci_read_resource_io;
res_attr->write = pci_write_resource_io;
if (arch_can_pci_mmap_io())
res_attr->mmap = pci_mmap_resource_uc;
} else {
res_attr->mmap = pci_mmap_resource_uc;
}
}
res_attr->attr.name = res_attr_name;
res_attr->attr.mode = S_IRUSR | S_IWUSR;
res_attr->size = pci_resource_len(pdev, num);
res_attr->private = &pdev->resource[num];
res_attr->private = (void *)(unsigned long)num;
retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
if (retval)
kfree(res_attr);
@@ -1207,9 +1197,9 @@ static int pci_create_resource_files(struct pci_dev *pdev)
retval = pci_create_attr(pdev, i, 0);
/* for prefetchable resources, create a WC mappable file */
if (!retval && pdev->resource[i].flags & IORESOURCE_PREFETCH)
if (!retval && arch_can_pci_mmap_wc() &&
pdev->resource[i].flags & IORESOURCE_PREFETCH)
retval = pci_create_attr(pdev, i, 1);
if (retval) {
pci_remove_resource_files(pdev);
return retval;

@@ -21,14 +21,14 @@ void pci_create_firmware_label_files(struct pci_dev *pdev);
void pci_remove_firmware_label_files(struct pci_dev *pdev);
#endif
void pci_cleanup_rom(struct pci_dev *dev);
#ifdef HAVE_PCI_MMAP
enum pci_mmap_api {
PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */
PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */
};
int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai,
enum pci_mmap_api mmap_api);
#endif
int pci_probe_reset_function(struct pci_dev *dev);
/**

@@ -202,6 +202,8 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
#ifdef HAVE_PCI_MMAP
case PCIIOC_MMAP_IS_IO:
if (!arch_can_pci_mmap_io())
return -EINVAL;
fpriv->mmap_state = pci_mmap_io;
break;
@@ -210,14 +212,15 @@
break;
case PCIIOC_WRITE_COMBINE:
if (arg)
fpriv->write_combine = 1;
else
fpriv->write_combine = 0;
break;
if (arch_can_pci_mmap_wc()) {
if (arg)
fpriv->write_combine = 1;
else
fpriv->write_combine = 0;
break;
}
/* If arch decided it can't, fall through... */
#endif /* HAVE_PCI_MMAP */
default:
ret = -EINVAL;
break;
@@ -231,25 +234,35 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
{
struct pci_dev *dev = PDE_DATA(file_inode(file));
struct pci_filp_private *fpriv = file->private_data;
int i, ret, write_combine;
int i, ret, write_combine = 0, res_bit = IORESOURCE_MEM;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
if (fpriv->mmap_state == pci_mmap_io) {
if (!arch_can_pci_mmap_io())
return -EINVAL;
res_bit = IORESOURCE_IO;
}
/* Make sure the caller is mapping a real resource for this device */
for (i = 0; i < PCI_ROM_RESOURCE; i++) {
if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
if (dev->resource[i].flags & res_bit &&
pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
break;
}
if (i >= PCI_ROM_RESOURCE)
return -ENODEV;
if (fpriv->mmap_state == pci_mmap_mem)
write_combine = fpriv->write_combine;
else
write_combine = 0;
ret = pci_mmap_page_range(dev, vma,
if (fpriv->mmap_state == pci_mmap_mem &&
fpriv->write_combine) {
if (dev->resource[i].flags & IORESOURCE_PREFETCH)
write_combine = 1;
else
return -EINVAL;
}
ret = pci_mmap_page_range(dev, i, vma,
fpriv->mmap_state, write_combine);
if (ret < 0)
return ret;

@@ -1617,6 +1617,36 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#include <asm/pci.h>
/* These two functions provide almost identical functionality. Depending
* on the architecture, one will be implemented as a wrapper around the
* other (in drivers/pci/mmap.c).
*
* pci_mmap_resource_range() maps a specific BAR, and vma->vm_pgoff
* is expected to be an offset within that region.
*
* pci_mmap_page_range() is the legacy architecture-specific interface,
* which accepts a "user visible" resource address converted by
* pci_resource_to_user(), as used in the legacy mmap() interface in
* /proc/bus/pci/.
*/
int pci_mmap_resource_range(struct pci_dev *dev, int bar,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
int pci_mmap_page_range(struct pci_dev *pdev, int bar,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
#ifndef arch_can_pci_mmap_wc
#define arch_can_pci_mmap_wc() 0
#endif
#ifndef arch_can_pci_mmap_io
#define arch_can_pci_mmap_io() 0
#define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
#else
int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
#endif
#ifndef pci_root_bus_fwnode
#define pci_root_bus_fwnode(bus) NULL
#endif
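
For platforms that do define arch_can_pci_mmap_io(), the required pci_iobar_pfn() might look like the following sketch for an imaginary architecture whose PCI I/O port space is memory-mapped at a fixed physical base; PCI_IOSPACE_PHYS is invented for the example:

/* Sketch only: turn the within-BAR vm_pgoff of an I/O BAR mapping into
 * a physical pfn, following the pattern expected by the generic
 * pci_mmap_resource_range() in drivers/pci/mmap.c.
 */
int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
{
	resource_size_t ioaddr = pci_resource_start(pdev, bar);

	vma->vm_pgoff += (ioaddr + PCI_IOSPACE_PHYS) >> PAGE_SHIFT;
	return 0;
}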