
Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6

* 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (27 commits)
  PCI: Don't use dmi_name_in_vendors in quirk
  PCI: remove unused AER functions
  PCI/sysfs: move bus cpuaffinity to class dev_attrs
  PCI: add rescan to /sys/.../pci_bus/.../
  PCI: update bridge resources to get more big ranges when allocating space (again)
  KVM: Use pci_store/load_saved_state() around VM device usage
  PCI: Add interfaces to store and load the device saved state
  PCI: Track the size of each saved capability data area
  PCI/e1000e: Add and use pci_disable_link_state_locked()
  x86/PCI: derive pcibios_last_bus from ACPI MCFG
  PCI: add latency tolerance reporting enable/disable support
  PCI: add OBFF enable/disable support
  PCI: add ID-based ordering enable/disable support
  PCI hotplug: acpiphp: assume device is in state D0 after powering on a slot.
  PCI: Set PCIE maxpayload for card during hotplug insertion
  PCI/ACPI: Report _OSC control mask returned on failure to get control
  x86/PCI: irq and pci_ids patch for Intel Panther Point DeviceIDs
  PCI: handle positive error codes
  PCI: check pci_vpd_pci22_wait() return
  PCI: Use ICH6_GPIO_EN in ich6_lpc_acpi_gpio
  ...

Fix up trivial conflicts in include/linux/pci_ids.h: commit a6e5e2be44
moved the intel SMBUS ID definitions to the i2c-i801.c driver.
Linus Torvalds 2011-05-23 15:39:34 -07:00
commit 5e152b4c9e
30 changed files with 818 additions and 177 deletions


@ -74,6 +74,15 @@ Description:
hot-remove the PCI device and any of its children.
Depends on CONFIG_HOTPLUG.
What: /sys/bus/pci/devices/.../pci_bus/.../rescan
Date: May 2011
Contact: Linux PCI developers <linux-pci@vger.kernel.org>
Description:
Writing a non-zero value to this attribute will
force a rescan of the bus and all child buses,
and re-discover devices removed earlier from this
part of the device tree. Depends on CONFIG_HOTPLUG.
What: /sys/bus/pci/devices/.../rescan
Date: January 2009
Contact: Linux PCI developers <linux-pci@vger.kernel.org>
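A hedged user-space sketch of driving the new per-bus rescan attribute follows; the helper and the device path in it are hypothetical, since the real path depends on the machine:

#include <stdio.h>

int main(void)
{
	/* Hypothetical bus path; substitute the bridge/bus of interest. */
	FILE *f = fopen("/sys/bus/pci/devices/0000:00:1c.0/pci_bus/0000:02/rescan", "w");

	if (!f) {
		perror("rescan");
		return 1;
	}
	fputs("1", f);	/* any non-zero value triggers the rescan */
	fclose(f);
	return 0;
}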


@ -135,8 +135,6 @@ void default_teardown_msi_irqs(struct pci_dev *dev);
#include "pci_64.h"
#endif
void dma32_reserve_bootmem(void);
/* implement the pci_ DMA API in terms of the generic device dma_ one */
#include <asm-generic/pci-dma-compat.h>


@ -68,74 +68,10 @@ int dma_set_mask(struct device *dev, u64 mask)
}
EXPORT_SYMBOL(dma_set_mask);
#if defined(CONFIG_X86_64) && !defined(CONFIG_NUMA)
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
static int __init parse_dma32_size_opt(char *p)
{
if (!p)
return -EINVAL;
dma32_bootmem_size = memparse(p, &p);
return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
void __init dma32_reserve_bootmem(void)
{
unsigned long size, align;
if (max_pfn <= MAX_DMA32_PFN)
return;
/*
* check aperture_64.c allocate_aperture() for reason about
* using 512M as goal
*/
align = 64ULL<<20;
size = roundup(dma32_bootmem_size, align);
dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
512ULL<<20);
/*
* Kmemleak should not scan this block as it may not be mapped via the
* kernel direct mapping.
*/
kmemleak_ignore(dma32_bootmem_ptr);
if (dma32_bootmem_ptr)
dma32_bootmem_size = size;
else
dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{
if (max_pfn <= MAX_DMA32_PFN)
return;
if (!dma32_bootmem_ptr)
return;
free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);
dma32_bootmem_ptr = NULL;
dma32_bootmem_size = 0;
}
#else
void __init dma32_reserve_bootmem(void)
{
}
static void __init dma32_free_bootmem(void)
{
}
#endif
void __init pci_iommu_alloc(void)
{
struct iommu_table_entry *p;
/* free the range so iommu could get some range less than 4G */
dma32_free_bootmem();
sort_iommu_table(__iommu_table, __iommu_table_end);
check_iommu_entries(__iommu_table, __iommu_table_end);


@ -964,7 +964,6 @@ void __init setup_arch(char **cmdline_p)
initmem_init();
memblock_find_dma_reserve();
dma32_reserve_bootmem();
#ifdef CONFIG_KVM_CLOCK
kvmclock_init();


@ -280,12 +280,9 @@ void __init pci_direct_init(int type)
int __init pci_direct_probe(void)
{
struct resource *region, *region2;
if ((pci_probe & PCI_PROBE_CONF1) == 0)
goto type2;
region = request_region(0xCF8, 8, "PCI conf1");
if (!region)
if (!request_region(0xCF8, 8, "PCI conf1"))
goto type2;
if (pci_check_type1()) {
@ -293,16 +290,14 @@ int __init pci_direct_probe(void)
port_cf9_safe = true;
return 1;
}
release_resource(region);
release_region(0xCF8, 8);
type2:
if ((pci_probe & PCI_PROBE_CONF2) == 0)
return 0;
region = request_region(0xCF8, 4, "PCI conf2");
if (!region)
if (!request_region(0xCF8, 4, "PCI conf2"))
return 0;
region2 = request_region(0xC000, 0x1000, "PCI conf2");
if (!region2)
if (!request_region(0xC000, 0x1000, "PCI conf2"))
goto fail2;
if (pci_check_type2()) {
@ -311,8 +306,8 @@ int __init pci_direct_probe(void)
return 2;
}
release_resource(region2);
release_region(0xC000, 0x1000);
fail2:
release_resource(region);
release_region(0xCF8, 4);
return 0;
}


@ -602,7 +602,9 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
|| (device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN &&
device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX)
|| (device >= PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN &&
device <= PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX)) {
device <= PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX)
|| (device >= PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN &&
device <= PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX)) {
r->name = "PIIX/ICH";
r->get = pirq_piix_get;
r->set = pirq_piix_set;


@ -606,6 +606,16 @@ static void __init __pci_mmcfg_init(int early)
if (list_empty(&pci_mmcfg_list))
return;
if (pcibios_last_bus < 0) {
const struct pci_mmcfg_region *cfg;
list_for_each_entry(cfg, &pci_mmcfg_list, list) {
if (cfg->segment)
break;
pcibios_last_bus = cfg->end_bus;
}
}
if (pci_mmcfg_arch_init())
pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
else {


@ -596,12 +596,18 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
dev_info(root->bus->bridge,
"ACPI _OSC control (0x%02x) granted\n", flags);
} else {
dev_dbg(root->bus->bridge,
"ACPI _OSC request failed (code %d)\n", status);
printk(KERN_INFO "Unable to assume _OSC PCIe control. "
"Disabling ASPM\n");
dev_info(root->bus->bridge,
"ACPI _OSC request failed (%s), "
"returned control mask: 0x%02x\n",
acpi_format_exception(status), flags);
pr_info("ACPI _OSC control for PCIe not granted, "
"disabling ASPM\n");
pcie_no_aspm();
}
} else {
dev_info(root->bus->bridge,
"Unable to request _OSC control "
"(_OSC support mask: 0x%02x)\n", flags);
}
pci_acpi_add_bus_pm_notifier(device, root->bus);


@ -137,6 +137,7 @@
/* Older devices have their ID defined in <linux/pci_ids.h> */
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22
/* Patsburg also has three 'Integrated Device Function' SMBus controllers */
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71


@ -5361,7 +5361,7 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
#ifdef CONFIG_PCIEASPM
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
pci_disable_link_state(pdev, state);
pci_disable_link_state_locked(pdev, state);
}
#else
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)


@ -143,33 +143,41 @@ static noinline void pci_wait_ucfg(struct pci_dev *dev)
__remove_wait_queue(&pci_ucfg_wait, &wait);
}
/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size,type) \
int pci_user_read_config_##size \
(struct pci_dev *dev, int pos, type *val) \
{ \
int ret = 0; \
u32 data = -1; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
if (PCI_##size##_BAD) \
return -EINVAL; \
raw_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
pos, sizeof(type), &data); \
raw_spin_unlock_irq(&pci_lock); \
*val = (type)data; \
if (ret > 0) \
ret = -EINVAL; \
return ret; \
}
/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size,type) \
int pci_user_write_config_##size \
(struct pci_dev *dev, int pos, type val) \
{ \
int ret = -EIO; \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
if (PCI_##size##_BAD) \
return -EINVAL; \
raw_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
pos, sizeof(type), val); \
raw_spin_unlock_irq(&pci_lock); \
if (ret > 0) \
ret = -EINVAL; \
return ret; \
}
@ -197,6 +205,8 @@ struct pci_vpd_pci22 {
* This code has to spin since there is no other notification from the PCI
* hardware. Since the VPD is often implemented by serial attachment to an
* EEPROM, it may take many milliseconds to complete.
*
* Returns 0 on success, negative values indicate error.
*/
static int pci_vpd_pci22_wait(struct pci_dev *dev)
{
@ -212,7 +222,7 @@ static int pci_vpd_pci22_wait(struct pci_dev *dev)
for (;;) {
ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
&status);
if (ret)
if (ret < 0)
return ret;
if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
@ -324,6 +334,8 @@ static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count
vpd->busy = true;
vpd->flag = 0;
ret = pci_vpd_pci22_wait(dev);
if (ret < 0)
break;
pos += sizeof(u32);
}


@ -163,12 +163,6 @@ int pci_bus_add_child(struct pci_bus *bus)
bus->is_added = 1;
retval = device_create_file(&bus->dev, &dev_attr_cpuaffinity);
if (retval)
return retval;
retval = device_create_file(&bus->dev, &dev_attr_cpulistaffinity);
/* Create legacy_io and legacy_mem files for this bus */
pci_create_legacy_files(bus);


@ -827,6 +827,13 @@ static int __ref enable_device(struct acpiphp_slot *slot)
acpiphp_set_hpp_values(bus);
acpiphp_set_acpi_region(slot);
pci_enable_bridges(bus);
list_for_each_entry(dev, &bus->devices, bus_list) {
/* Assume that newly added devices are powered on already. */
if (!dev->is_added)
dev->current_state = PCI_D0;
}
pci_bus_add_devices(bus);
list_for_each_entry(func, &slot->funcs, sibling) {


@ -158,6 +158,47 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
*/
}
/* Program PCIE MaxPayload setting on device: ensure parent maxpayload <= device */
static int pci_set_payload(struct pci_dev *dev)
{
int pos, ppos;
u16 pctl, psz;
u16 dctl, dsz, dcap, dmax;
struct pci_dev *parent;
parent = dev->bus->self;
pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
if (!pos)
return 0;
/* Read Device MaxPayload capability and setting */
pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &dctl);
pci_read_config_word(dev, pos + PCI_EXP_DEVCAP, &dcap);
dsz = (dctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
dmax = (dcap & PCI_EXP_DEVCAP_PAYLOAD);
/* Read Parent MaxPayload setting */
ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
if (!ppos)
return 0;
pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
psz = (pctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
/* If parent payload > device max payload -> error
* If parent payload > device payload -> set speed
* If parent payload <= device payload -> do nothing
*/
if (psz > dmax)
return -1;
else if (psz > dsz) {
dev_info(&dev->dev, "Setting MaxPayload to %d\n", 128 << psz);
pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
(dctl & ~PCI_EXP_DEVCTL_PAYLOAD) +
(psz << 5));
}
return 0;
}
void pci_configure_slot(struct pci_dev *dev)
{
struct pci_dev *cdev;
@ -169,6 +210,10 @@ void pci_configure_slot(struct pci_dev *dev)
(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
return;
ret = pci_set_payload(dev);
if (ret)
dev_warn(&dev->dev, "could not set device max payload\n");
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
if (ret)


@ -108,6 +108,40 @@ static ssize_t local_cpulist_show(struct device *dev,
return len;
}
/*
* PCI Bus Class Devices
*/
static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
int type,
struct device_attribute *attr,
char *buf)
{
int ret;
const struct cpumask *cpumask;
cpumask = cpumask_of_pcibus(to_pci_bus(dev));
ret = type ?
cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask) :
cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask);
buf[ret++] = '\n';
buf[ret] = '\0';
return ret;
}
static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
}
static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return pci_bus_show_cpuaffinity(dev, 1, attr, buf);
}
/* show resources */
static ssize_t
resource_show(struct device * dev, struct device_attribute *attr, char * buf)
@ -318,6 +352,25 @@ remove_store(struct device *dev, struct device_attribute *dummy,
count = ret;
return count;
}
static ssize_t
dev_bus_rescan_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long val;
struct pci_bus *bus = to_pci_bus(dev);
if (strict_strtoul(buf, 0, &val) < 0)
return -EINVAL;
if (val) {
mutex_lock(&pci_remove_rescan_mutex);
pci_rescan_bus(bus);
mutex_unlock(&pci_remove_rescan_mutex);
}
return count;
}
#endif
struct device_attribute pci_dev_attrs[] = {
@ -347,6 +400,15 @@ struct device_attribute pci_dev_attrs[] = {
__ATTR_NULL,
};
struct device_attribute pcibus_dev_attrs[] = {
#ifdef CONFIG_HOTPLUG
__ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store),
#endif
__ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpumaskaffinity, NULL),
__ATTR(cpulistaffinity, S_IRUGO, pci_bus_show_cpulistaffinity, NULL),
__ATTR_NULL,
};
static ssize_t
boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
{


@ -830,7 +830,7 @@ static int pci_save_pcie_state(struct pci_dev *dev)
dev_err(&dev->dev, "buffer not found in %s\n", __func__);
return -ENOMEM;
}
cap = (u16 *)&save_state->data[0];
cap = (u16 *)&save_state->cap.data[0];
pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
@ -863,7 +863,7 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
if (!save_state || pos <= 0)
return;
cap = (u16 *)&save_state->data[0];
cap = (u16 *)&save_state->cap.data[0];
pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
@ -899,7 +899,8 @@ static int pci_save_pcix_state(struct pci_dev *dev)
return -ENOMEM;
}
pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data);
pci_read_config_word(dev, pos + PCI_X_CMD,
(u16 *)save_state->cap.data);
return 0;
}
@ -914,7 +915,7 @@ static void pci_restore_pcix_state(struct pci_dev *dev)
pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (!save_state || pos <= 0)
return;
cap = (u16 *)&save_state->data[0];
cap = (u16 *)&save_state->cap.data[0];
pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}
@ -975,6 +976,104 @@ void pci_restore_state(struct pci_dev *dev)
dev->state_saved = false;
}
struct pci_saved_state {
u32 config_space[16];
struct pci_cap_saved_data cap[0];
};
/**
* pci_store_saved_state - Allocate and return an opaque struct containing
* the device saved state.
* @dev: PCI device that we're dealing with
*
* Return NULL if no state or error.
*/
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
struct pci_saved_state *state;
struct pci_cap_saved_state *tmp;
struct pci_cap_saved_data *cap;
struct hlist_node *pos;
size_t size;
if (!dev->state_saved)
return NULL;
size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
state = kzalloc(size, GFP_KERNEL);
if (!state)
return NULL;
memcpy(state->config_space, dev->saved_config_space,
sizeof(state->config_space));
cap = state->cap;
hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
memcpy(cap, &tmp->cap, len);
cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
}
/* Empty cap_save terminates list */
return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);
/**
* pci_load_saved_state - Reload the provided save state into struct pci_dev.
* @dev: PCI device that we're dealing with
* @state: Saved state returned from pci_store_saved_state()
*/
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
struct pci_cap_saved_data *cap;
dev->state_saved = false;
if (!state)
return 0;
memcpy(dev->saved_config_space, state->config_space,
sizeof(state->config_space));
cap = state->cap;
while (cap->size) {
struct pci_cap_saved_state *tmp;
tmp = pci_find_saved_cap(dev, cap->cap_nr);
if (!tmp || tmp->cap.size != cap->size)
return -EINVAL;
memcpy(tmp->cap.data, cap->data, tmp->cap.size);
cap = (struct pci_cap_saved_data *)((u8 *)cap +
sizeof(struct pci_cap_saved_data) + cap->size);
}
dev->state_saved = true;
return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);
/**
* pci_load_and_free_saved_state - Reload the save state pointed to by state,
* and free the memory allocated for it.
* @dev: PCI device that we're dealing with
* @state: Pointer to saved state returned from pci_store_saved_state()
*/
int pci_load_and_free_saved_state(struct pci_dev *dev,
struct pci_saved_state **state)
{
int ret = pci_load_saved_state(dev, *state);
kfree(*state);
*state = NULL;
return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
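/*
 * Hedged usage sketch (not part of this change): a caller such as a device
 * assignment path can snapshot a device with pci_save_state() +
 * pci_store_saved_state() and later reload the opaque copy with
 * pci_load_and_free_saved_state().  Names prefixed my_ are hypothetical.
 */
static struct pci_saved_state *my_saved_state;

static void my_capture_state(struct pci_dev *pdev)
{
	pci_save_state(pdev);				/* fill dev->saved_* buffers */
	my_saved_state = pci_store_saved_state(pdev);	/* opaque copy, may be NULL */
}

static void my_restore_state(struct pci_dev *pdev)
{
	/* Reload the copy (if any) into the device's save buffers, free it,
	 * then write the restored state back to the hardware. */
	if (pci_load_and_free_saved_state(pdev, &my_saved_state))
		dev_warn(&pdev->dev, "could not reload saved state\n");
	else
		pci_restore_state(pdev);
}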
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
int err;
@ -1771,7 +1870,8 @@ static int pci_add_cap_save_buffer(
if (!save_state)
return -ENOMEM;
save_state->cap_nr = cap;
save_state->cap.cap_nr = cap;
save_state->cap.size = size;
pci_add_saved_cap(dev, save_state);
return 0;
@ -1834,6 +1934,300 @@ void pci_enable_ari(struct pci_dev *dev)
bridge->ari_enabled = 1;
}
/**
* pci_enable_ido - enable ID-based ordering on a device
* @dev: the PCI device
* @type: which types of IDO to enable
*
* Enable ID-based ordering on @dev. @type can contain the bits
* %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
* which types of transactions are allowed to be re-ordered.
*/
void pci_enable_ido(struct pci_dev *dev, unsigned long type)
{
int pos;
u16 ctrl;
pos = pci_pcie_cap(dev);
if (!pos)
return;
pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
if (type & PCI_EXP_IDO_REQUEST)
ctrl |= PCI_EXP_IDO_REQ_EN;
if (type & PCI_EXP_IDO_COMPLETION)
ctrl |= PCI_EXP_IDO_CMP_EN;
pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_enable_ido);
/**
* pci_disable_ido - disable ID-based ordering on a device
* @dev: the PCI device
* @type: which types of IDO to disable
*/
void pci_disable_ido(struct pci_dev *dev, unsigned long type)
{
int pos;
u16 ctrl;
if (!pci_is_pcie(dev))
return;
pos = pci_pcie_cap(dev);
if (!pos)
return;
pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
if (type & PCI_EXP_IDO_REQUEST)
ctrl &= ~PCI_EXP_IDO_REQ_EN;
if (type & PCI_EXP_IDO_COMPLETION)
ctrl &= ~PCI_EXP_IDO_CMP_EN;
pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ido);
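/*
 * Hedged illustration (not part of this change): a driver that knows its
 * transactions are safe to reorder could enable both IDO types at probe
 * time and disable them again on remove.  The my_* names are hypothetical.
 */
static void my_enable_ido(struct pci_dev *pdev)
{
	/* Allow both request and completion TLPs to be reordered by ID. */
	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
}

static void my_disable_ido(struct pci_dev *pdev)
{
	pci_disable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
}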
/**
* pci_enable_obff - enable optimized buffer flush/fill
* @dev: PCI device
* @type: type of signaling to use
*
* Try to enable @type OBFF signaling on @dev. It will try using WAKE#
* signaling if possible, falling back to message signaling only if
* WAKE# isn't supported. @type should indicate whether the PCIe link
* should be brought out of L0s or L1 to send the message. It should be
* either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0.
*
* If your device can benefit from receiving all messages, even at the
* power cost of bringing the link back up from a low power state, use
* %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the
* preferred type).
*
* RETURNS:
* Zero on success, appropriate error number on failure.
*/
int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
{
int pos;
u32 cap;
u16 ctrl;
int ret;
if (!pci_is_pcie(dev))
return -ENOTSUPP;
pos = pci_pcie_cap(dev);
if (!pos)
return -ENOTSUPP;
pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
if (!(cap & PCI_EXP_OBFF_MASK))
return -ENOTSUPP; /* no OBFF support at all */
/* Make sure the topology supports OBFF as well */
if (dev->bus) {
ret = pci_enable_obff(dev->bus->self, type);
if (ret)
return ret;
}
pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
if (cap & PCI_EXP_OBFF_WAKE)
ctrl |= PCI_EXP_OBFF_WAKE_EN;
else {
switch (type) {
case PCI_EXP_OBFF_SIGNAL_L0:
if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
ctrl |= PCI_EXP_OBFF_MSGA_EN;
break;
case PCI_EXP_OBFF_SIGNAL_ALWAYS:
ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
ctrl |= PCI_EXP_OBFF_MSGB_EN;
break;
default:
WARN(1, "bad OBFF signal type\n");
return -ENOTSUPP;
}
}
pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
return 0;
}
EXPORT_SYMBOL(pci_enable_obff);
/**
* pci_disable_obff - disable optimized buffer flush/fill
* @dev: PCI device
*
* Disable OBFF on @dev.
*/
void pci_disable_obff(struct pci_dev *dev)
{
int pos;
u16 ctrl;
if (!pci_is_pcie(dev))
return;
pos = pci_pcie_cap(dev);
if (!pos)
return;
pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_obff);
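/*
 * Hedged illustration (not part of this change): a probe-time helper that
 * requests the preferred L0-only OBFF signaling; the core picks WAKE# over
 * message signaling internally when the capability allows it.  my_setup_obff
 * is a hypothetical name.
 */
static void my_setup_obff(struct pci_dev *pdev)
{
	int err = pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0);

	if (err)
		dev_dbg(&pdev->dev, "OBFF not enabled: %d\n", err);
}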
/**
* pci_ltr_supported - check whether a device supports LTR
* @dev: PCI device
*
* RETURNS:
* True if @dev supports latency tolerance reporting, false otherwise.
*/
bool pci_ltr_supported(struct pci_dev *dev)
{
int pos;
u32 cap;
if (!pci_is_pcie(dev))
return false;
pos = pci_pcie_cap(dev);
if (!pos)
return false;
pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
return cap & PCI_EXP_DEVCAP2_LTR;
}
EXPORT_SYMBOL(pci_ltr_supported);
/**
* pci_enable_ltr - enable latency tolerance reporting
* @dev: PCI device
*
* Enable LTR on @dev if possible, which means enabling it first on
* upstream ports.
*
* RETURNS:
* Zero on success, errno on failure.
*/
int pci_enable_ltr(struct pci_dev *dev)
{
int pos;
u16 ctrl;
int ret;
if (!pci_ltr_supported(dev))
return -ENOTSUPP;
pos = pci_pcie_cap(dev);
if (!pos)
return -ENOTSUPP;
/* Only primary function can enable/disable LTR */
if (PCI_FUNC(dev->devfn) != 0)
return -EINVAL;
/* Enable upstream ports first */
if (dev->bus) {
ret = pci_enable_ltr(dev->bus->self);
if (ret)
return ret;
}
pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
ctrl |= PCI_EXP_LTR_EN;
pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
return 0;
}
EXPORT_SYMBOL(pci_enable_ltr);
/**
* pci_disable_ltr - disable latency tolerance reporting
* @dev: PCI device
*/
void pci_disable_ltr(struct pci_dev *dev)
{
int pos;
u16 ctrl;
if (!pci_ltr_supported(dev))
return;
pos = pci_pcie_cap(dev);
if (!pos)
return;
/* Only primary function can enable/disable LTR */
if (PCI_FUNC(dev->devfn) != 0)
return;
pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
ctrl &= ~PCI_EXP_LTR_EN;
pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ltr);
static int __pci_ltr_scale(int *val)
{
int scale = 0;
while (*val > 1023) {
*val = (*val + 31) / 32;
scale++;
}
return scale;
}
/**
* pci_set_ltr - set LTR latency values
* @dev: PCI device
* @snoop_lat_ns: snoop latency in nanoseconds
* @nosnoop_lat_ns: nosnoop latency in nanoseconds
*
* Figure out the scale and set the LTR values accordingly.
*/
int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
{
int pos, ret, snoop_scale, nosnoop_scale;
u16 val;
if (!pci_ltr_supported(dev))
return -ENOTSUPP;
snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
return -EINVAL;
if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
(nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
return -EINVAL;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
if (!pos)
return -ENOTSUPP;
val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
if (ret != 4)
return -EIO;
val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
if (ret != 4)
return -EIO;
return 0;
}
EXPORT_SYMBOL(pci_set_ltr);
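/*
 * Hedged illustration (not part of this change): the expected LTR flow for
 * a driver.  The 3100 ns figures are made up; pci_set_ltr() encodes each
 * value as a 10-bit latency plus a scale, so 3100 ns becomes 97 units of
 * 32 ns (about 3104 ns) via __pci_ltr_scale().  my_setup_ltr is hypothetical.
 */
static void my_setup_ltr(struct pci_dev *pdev)
{
	if (!pci_ltr_supported(pdev))
		return;
	if (pci_enable_ltr(pdev))
		return;
	if (pci_set_ltr(pdev, 3100, 3100))
		dev_dbg(&pdev->dev, "failed to set LTR values\n");
}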
static int pci_acs_enable;
/**
@ -2479,6 +2873,21 @@ clear:
return 0;
}
/**
* pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
* @dev: Device to reset.
* @probe: If set, only check if the device can be reset this way.
*
* If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
* unset, it will be reinitialized internally when going from PCI_D3hot to
* PCI_D0. If that's the case and the device is not in a low-power state
* already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
*
* NOTE: This causes the caller to sleep for twice the device power transition
* cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
* by default (i.e. unless the @dev's d3_delay field has a different value).
* Moreover, only devices in D0 can be reset by this function.
*/
static int pci_pm_reset(struct pci_dev *dev, int probe)
{
u16 csr;


@ -156,8 +156,7 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
}
extern struct device_attribute pci_dev_attrs[];
extern struct device_attribute dev_attr_cpuaffinity;
extern struct device_attribute dev_attr_cpulistaffinity;
extern struct device_attribute pcibus_dev_attrs[];
#ifdef CONFIG_HOTPLUG
extern struct bus_attribute pci_bus_attrs[];
#else


@ -326,7 +326,7 @@ static int aer_inject(struct aer_error_inj *einj)
unsigned long flags;
unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
int pos_cap_err, rp_pos_cap_err;
u32 sever, cor_mask, uncor_mask, cor_mask_orig, uncor_mask_orig;
u32 sever, cor_mask, uncor_mask, cor_mask_orig = 0, uncor_mask_orig = 0;
int ret = 0;
dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn);


@ -114,15 +114,6 @@ extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
extern void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info);
extern irqreturn_t aer_irq(int irq, void *context);
#ifdef CONFIG_ACPI
extern int aer_osc_setup(struct pcie_device *pciedev);
#else
static inline int aer_osc_setup(struct pcie_device *pciedev)
{
return 0;
}
#endif
#ifdef CONFIG_ACPI_APEI
extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
#else


@ -608,7 +608,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
* the BIOS's expectation, we'll do so once pci_enable_device() is
* called.
*/
if (aspm_policy != POLICY_POWERSAVE) {
if (aspm_policy != POLICY_POWERSAVE || aspm_clear_state) {
pcie_config_aspm_path(link);
pcie_set_clkpm(link, policy_to_clkpm_state(link));
}
@ -734,7 +734,7 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
* pci_disable_link_state - disable pci device's link state, so the link will
* never enter specific states
*/
void pci_disable_link_state(struct pci_dev *pdev, int state)
static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
{
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link;
@ -747,7 +747,8 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
if (!parent || !parent->link_state)
return;
down_read(&pci_bus_sem);
if (sem)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
link = parent->link_state;
if (state & PCIE_LINK_STATE_L0S)
@ -761,7 +762,19 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
pcie_set_clkpm(link, 0);
}
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
if (sem)
up_read(&pci_bus_sem);
}
void pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
__pci_disable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);
void pci_disable_link_state(struct pci_dev *pdev, int state)
{
__pci_disable_link_state(pdev, state, true);
}
EXPORT_SYMBOL(pci_disable_link_state);
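/*
 * Hedged illustration (not part of this change): the only difference
 * between the two exported entry points is whether pci_bus_sem is taken
 * internally.  A path that already holds pci_bus_sem (assumed here) must
 * use the _locked variant.  The my_* helpers are hypothetical.
 */
static void my_disable_aspm_locked(struct pci_dev *pdev)
{
	/* pci_bus_sem already held by the caller: must not retake it. */
	pci_disable_link_state_locked(pdev,
			PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
}

static void my_disable_aspm(struct pci_dev *pdev)
{
	/* No PCI locks held: the helper takes pci_bus_sem itself. */
	pci_disable_link_state(pdev,
			PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
}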


@ -42,43 +42,6 @@ int no_pci_devices(void)
}
EXPORT_SYMBOL(no_pci_devices);
/*
* PCI Bus Class Devices
*/
static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
int type,
struct device_attribute *attr,
char *buf)
{
int ret;
const struct cpumask *cpumask;
cpumask = cpumask_of_pcibus(to_pci_bus(dev));
ret = type?
cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask) :
cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask);
buf[ret++] = '\n';
buf[ret] = '\0';
return ret;
}
static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
}
static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return pci_bus_show_cpuaffinity(dev, 1, attr, buf);
}
DEVICE_ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpumaskaffinity, NULL);
DEVICE_ATTR(cpulistaffinity, S_IRUGO, pci_bus_show_cpulistaffinity, NULL);
/*
* PCI Bus Class
*/
@ -95,6 +58,7 @@ static void release_pcibus_dev(struct device *dev)
static struct class pcibus_class = {
.name = "pci_bus",
.dev_release = &release_pcibus_dev,
.dev_attrs = pcibus_dev_attrs,
};
static int __init pcibus_class_init(void)
@ -1455,9 +1419,6 @@ struct pci_bus * pci_create_bus(struct device *parent,
error = device_register(&b->dev);
if (error)
goto class_dev_reg_err;
error = device_create_file(&b->dev, &dev_attr_cpuaffinity);
if (error)
goto dev_create_file_err;
/* Create legacy_io and legacy_mem files for this bus */
pci_create_legacy_files(b);
@ -1468,8 +1429,6 @@ struct pci_bus * pci_create_bus(struct device *parent,
return b;
dev_create_file_err:
device_unregister(&b->dev);
class_dev_reg_err:
device_unregister(dev);
dev_reg_err:


@ -606,7 +606,7 @@ static void __devinit ich6_lpc_acpi_gpio(struct pci_dev *dev)
}
pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
if (enable & ICH4_GPIO_EN) {
if (enable & ICH6_GPIO_EN) {
pci_read_config_dword(dev, ICH6_GPIOBASE, &region);
region &= PCI_BASE_ADDRESS_IO_MASK;
if (region >= PCIBIOS_MIN_IO)
@ -681,7 +681,7 @@ static void __devinit ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
/* ICH7-10 has the same common LPC generic IO decode registers */
static void __devinit quirk_ich7_lpc(struct pci_dev *dev)
{
/* We share the common ACPI/DPIO decode with ICH6 */
/* We share the common ACPI/GPIO decode with ICH6 */
ich6_lpc_acpi_gpio(dev);
/* And have 4 ICH7+ generic decodes */
@ -2349,8 +2349,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
*/
static void __devinit nvenet_msi_disable(struct pci_dev *dev)
{
if (dmi_name_in_vendors("P5N32-SLI PREMIUM") ||
dmi_name_in_vendors("P5N32-E SLI")) {
const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
if (board_name &&
(strstr(board_name, "P5N32-SLI PREMIUM") ||
strstr(board_name, "P5N32-E SLI"))) {
dev_info(&dev->dev,
"Disabling msi for MCP55 NIC on P5N32-SLI\n");
dev->no_msi = 1;
@ -2784,6 +2787,16 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
#endif
static void __devinit fixup_ti816x_class(struct pci_dev* dev)
{
/* TI 816x devices do not have class code set when in PCIe boot mode */
if (dev->class == PCI_CLASS_NOT_DEFINED) {
dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_TI, 0xb800, fixup_ti816x_class);
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
struct pci_fixup *end)
{


@ -73,8 +73,6 @@ void pci_remove_bus(struct pci_bus *pci_bus)
return;
pci_remove_legacy_files(pci_bus);
device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity);
device_remove_file(&pci_bus->dev, &dev_attr_cpulistaffinity);
device_unregister(&pci_bus->dev);
}
EXPORT_SYMBOL(pci_remove_bus);


@ -991,30 +991,139 @@ static void pci_bus_dump_resources(struct pci_bus *bus)
}
}
static int __init pci_bus_get_depth(struct pci_bus *bus)
{
int depth = 0;
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
int ret;
struct pci_bus *b = dev->subordinate;
if (!b)
continue;
ret = pci_bus_get_depth(b);
if (ret + 1 > depth)
depth = ret + 1;
}
return depth;
}
static int __init pci_get_max_depth(void)
{
int depth = 0;
struct pci_bus *bus;
list_for_each_entry(bus, &pci_root_buses, node) {
int ret;
ret = pci_bus_get_depth(bus);
if (ret > depth)
depth = ret;
}
return depth;
}
/*
* first try will not touch PCI bridge resources
* second and later tries will clear small leaf bridge resources
* stop at the max depth if a good assignment cannot be found
*/
void __init
pci_assign_unassigned_resources(void)
{
struct pci_bus *bus;
struct resource_list_x add_list; /* list of resources that
want additional resources */
int tried_times = 0;
enum release_type rel_type = leaf_only;
struct resource_list_x head, *list;
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
unsigned long failed_type;
int max_depth = pci_get_max_depth();
int pci_try_num;
head.next = NULL;
add_list.next = NULL;
pci_try_num = max_depth + 1;
printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
max_depth, pci_try_num);
again:
/* Depth first, calculate sizes and alignments of all
subordinate buses. */
list_for_each_entry(bus, &pci_root_buses, node) {
list_for_each_entry(bus, &pci_root_buses, node)
__pci_bus_size_bridges(bus, &add_list);
}
/* Depth last, allocate resources and update the hardware. */
list_for_each_entry(bus, &pci_root_buses, node) {
__pci_bus_assign_resources(bus, &add_list, NULL);
pci_enable_bridges(bus);
}
list_for_each_entry(bus, &pci_root_buses, node)
__pci_bus_assign_resources(bus, &add_list, &head);
BUG_ON(add_list.next);
tried_times++;
/* any device complain? */
if (!head.next)
goto enable_and_dump;
failed_type = 0;
for (list = head.next; list;) {
failed_type |= list->flags;
list = list->next;
}
/*
* I/O ports are tight, so don't retry for them,
* and if the retry limit is reached, don't try any more
*/
failed_type &= type_mask;
if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) {
free_list(resource_list_x, &head);
goto enable_and_dump;
}
printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
tried_times + 1);
/* the third and later tries will not check whether it is a leaf */
if ((tried_times + 1) > 2)
rel_type = whole_subtree;
/*
* Try to release leaf bridge resources that don't fit the resources of
* child devices under that bridge
*/
for (list = head.next; list;) {
bus = list->dev->bus;
pci_bus_release_bridge_resources(bus, list->flags & type_mask,
rel_type);
list = list->next;
}
/* restore size and flags */
for (list = head.next; list;) {
struct resource *res = list->res;
res->start = list->start;
res->end = list->end;
res->flags = list->flags;
if (list->dev->subordinate)
res->flags = 0;
list = list->next;
}
free_list(resource_list_x, &head);
goto again;
enable_and_dump:
/* Depth last, update the hardware. */
list_for_each_entry(bus, &pci_root_buses, node)
pci_enable_bridges(bus);
/* dump the resource on buses */
list_for_each_entry(bus, &pci_root_buses, node) {
list_for_each_entry(bus, &pci_root_buses, node)
pci_bus_dump_resources(bus);
}
}
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)


@ -517,6 +517,7 @@ struct kvm_assigned_dev_kernel {
struct kvm *kvm;
spinlock_t intx_lock;
char irq_name[32];
struct pci_saved_state *pci_saved_state;
};
struct kvm_irq_mask_notifier {


@ -28,6 +28,7 @@ extern void pcie_aspm_exit_link_state(struct pci_dev *pdev);
extern void pcie_aspm_pm_state_change(struct pci_dev *pdev);
extern void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
extern void pci_disable_link_state(struct pci_dev *pdev, int state);
extern void pci_disable_link_state_locked(struct pci_dev *pdev, int state);
extern void pcie_clear_aspm(void);
extern void pcie_no_aspm(void);
#else


@ -214,10 +214,15 @@ enum pci_bus_speed {
PCI_SPEED_UNKNOWN = 0xff,
};
struct pci_cap_saved_data {
char cap_nr;
unsigned int size;
u32 data[0];
};
struct pci_cap_saved_state {
struct hlist_node next;
char cap_nr;
u32 data[0];
struct pci_cap_saved_data cap;
};
struct pcie_link_state;
@ -366,7 +371,7 @@ static inline struct pci_cap_saved_state *pci_find_saved_cap(
struct hlist_node *pos;
hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
if (tmp->cap_nr == cap)
if (tmp->cap.cap_nr == cap)
return tmp;
}
return NULL;
@ -807,6 +812,10 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
/* Power management related routines */
int pci_save_state(struct pci_dev *dev);
void pci_restore_state(struct pci_dev *dev);
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state);
int pci_load_and_free_saved_state(struct pci_dev *dev,
struct pci_saved_state **state);
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
@ -828,6 +837,23 @@ static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
return __pci_enable_wake(dev, state, false, enable);
}
#define PCI_EXP_IDO_REQUEST (1<<0)
#define PCI_EXP_IDO_COMPLETION (1<<1)
void pci_enable_ido(struct pci_dev *dev, unsigned long type);
void pci_disable_ido(struct pci_dev *dev, unsigned long type);
enum pci_obff_signal_type {
PCI_EXP_OBFF_SIGNAL_L0,
PCI_EXP_OBFF_SIGNAL_ALWAYS,
};
int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type);
void pci_disable_obff(struct pci_dev *dev);
bool pci_ltr_supported(struct pci_dev *dev);
int pci_enable_ltr(struct pci_dev *dev);
void pci_disable_ltr(struct pci_dev *dev);
int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns);
/* For use by arch with custom probe code */
void set_pcie_port_type(struct pci_dev *pdev);
void set_pcie_hotplug_bridge(struct pci_dev *pdev);
@ -1207,6 +1233,23 @@ static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
return 0;
}
static inline void pci_enable_ido(struct pci_dev *dev, unsigned long type)
{
}
static inline void pci_disable_ido(struct pci_dev *dev, unsigned long type)
{
}
static inline int pci_enable_obff(struct pci_dev *dev, unsigned long type)
{
return 0;
}
static inline void pci_disable_obff(struct pci_dev *dev)
{
}
static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{
return -EIO;


@ -2483,6 +2483,8 @@
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f
#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0 0x1d40
#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1 0x1d41
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f
#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410


@ -508,8 +508,18 @@
#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */
#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
#define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */
#define PCI_EXP_DEVCAP2_LTR 0x800 /* Latency tolerance reporting */
#define PCI_EXP_OBFF_MASK 0xc0000 /* OBFF support mechanism */
#define PCI_EXP_OBFF_MSG 0x40000 /* New message signaling */
#define PCI_EXP_OBFF_WAKE 0x80000 /* Re-use WAKE# for OBFF */
#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
#define PCI_EXP_DEVCTL2_ARI 0x20 /* Alternative Routing-ID */
#define PCI_EXP_IDO_REQ_EN 0x100 /* ID-based ordering request enable */
#define PCI_EXP_IDO_CMP_EN 0x200 /* ID-based ordering completion enable */
#define PCI_EXP_LTR_EN 0x400 /* Latency tolerance reporting */
#define PCI_EXP_OBFF_MSGA_EN 0x2000 /* OBFF enable with Message type A */
#define PCI_EXP_OBFF_MSGB_EN 0x4000 /* OBFF enable with Message type B */
#define PCI_EXP_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
@ -527,6 +537,7 @@
#define PCI_EXT_CAP_ID_ARI 14
#define PCI_EXT_CAP_ID_ATS 15
#define PCI_EXT_CAP_ID_SRIOV 16
#define PCI_EXT_CAP_ID_LTR 24
/* Advanced Error Reporting */
#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
@ -683,6 +694,12 @@
#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */
#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */
#define PCI_LTR_MAX_SNOOP_LAT 0x4
#define PCI_LTR_MAX_NOSNOOP_LAT 0x6
#define PCI_LTR_VALUE_MASK 0x000003ff
#define PCI_LTR_SCALE_MASK 0x00001c00
#define PCI_LTR_SCALE_SHIFT 10
/* Access Control Service */
#define PCI_ACS_CAP 0x04 /* ACS Capability Register */
#define PCI_ACS_SV 0x01 /* Source Validation */


@ -197,8 +197,13 @@ static void kvm_free_assigned_device(struct kvm *kvm,
{
kvm_free_assigned_irq(kvm, assigned_dev);
__pci_reset_function(assigned_dev->dev);
pci_restore_state(assigned_dev->dev);
pci_reset_function(assigned_dev->dev);
if (pci_load_and_free_saved_state(assigned_dev->dev,
&assigned_dev->pci_saved_state))
printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
__func__, dev_name(&assigned_dev->dev->dev));
else
pci_restore_state(assigned_dev->dev);
pci_release_regions(assigned_dev->dev);
pci_disable_device(assigned_dev->dev);
@ -516,7 +521,10 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
pci_reset_function(dev);
pci_save_state(dev);
match->pci_saved_state = pci_store_saved_state(dev);
if (!match->pci_saved_state)
printk(KERN_DEBUG "%s: Couldn't store %s saved state\n",
__func__, dev_name(&dev->dev));
match->assigned_dev_id = assigned_dev->assigned_dev_id;
match->host_segnr = assigned_dev->segnr;
match->host_busnr = assigned_dev->busnr;
@ -546,7 +554,9 @@ out:
mutex_unlock(&kvm->lock);
return r;
out_list_del:
pci_restore_state(dev);
if (pci_load_and_free_saved_state(dev, &match->pci_saved_state))
printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
__func__, dev_name(&dev->dev));
list_del(&match->list);
pci_release_regions(dev);
out_disable: