
pci-v4.19-changes

-----BEGIN PGP SIGNATURE-----
 
 iQJIBAABCgAyFiEEgMe7l+5h9hnxdsnuWYigwDrT+vwFAlt1f9AUHGJoZWxnYWFz
 QGdvb2dsZS5jb20ACgkQWYigwDrT+vxbdhAArnhRvkwOk4m4/LCuKF6HpmlxbBNC
 TjnBCenNf+lFXzWskfDFGFl/Wif4UzGbRTSCNQrwMzj3Ww3f/6R2QIq9rEJvyNC4
 VdxQnaBEZSUgN87q5UGqgdjMTo3zFvlFH6fpb5XDiQ5IX/QZeXeYqoB64w+HvKPU
 M+IsoOvnA5gb7pMcpchrGUnSfS1e6AqQbbTt6tZflore6YCEA4cH5OnpGx8qiZIp
 ut+CMBvQjQB01fHeBc/wGrVte4NwXdONrXqpUb4sHF7HqRNfEh0QVyPhvebBi+k1
 kquqoBQfPFTqgcab31VOcQhg70dEx+1qGm5/YBAwmhCpHR/g2gioFXoROsr+iUOe
 BtF6LZr+Y8cySuhJnkCrJBqWvvBaKbJLg0KMbI+7p4o9MZpod2u7LS5LFrlRDyKW
 3nz3o+b1+v3tCCKVKIhKo0ljolgkweQtR1f6KIHvq93wBODHVQnAOt9NlPfHVyks
 ryGBnOhMjoU5hvfexgIWFk9Ph9MEVQSffkI+TeFPO/tyGBfGfQyGtESiXuEaMQaH
 FGdZHX2RLkY3pWHOtWeMzRHzOnr2XjpDFcAqL3HBGPdJ30K3Umv3WOgoFe2SaocG
 0gaddPjKSwwM4Sa/VP+O5cjGuzi7QnczSDdpYjxIGZzBav32hqx4/rsnLw7bHH8y
 XkEme7cYJc8MGsA=
 =2Dmn
 -----END PGP SIGNATURE-----

Merge tag 'pci-v4.19-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci

Pull PCI updates from Bjorn Helgaas:

 - Decode AER errors with names similar to "lspci" (Tyler Baicar)

 - Expose AER statistics in sysfs (Rajat Jain)

 - Clear AER status bits selectively based on the type of recovery (Oza
   Pawandeep)

 - Honor "pcie_ports=native" even if HEST sets FIRMWARE_FIRST (Alexandru
   Gagniuc)

 - Don't clear AER status bits if we're using the "Firmware-First"
   strategy where firmware owns the registers (Alexandru Gagniuc)

 - Use sysfs_match_string() to simplify ASPM sysfs parsing (Andy
   Shevchenko)

 - Remove unnecessary includes of <linux/pci-aspm.h> (Bjorn Helgaas)

 - Defer DPC event handling to work queue (Keith Busch)

 - Use threaded IRQ for DPC bottom half (Keith Busch)

 - Print AER status while handling DPC events (Keith Busch)

 - Work around IDT switch ACS Source Validation erratum (James
   Puthukattukaran)

 - Emit diagnostics for all cases of PCIe Link downtraining (Links
   operating slower than they're capable of) (Alexandru Gagniuc)

 - Skip VFs when configuring Max Payload Size (Myron Stowe)

 - Reduce Root Port Max Payload Size if necessary when hot-adding a
   device below it (Myron Stowe)

 - Simplify SHPC existence/permission checks (Bjorn Helgaas)

 - Remove hotplug sample skeleton driver (Lukas Wunner)

 - Convert pciehp to threaded IRQ handling (Lukas Wunner)

 - Improve pciehp tolerance of missed events and initially unstable
   links (Lukas Wunner)

 - Clear spurious pciehp events on resume (Lukas Wunner)

 - Add pciehp runtime PM support, including for Thunderbolt controllers
   (Lukas Wunner)

 - Support interrupts from pciehp bridges in D3hot (Lukas Wunner)

 - Mark fall-through switch cases before enabling -Wimplicit-fallthrough
   (Gustavo A. R. Silva)

 - Move DMA-debug PCI init from arch code to PCI core (Christoph
   Hellwig)

 - Fix pci_request_irq() usage of IRQF_ONESHOT when no handler is
   supplied (Heiner Kallweit)

 - Unify PCI and DMA direction #defines (Shunyong Yang)

 - Add PCI_DEVICE_DATA() macro (Andy Shevchenko)

 - Check for VPD completion before checking for timeout (Bert Kenward)

 - Limit Netronome NFP5000 config space size to work around erratum
   (Jakub Kicinski)

 - Set IRQCHIP_ONESHOT_SAFE for PCI MSI irqchips (Heiner Kallweit)

 - Document ACPI description of PCI host bridges (Bjorn Helgaas)

 - Add "pci=disable_acs_redir=" parameter to disable ACS redirection for
   peer-to-peer DMA support (we don't have the peer-to-peer support yet;
   this is just one piece) (Logan Gunthorpe)

 - Clean up devm_of_pci_get_host_bridge_resources() resource allocation
   (Jan Kiszka)

 - Fixup resizable BARs after suspend/resume (Christian König)

 - Make "pci=earlydump" generic (Sinan Kaya)

 - Fix ROM BAR access routines to stay in bounds and check for signature
   correctly (Rex Zhu)

 - Add DMA alias quirk for Microsemi Switchtec NTB (Doug Meyer)

 - Expand documentation for pci_add_dma_alias() (Logan Gunthorpe)

 - To avoid bus errors, enable PASID only if the entire path supports
   End-End TLP prefixes (Sinan Kaya)

 - Unify slot and bus reset functions and remove hotplug knowledge from
   callers (Sinan Kaya)

 - Add Function-Level Reset quirks for Intel and Samsung NVMe devices to
   fix guest reboot issues (Alex Williamson)

 - Add function 1 DMA alias quirk for Marvell 88SS9183 PCIe SSD
   Controller (Bjorn Helgaas)

 - Remove Xilinx AXI-PCIe host bridge arch dependency (Palmer Dabbelt)

 - Remove Aardvark outbound window configuration (Evan Wang)

 - Fix Aardvark bridge window sizing issue (Zachary Zhang)

 - Convert Aardvark to use pci_host_probe() to reduce code duplication
   (Thomas Petazzoni)

 - Correct the Cadence cdns_pcie_writel() signature (Alan Douglas)

 - Add Cadence support for optional generic PHYs (Alan Douglas)

 - Add Cadence power management ops (Alan Douglas)

 - Remove redundant variable from Cadence driver (Colin Ian King)

 - Add Kirin MSI support (Xiaowei Song)

 - Drop unnecessary root_bus_nr setting from exynos, imx6, keystone,
   armada8k, artpec6, designware-plat, histb, qcom, spear13xx (Shawn
   Guo)

 - Move link notification settings from DesignWare core to individual
   drivers (Gustavo Pimentel)

 - Add endpoint library MSI-X interfaces (Gustavo Pimentel)

 - Correct signature of endpoint library IRQ interfaces (Gustavo
   Pimentel)

 - Add DesignWare endpoint library MSI-X callbacks (Gustavo Pimentel)

 - Add endpoint library MSI-X test support (Gustavo Pimentel)

 - Remove unnecessary GFP_ATOMIC from Hyper-V "new child" allocation
   (Jia-Ju Bai)

 - Add more devices to Broadcom PAXC quirk (Ray Jui)

 - Work around corrupted Broadcom PAXC config space to enable SMMU and
   GICv3 ITS (Ray Jui)

 - Disable MSI parsing to work around broken Broadcom PAXC logic in some
   devices (Ray Jui)

 - Hide unconfigured functions to work around a Broadcom PAXC defect
   (Ray Jui)

 - Lower iproc log level to reduce console output during boot (Ray Jui)

 - Fix mobiveil iomem/phys_addr_t type usage (Lorenzo Pieralisi)

 - Fix mobiveil missing include file (Lorenzo Pieralisi)

 - Add mobiveil Kconfig/Makefile support (Lorenzo Pieralisi)

 - Fix mvebu I/O space remapping issues (Thomas Petazzoni)

 - Use generic pci_host_bridge in mvebu instead of ARM-specific API
   (Thomas Petazzoni)

 - Whitelist VMD devices with fast interrupt handlers to avoid sharing
   vectors with slow handlers (Keith Busch)

* tag 'pci-v4.19-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci: (153 commits)
  PCI/AER: Don't clear AER bits if error handling is Firmware-First
  PCI: Limit config space size for Netronome NFP5000
  PCI/MSI: Set IRQCHIP_ONESHOT_SAFE for PCI-MSI irqchips
  PCI/VPD: Check for VPD access completion before checking for timeout
  PCI: Add PCI_DEVICE_DATA() macro to fully describe device ID entry
  PCI: Match Root Port's MPS to endpoint's MPSS as necessary
  PCI: Skip MPS logic for Virtual Functions (VFs)
  PCI: Add function 1 DMA alias quirk for Marvell 88SS9183
  PCI: Check for PCIe Link downtraining
  PCI: Add ACS Redirect disable quirk for Intel Sunrise Point
  PCI: Add device-specific ACS Redirect disable infrastructure
  PCI: Convert device-specific ACS quirks from NULL termination to ARRAY_SIZE
  PCI: Add "pci=disable_acs_redir=" parameter for peer-to-peer support
  PCI: Allow specifying devices using a base bus and path of devfns
  PCI: Make specifying PCI devices in kernel parameters reusable
  PCI: Hide ACS quirk declarations inside PCI core
  PCI: Delay after FLR of Intel DC P3700 NVMe
  PCI: Disable Samsung SM961/PM961 NVMe before FLR
  PCI: Export pcie_has_flr()
  PCI: mvebu: Drop bogus comment above mvebu_pcie_map_registers()
  ...
Linus Torvalds 2018-08-16 09:21:54 -07:00
commit 4e31843f68
115 changed files with 3593 additions and 1808 deletions


@ -0,0 +1,122 @@
==========================
PCIe Device AER statistics
==========================
These attributes show up under all the devices that are AER capable. These
statistical counters indicate the errors "as seen/reported by the device".
Note that if an endpoint is causing problems, the AER counters may increment
at its link partner (e.g. the root port) rather than at the endpoint itself,
because the errors are "seen"/reported by the link partner and not by the
problematic endpoint (which may report all counters as 0 because it never
saw any problems).
Where: /sys/bus/pci/devices/<dev>/aer_dev_correctable
Date: July 2018
Kernel Version: 4.19.0
Contact: linux-pci@vger.kernel.org, rajatja@google.com
Description: List of correctable errors seen and reported by this
PCI device using ERR_COR. Note that since multiple errors may
be reported using a single ERR_COR message, TOTAL_ERR_COR
at the end of the file may not match the actual total of all the
errors in the file. Sample output:
-------------------------------------------------------------------------
localhost /sys/devices/pci0000:00/0000:00:1c.0 # cat aer_dev_correctable
Receiver Error 2
Bad TLP 0
Bad DLLP 0
RELAY_NUM Rollover 0
Replay Timer Timeout 0
Advisory Non-Fatal 0
Corrected Internal Error 0
Header Log Overflow 0
TOTAL_ERR_COR 2
-------------------------------------------------------------------------
Where: /sys/bus/pci/devices/<dev>/aer_dev_fatal
Date: July 2018
Kernel Version: 4.19.0
Contact: linux-pci@vger.kernel.org, rajatja@google.com
Description: List of uncorrectable fatal errors seen and reported by this
PCI device using ERR_FATAL. Note that since multiple errors may
be reported using a single ERR_FATAL message, TOTAL_ERR_FATAL
at the end of the file may not match the actual total of all the
errors in the file. Sample output:
-------------------------------------------------------------------------
localhost /sys/devices/pci0000:00/0000:00:1c.0 # cat aer_dev_fatal
Undefined 0
Data Link Protocol 0
Surprise Down Error 0
Poisoned TLP 0
Flow Control Protocol 0
Completion Timeout 0
Completer Abort 0
Unexpected Completion 0
Receiver Overflow 0
Malformed TLP 0
ECRC 0
Unsupported Request 0
ACS Violation 0
Uncorrectable Internal Error 0
MC Blocked TLP 0
AtomicOp Egress Blocked 0
TLP Prefix Blocked Error 0
TOTAL_ERR_FATAL 0
-------------------------------------------------------------------------
Where: /sys/bus/pci/devices/<dev>/aer_dev_nonfatal
Date: July 2018
Kernel Version: 4.19.0
Contact: linux-pci@vger.kernel.org, rajatja@google.com
Description: List of uncorrectable nonfatal errors seen and reported by this
PCI device using ERR_NONFATAL. Note that since multiple errors
may be reported using a single ERR_NONFATAL message,
TOTAL_ERR_NONFATAL at the end of the file may not match the
actual total of all the errors in the file. Sample output:
-------------------------------------------------------------------------
localhost /sys/devices/pci0000:00/0000:00:1c.0 # cat aer_dev_nonfatal
Undefined 0
Data Link Protocol 0
Surprise Down Error 0
Poisoned TLP 0
Flow Control Protocol 0
Completion Timeout 0
Completer Abort 0
Unexpected Completion 0
Receiver Overflow 0
Malformed TLP 0
ECRC 0
Unsupported Request 0
ACS Violation 0
Uncorrectable Internal Error 0
MC Blocked TLP 0
AtomicOp Egress Blocked 0
TLP Prefix Blocked Error 0
TOTAL_ERR_NONFATAL 0
-------------------------------------------------------------------------
============================
PCIe Rootport AER statistics
============================
These attributes show up only under root ports (or root complex event
collectors) that are AER capable. They indicate the number of error messages
"reported to" the root port. Note that root ports also transmit (internally)
the ERR_* messages for errors seen by the internal root port PCI device, so
these counters are cumulative of all the error messages on the PCI hierarchy
originating at that root port.
Where: /sys/bus/pci/devices/<dev>/aer_stats/aer_rootport_total_err_cor
Date: July 2018
Kernel Version: 4.19.0
Contact: linux-pci@vger.kernel.org, rajatja@google.com
Description: Total number of ERR_COR messages reported to the root port.
Where: /sys/bus/pci/devices/<dev>/aer_stats/aer_rootport_total_err_fatal
Date: July 2018
Kernel Version: 4.19.0
Contact: linux-pci@vger.kernel.org, rajatja@google.com
Description: Total number of ERR_FATAL messages reported to the root port.
Where: /sys/bus/pci/devices/<dev>/aer_stats/aer_rootport_total_err_nonfatal
Date: July 2018
Kernel Version: 4.19.0
Contact: linux-pci@vger.kernel.org, rajatja@google.com
Description: Total number of ERR_NONFATAL messages reported to the root port.
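For example, on an AER-capable root port the three totals can be read
directly (hypothetical output; the path and the counts depend on the
topology):
-------------------------------------------------------------------------
localhost /sys/devices/pci0000:00/0000:00:1c.0 # grep . aer_stats/*
aer_stats/aer_rootport_total_err_cor:2
aer_stats/aer_rootport_total_err_fatal:0
aer_stats/aer_rootport_total_err_nonfatal:0
-------------------------------------------------------------------------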


@ -1,5 +1,7 @@
00-INDEX
- this file
acpi-info.txt
- info on how PCI host bridges are represented in ACPI
MSI-HOWTO.txt
- the Message Signaled Interrupts (MSI) Driver Guide HOWTO and FAQ.
PCIEBUS-HOWTO.txt


@ -0,0 +1,187 @@
ACPI considerations for PCI host bridges
The general rule is that the ACPI namespace should describe everything the
OS might use unless there's another way for the OS to find it [1, 2].
For example, there's no standard hardware mechanism for enumerating PCI
host bridges, so the ACPI namespace must describe each host bridge, the
method for accessing PCI config space below it, the address space windows
the host bridge forwards to PCI (using _CRS), and the routing of legacy
INTx interrupts (using _PRT).
PCI devices, which are below the host bridge, generally do not need to be
described via ACPI. The OS can discover them via the standard PCI
enumeration mechanism, using config accesses to discover and identify
devices and read and size their BARs. However, ACPI may describe PCI
devices if it provides power management or hotplug functionality for them
or if the device has INTx interrupts connected by platform interrupt
controllers and a _PRT is needed to describe those connections.
ACPI resource description is done via _CRS objects of devices in the ACPI
namespace [2].   The _CRS is like a generalized PCI BAR: the OS can read
_CRS and figure out what resource is being consumed even if it doesn't have
a driver for the device [3].  That's important because it means an old OS
can work correctly even on a system with new devices unknown to the OS.
The new devices might not do anything, but the OS can at least make sure no
resources conflict with them.
Static tables like MCFG, HPET, ECDT, etc., are *not* mechanisms for
reserving address space. The static tables are for things the OS needs to
know early in boot, before it can parse the ACPI namespace. If a new table
is defined, an old OS needs to operate correctly even though it ignores the
table. _CRS allows that because it is generic and understood by the old
OS; a static table does not.
If the OS is expected to manage a non-discoverable device described via
ACPI, that device will have a specific _HID/_CID that tells the OS what
driver to bind to it, and the _CRS tells the OS and the driver where the
device's registers are.
PCI host bridges are PNP0A03 or PNP0A08 devices.  Their _CRS should
describe all the address space they consume.  This includes all the windows
they forward down to the PCI bus, as well as registers of the host bridge
itself that are not forwarded to PCI.  The host bridge registers include
things like secondary/subordinate bus registers that determine the bus
range below the bridge, window registers that describe the apertures, etc.
These are all device-specific, non-architected things, so the only way a
PNP0A03/PNP0A08 driver can manage them is via _PRS/_CRS/_SRS, which contain
the device-specific details.  The host bridge registers also include ECAM
space, since it is consumed by the host bridge.
ACPI defines a Consumer/Producer bit to distinguish the bridge registers
("Consumer") from the bridge apertures ("Producer") [4, 5], but early
BIOSes didn't use that bit correctly. The result is that the current ACPI
spec defines Consumer/Producer only for the Extended Address Space
descriptors; the bit should be ignored in the older QWord/DWord/Word
Address Space descriptors. Consequently, OSes have to assume all
QWord/DWord/Word descriptors are windows.
Prior to the addition of Extended Address Space descriptors, the failure of
Consumer/Producer meant there was no way to describe bridge registers in
the PNP0A03/PNP0A08 device itself. The workaround was to describe the
bridge registers (including ECAM space) in PNP0C02 catch-all devices [6].
With the exception of ECAM, the bridge register space is device-specific
anyway, so the generic PNP0A03/PNP0A08 driver (pci_root.c) has no need to
know about it.  
New architectures should be able to use "Consumer" Extended Address Space
descriptors in the PNP0A03 device for bridge registers, including ECAM,
although a strict interpretation of [6] might prohibit this. Old x86 and
ia64 kernels assume all address space descriptors, including "Consumer"
Extended Address Space ones, are windows, so it would not be safe to
describe bridge registers this way on those architectures.
PNP0C02 "motherboard" devices are basically a catch-all.  There's no
programming model for them other than "don't use these resources for
anything else."  So a PNP0C02 _CRS should claim any address space that is
(1) not claimed by _CRS under any other device object in the ACPI namespace
and (2) should not be assigned by the OS to something else.
The PCIe spec requires the Enhanced Configuration Access Method (ECAM)
unless there's a standard firmware interface for config access, e.g., the
ia64 SAL interface [7]. A host bridge consumes ECAM memory address space
and converts memory accesses into PCI configuration accesses. The spec
defines the ECAM address space layout and functionality; only the base of
the address space is device-specific. An ACPI OS learns the base address
from either the static MCFG table or a _CBA method in the PNP0A03 device.
The MCFG table must describe the ECAM space of non-hot pluggable host
bridges [8]. Since MCFG is a static table and can't be updated by hotplug,
a _CBA method in the PNP0A03 device describes the ECAM space of a
hot-pluggable host bridge [9]. Note that for both MCFG and _CBA, the base
address always corresponds to bus 0, even if the bus range below the bridge
(which is reported via _CRS) doesn't start at 0.
[1] ACPI 6.2, sec 6.1:
For any device that is on a non-enumerable type of bus (for example, an
ISA bus), OSPM enumerates the devices' identifier(s) and the ACPI
system firmware must supply an _HID object ... for each device to
enable OSPM to do that.
[2] ACPI 6.2, sec 3.7:
The OS enumerates motherboard devices simply by reading through the
ACPI Namespace looking for devices with hardware IDs.
Each device enumerated by ACPI includes ACPI-defined objects in the
ACPI Namespace that report the hardware resources the device could
occupy [_PRS], an object that reports the resources that are currently
used by the device [_CRS], and objects for configuring those resources
[_SRS]. The information is used by the Plug and Play OS (OSPM) to
configure the devices.
[3] ACPI 6.2, sec 6.2:
OSPM uses device configuration objects to configure hardware resources
for devices enumerated via ACPI. Device configuration objects provide
information about current and possible resource requirements, the
relationship between shared resources, and methods for configuring
hardware resources.
When OSPM enumerates a device, it calls _PRS to determine the resource
requirements of the device. It may also call _CRS to find the current
resource settings for the device. Using this information, the Plug and
Play system determines what resources the device should consume and
sets those resources by calling the device's _SRS control method.
In ACPI, devices can consume resources (for example, legacy keyboards),
provide resources (for example, a proprietary PCI bridge), or do both.
Unless otherwise specified, resources for a device are assumed to be
taken from the nearest matching resource above the device in the device
hierarchy.
[4] ACPI 6.2, sec 6.4.3.5.1, 2, 3, 4:
QWord/DWord/Word Address Space Descriptor (.1, .2, .3)
General Flags: Bit [0] Ignored
Extended Address Space Descriptor (.4)
General Flags: Bit [0] Consumer/Producer:
1 - This device consumes this resource
0 - This device produces and consumes this resource
[5] ACPI 6.2, sec 19.6.43:
ResourceUsage specifies whether the Memory range is consumed by
this device (ResourceConsumer) or passed on to child devices
(ResourceProducer). If nothing is specified, then
ResourceConsumer is assumed.
[6] PCI Firmware 3.2, sec 4.1.2:
If the operating system does not natively comprehend reserving the
MMCFG region, the MMCFG region must be reserved by firmware. The
address range reported in the MCFG table or by _CBA method (see Section
4.1.3) must be reserved by declaring a motherboard resource. For most
systems, the motherboard resource would appear at the root of the ACPI
namespace (under \_SB) in a node with a _HID of EISAID (PNP0C02), and
the resources in this case should not be claimed in the root PCI bus's
_CRS. The resources can optionally be returned in Int15 E820 or
EFIGetMemoryMap as reserved memory but must always be reported through
ACPI as a motherboard resource.
[7] PCI Express 4.0, sec 7.2.2:
For systems that are PC-compatible, or that do not implement a
processor-architecture-specific firmware interface standard that allows
access to the Configuration Space, the ECAM is required as defined in
this section.
[8] PCI Firmware 3.2, sec 4.1.2:
The MCFG table is an ACPI table that is used to communicate the base
addresses corresponding to the non-hot removable PCI Segment Groups
range within a PCI Segment Group available to the operating system at
boot. This is required for the PC-compatible systems.
The MCFG table is only used to communicate the base addresses
corresponding to the PCI Segment Groups available to the system at
boot.
[9] PCI Firmware 3.2, sec 4.1.3:
The _CBA (Memory mapped Configuration Base Address) control method is
an optional ACPI object that returns the 64-bit memory mapped
configuration base address for the hot plug capable host bridge. The
base address returned by _CBA is a processor-relative address. The _CBA
control method evaluates to an Integer.
This control method appears under a host bridge object. When the _CBA
method appears under an active host bridge object, the operating system
evaluates this structure to identify the memory mapped configuration
base address corresponding to the PCI Segment Group for the bus number
range specified in _CRS method. An ACPI name space object that contains
the _CBA method must also contain a corresponding _SEG method.


@ -15,3 +15,5 @@ subsys_id : don't care
interrupt_pin : Should be 1 - INTA, 2 - INTB, 3 - INTC, 4 - INTD
msi_interrupts : Should be 1 to 32 depending on the number of MSI interrupts
to test
msix_interrupts : Should be 1 to 2048 depending on the number of MSI-X
interrupts to test


@ -44,7 +44,7 @@ by the PCI controller driver.
* clear_bar: ops to reset the BAR
* alloc_addr_space: ops to allocate in PCI controller address space
* free_addr_space: ops to free the allocated address space
* raise_irq: ops to raise a legacy or MSI interrupt
* raise_irq: ops to raise a legacy, MSI or MSI-X interrupt
* start: ops to start the PCI link
* stop: ops to stop the PCI link
@ -96,7 +96,7 @@ by the PCI endpoint function driver.
*) pci_epc_raise_irq()
The PCI endpoint function driver should use pci_epc_raise_irq() to raise
Legacy Interrupt or MSI Interrupt.
Legacy Interrupt, MSI or MSI-X Interrupt.
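A minimal sketch of such a call from an endpoint function driver (the epf
pointer and the vector number are illustrative; as of this series the
interrupt number parameter is a u16, so MSI-X vectors up to 2048 fit):

	/* raise the 3rd MSI-X vector of this function */
	ret = pci_epc_raise_irq(epf->epc, epf->func_no, PCI_EPC_IRQ_MSIX, 3);
	if (ret)
		dev_err(&epf->dev, "failed to raise MSI-X interrupt\n");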
*) pci_epc_mem_alloc_addr()


@ -20,6 +20,8 @@ The PCI endpoint test device has the following registers:
5) PCI_ENDPOINT_TEST_DST_ADDR
6) PCI_ENDPOINT_TEST_SIZE
7) PCI_ENDPOINT_TEST_CHECKSUM
8) PCI_ENDPOINT_TEST_IRQ_TYPE
9) PCI_ENDPOINT_TEST_IRQ_NUMBER
*) PCI_ENDPOINT_TEST_MAGIC
@ -34,10 +36,10 @@ that the endpoint device must perform.
Bitfield Description:
Bit 0 : raise legacy IRQ
Bit 1 : raise MSI IRQ
Bit 2 - 7 : MSI interrupt number
Bit 8 : read command (read data from RC buffer)
Bit 9 : write command (write data to RC buffer)
Bit 10 : copy command (copy data from one RC buffer to another
Bit 2 : raise MSI-X IRQ
Bit 3 : read command (read data from RC buffer)
Bit 4 : write command (write data to RC buffer)
Bit 5 : copy command (copy data from one RC buffer to another
RC buffer)
*) PCI_ENDPOINT_TEST_STATUS
@ -64,3 +66,22 @@ COPY/READ command.
This register contains the destination address (RC buffer address) for
the COPY/WRITE command.
*) PCI_ENDPOINT_TEST_IRQ_TYPE
This register contains the interrupt type (Legacy/MSI/MSI-X) triggered
for the READ/WRITE/COPY and raise IRQ (Legacy/MSI/MSI-X) commands.
Possible types:
- Legacy : 0
- MSI : 1
- MSI-X : 2
*) PCI_ENDPOINT_TEST_IRQ_NUMBER
This register contains the ID of the triggered interrupt.
Admissible values:
- Legacy : 0
- MSI : [1 .. 32]
- MSI-X : [1 .. 2048]


@ -45,9 +45,9 @@ The PCI endpoint framework populates the directory with the following
configurable fields.
# ls functions/pci_epf_test/func1
baseclass_code interrupt_pin revid subsys_vendor_id
cache_line_size msi_interrupts subclass_code vendorid
deviceid progif_code subsys_id
baseclass_code interrupt_pin progif_code subsys_id
cache_line_size msi_interrupts revid subsys_vendorid
deviceid msix_interrupts subclass_code vendorid
The PCI endpoint function driver populates these entries with default values
when the device is bound to the driver. The pci-epf-test driver populates
@ -67,6 +67,7 @@ device, the following commands can be used.
# echo 0x104c > functions/pci_epf_test/func1/vendorid
# echo 0xb500 > functions/pci_epf_test/func1/deviceid
# echo 16 > functions/pci_epf_test/func1/msi_interrupts
# echo 8 > functions/pci_epf_test/func1/msix_interrupts
1.5 Binding pci-epf-test Device to EP Controller
@ -120,7 +121,9 @@ following commands.
Interrupt tests
SET IRQ TYPE TO LEGACY: OKAY
LEGACY IRQ: NOT OKAY
SET IRQ TYPE TO MSI: OKAY
MSI1: OKAY
MSI2: OKAY
MSI3: OKAY
@ -153,9 +156,30 @@ following commands.
MSI30: NOT OKAY
MSI31: NOT OKAY
MSI32: NOT OKAY
SET IRQ TYPE TO MSI-X: OKAY
MSI-X1: OKAY
MSI-X2: OKAY
MSI-X3: OKAY
MSI-X4: OKAY
MSI-X5: OKAY
MSI-X6: OKAY
MSI-X7: OKAY
MSI-X8: OKAY
MSI-X9: NOT OKAY
MSI-X10: NOT OKAY
MSI-X11: NOT OKAY
MSI-X12: NOT OKAY
MSI-X13: NOT OKAY
MSI-X14: NOT OKAY
MSI-X15: NOT OKAY
MSI-X16: NOT OKAY
[...]
MSI-X2047: NOT OKAY
MSI-X2048: NOT OKAY
Read Tests
SET IRQ TYPE TO MSI: OKAY
READ ( 1 bytes): OKAY
READ ( 1024 bytes): OKAY
READ ( 1025 bytes): OKAY


@ -73,6 +73,11 @@ In the example, 'Requester ID' means the ID of the device that sends
the error message to the root port. Please refer to the PCI Express
specification for other fields.
2.4 AER Statistics / Counters
When PCIe AER errors are captured, the counters / statistics are also exposed
in the form of sysfs attributes, which are documented in
Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats
3. Developer Guide


@ -3089,8 +3089,31 @@
See header of drivers/block/paride/pcd.c.
See also Documentation/blockdev/paride.txt.
pci=option[,option...] [PCI] various PCI subsystem options:
earlydump [X86] dump PCI config space before the kernel
pci=option[,option...] [PCI] various PCI subsystem options.
Some options herein operate on a specific device
or a set of devices (<pci_dev>). These are
specified in one of the following formats:
[<domain>:]<bus>:<dev>.<func>[/<dev>.<func>]*
pci:<vendor>:<device>[:<subvendor>:<subdevice>]
Note: the first format specifies a PCI
bus/device/function address which may change
if new hardware is inserted, if motherboard
firmware changes, or due to changes caused
by other kernel parameters. If the
domain is left unspecified, it is
taken to be zero. Optionally, a path
to a device through multiple device/function
addresses can be specified after the base
address (this is more robust against
renumbering issues). The second format
selects devices using IDs from the
configuration space which may match multiple
devices in the system.
earlydump dump PCI config space before the kernel
changes anything
off [X86] don't probe for the PCI bus
bios [X86-32] force use of PCI BIOS, don't access
@ -3218,11 +3241,10 @@
window. The default value is 64 megabytes.
resource_alignment=
Format:
[<order of align>@][<domain>:]<bus>:<slot>.<func>[; ...]
[<order of align>@]pci:<vendor>:<device>\
[:<subvendor>:<subdevice>][; ...]
[<order of align>@]<pci_dev>[; ...]
Specifies alignment and device to reassign
aligned memory resources.
aligned memory resources. How to
specify the device is described above.
If <order of align> is not specified,
PAGE_SIZE is used as alignment.
PCI-PCI bridge can be specified, if resource
@ -3265,6 +3287,15 @@
Adding the window is slightly risky (it may
conflict with unreported devices), so this
taints the kernel.
disable_acs_redir=<pci_dev>[; ...]
Specify one or more PCI devices (in the format
specified above) separated by semicolons.
Each device specified will have its PCI ACS
redirect capabilities forced off, which allows
P2P traffic between devices through bridges
without forcing it upstream. Note: this
removes isolation between devices and may put
more devices in an IOMMU group.
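A hypothetical example using both device formats
(the addresses and IDs here are made up):
pci=disable_acs_redir=0000:00:1c.4/0.0;pci:8086:9d18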
pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
Management.

View File

@ -9,6 +9,9 @@ Required properties:
Optional properties:
- max-functions: Maximum number of functions that can be configured (default 1).
- phys: From PHY bindings: List of Generic PHY phandles. One per lane if more
than one in the list. If only one PHY is listed, it must manage all lanes.
- phy-names: List of names to identify the PHY.
Example:
@ -19,4 +22,6 @@ pcie@fc000000 {
reg-names = "reg", "mem";
cdns,max-outbound-regions = <16>;
max-functions = /bits/ 8 <8>;
phys = <&ep_phy0 &ep_phy1>;
phy-names = "pcie-lane0","pcie-lane1";
};

View File

@ -24,6 +24,9 @@ Optional properties:
translations (default 32)
- vendor-id: The PCI vendor ID (16 bits, default is design dependent)
- device-id: The PCI device ID (16 bits, default is design dependent)
- phys: From PHY bindings: List of Generic PHY phandles. One per lane if more
than one in the list. If only one PHY is listed, it must manage all lanes.
- phy-names: List of names to identify the PHY.
Example:
@ -57,4 +60,7 @@ pcie@fb000000 {
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
msi-parent = <&its_pci>;
phys = <&pcie_phy0>;
phy-names = "pcie-phy";
};

View File

@ -166,6 +166,7 @@ Code Seq#(hex) Include File Comments
'P' all linux/soundcard.h conflict!
'P' 60-6F sound/sscape_ioctl.h conflict!
'P' 00-0F drivers/usb/class/usblp.c conflict!
'P' 01-09 drivers/misc/pci_endpoint_test.c conflict!
'Q' all linux/soundcard.h
'R' 00-1F linux/random.h conflict!
'R' 01 linux/rfkill.h conflict!

View File

@ -10,6 +10,7 @@ The PCI driver for the test device performs the following tests
*) verifying addresses programmed in BAR
*) raise legacy IRQ
*) raise MSI IRQ
*) raise MSI-X IRQ
*) read data
*) write data
*) copy data
@ -25,6 +26,11 @@ ioctl
PCITEST_LEGACY_IRQ: Tests legacy IRQ
PCITEST_MSI: Tests message signalled interrupts. The MSI number
to be tested should be passed as argument.
PCITEST_MSIX: Tests message signalled interrupts. The MSI-X number
to be tested should be passed as argument.
PCITEST_SET_IRQTYPE: Changes driver IRQ type configuration. The IRQ type
should be passed as argument (0: Legacy, 1: MSI, 2: MSI-X).
PCITEST_GET_IRQTYPE: Gets driver IRQ type configuration.
PCITEST_WRITE: Perform write tests. The size of the buffer should be passed
as argument.
PCITEST_READ: Perform read tests. The size of the buffer should be passed
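A minimal userspace sketch of the new IRQ-type ioctls (a sketch assuming the
default test device node and an installed <linux/pcitest.h> uapi header;
error handling omitted):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/pcitest.h>

	int main(void)
	{
		int fd = open("/dev/pci-endpoint-test.0", O_RDWR);

		ioctl(fd, PCITEST_SET_IRQTYPE, 2);	/* 2: MSI-X */
		printf("irq type: %d\n", ioctl(fd, PCITEST_GET_IRQTYPE));
		ioctl(fd, PCITEST_MSIX, 1);		/* raise MSI-X vector 1 */
		return 0;
	}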


@ -357,9 +357,6 @@ EXPORT_SYMBOL_GPL(dma_get_required_mask);
static int __init dma_init(void)
{
#ifdef CONFIG_PCI
dma_debug_add_bus(&pci_bus_type);
#endif
#ifdef CONFIG_IBMVIO
dma_debug_add_bus(&vio_bus_type);
#endif


@ -160,8 +160,6 @@ static int __init pcibios_init(void)
for (hose = hose_head; hose; hose = hose->next)
pcibios_scanbus(hose);
dma_debug_add_bus(&pci_bus_type);
pci_initialized = 1;
return 0;


@ -15,8 +15,4 @@ extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val);
extern void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val);
extern int early_pci_allowed(void);
extern unsigned int pci_early_dump_regs;
extern void early_dump_pci_device(u8 bus, u8 slot, u8 func);
extern void early_dump_pci_devices(void);
#endif /* _ASM_X86_PCI_DIRECT_H */


@ -155,9 +155,6 @@ static int __init pci_iommu_init(void)
{
struct iommu_table_entry *p;
#ifdef CONFIG_PCI
dma_debug_add_bus(&pci_bus_type);
#endif
x86_init.iommu.iommu_init();
for (p = __iommu_table; p < __iommu_table_end; p++) {


@ -999,11 +999,6 @@ void __init setup_arch(char **cmdline_p)
setup_clear_cpu_cap(X86_FEATURE_APIC);
}
#ifdef CONFIG_PCI
if (pci_early_dump_regs)
early_dump_pci_devices();
#endif
e820__reserve_setup_data();
e820__finish_early_params();


@ -22,7 +22,6 @@
unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
PCI_PROBE_MMCONF;
unsigned int pci_early_dump_regs;
static int pci_bf_sort;
int pci_routeirq;
int noioapicquirk;
@ -599,9 +598,6 @@ char *__init pcibios_setup(char *str)
pci_probe |= PCI_BIG_ROOT_WINDOW;
return NULL;
#endif
} else if (!strcmp(str, "earlydump")) {
pci_early_dump_regs = 1;
return NULL;
} else if (!strcmp(str, "routeirq")) {
pci_routeirq = 1;
return NULL;


@ -57,47 +57,3 @@ int early_pci_allowed(void)
PCI_PROBE_CONF1;
}
void early_dump_pci_device(u8 bus, u8 slot, u8 func)
{
u32 value[256 / 4];
int i;
pr_info("pci 0000:%02x:%02x.%d config space:\n", bus, slot, func);
for (i = 0; i < 256; i += 4)
value[i / 4] = read_pci_config(bus, slot, func, i);
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, value, 256, false);
}
void early_dump_pci_devices(void)
{
unsigned bus, slot, func;
if (!early_pci_allowed())
return;
for (bus = 0; bus < 256; bus++) {
for (slot = 0; slot < 32; slot++) {
for (func = 0; func < 8; func++) {
u32 class;
u8 type;
class = read_pci_config(bus, slot, func,
PCI_CLASS_REVISION);
if (class == 0xffffffff)
continue;
early_dump_pci_device(bus, slot, func);
if (func == 0) {
type = read_pci_config_byte(bus, slot,
func,
PCI_HEADER_TYPE);
if (!(type & 0x80))
break;
}
}
}
}
}


@ -905,9 +905,7 @@ static int trigger_sbr(struct hfi1_devdata *dd)
* delay after a reset is required. Per spec requirements,
* the link is either working or not after that point.
*/
pci_reset_bridge_secondary_bus(dev->bus->self);
return 0;
return pci_reset_bus(dev);
}
/*


@ -35,38 +35,45 @@
#include <uapi/linux/pcitest.h>
#define DRV_MODULE_NAME "pci-endpoint-test"
#define DRV_MODULE_NAME "pci-endpoint-test"
#define PCI_ENDPOINT_TEST_MAGIC 0x0
#define IRQ_TYPE_UNDEFINED -1
#define IRQ_TYPE_LEGACY 0
#define IRQ_TYPE_MSI 1
#define IRQ_TYPE_MSIX 2
#define PCI_ENDPOINT_TEST_COMMAND 0x4
#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
#define MSI_NUMBER_SHIFT 2
/* 6 bits for MSI number */
#define COMMAND_READ BIT(8)
#define COMMAND_WRITE BIT(9)
#define COMMAND_COPY BIT(10)
#define PCI_ENDPOINT_TEST_MAGIC 0x0
#define PCI_ENDPOINT_TEST_STATUS 0x8
#define STATUS_READ_SUCCESS BIT(0)
#define STATUS_READ_FAIL BIT(1)
#define STATUS_WRITE_SUCCESS BIT(2)
#define STATUS_WRITE_FAIL BIT(3)
#define STATUS_COPY_SUCCESS BIT(4)
#define STATUS_COPY_FAIL BIT(5)
#define STATUS_IRQ_RAISED BIT(6)
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)
#define PCI_ENDPOINT_TEST_COMMAND 0x4
#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
#define COMMAND_RAISE_MSIX_IRQ BIT(2)
#define COMMAND_READ BIT(3)
#define COMMAND_WRITE BIT(4)
#define COMMAND_COPY BIT(5)
#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0xc
#define PCI_ENDPOINT_TEST_STATUS 0x8
#define STATUS_READ_SUCCESS BIT(0)
#define STATUS_READ_FAIL BIT(1)
#define STATUS_WRITE_SUCCESS BIT(2)
#define STATUS_WRITE_FAIL BIT(3)
#define STATUS_COPY_SUCCESS BIT(4)
#define STATUS_COPY_FAIL BIT(5)
#define STATUS_IRQ_RAISED BIT(6)
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)
#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10
#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18
#define PCI_ENDPOINT_TEST_SIZE 0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
#define PCI_ENDPOINT_TEST_SIZE 0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
static DEFINE_IDA(pci_endpoint_test_ida);
@ -77,6 +84,10 @@ static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");
static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
enum pci_barno {
BAR_0,
BAR_1,
@ -103,7 +114,7 @@ struct pci_endpoint_test {
struct pci_endpoint_test_data {
enum pci_barno test_reg_bar;
size_t alignment;
bool no_msi;
int irq_type;
};
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
@ -147,6 +158,100 @@ static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
return IRQ_HANDLED;
}
static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
struct pci_dev *pdev = test->pdev;
pci_free_irq_vectors(pdev);
}
static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
int type)
{
int irq = -1;
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
bool res = true;
switch (type) {
case IRQ_TYPE_LEGACY:
irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
if (irq < 0)
dev_err(dev, "Failed to get Legacy interrupt\n");
break;
case IRQ_TYPE_MSI:
irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
if (irq < 0)
dev_err(dev, "Failed to get MSI interrupts\n");
break;
case IRQ_TYPE_MSIX:
irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
if (irq < 0)
dev_err(dev, "Failed to get MSI-X interrupts\n");
break;
default:
dev_err(dev, "Invalid IRQ type selected\n");
}
if (irq < 0) {
irq = 0;
res = false;
}
test->num_irqs = irq;
return res;
}
static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
int i;
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
for (i = 0; i < test->num_irqs; i++)
devm_free_irq(dev, pci_irq_vector(pdev, i), test);
test->num_irqs = 0;
}
static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
int i;
int err;
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
for (i = 0; i < test->num_irqs; i++) {
err = devm_request_irq(dev, pci_irq_vector(pdev, i),
pci_endpoint_test_irqhandler,
IRQF_SHARED, DRV_MODULE_NAME, test);
if (err)
goto fail;
}
return true;
fail:
switch (irq_type) {
case IRQ_TYPE_LEGACY:
dev_err(dev, "Failed to request IRQ %d for Legacy\n",
pci_irq_vector(pdev, i));
break;
case IRQ_TYPE_MSI:
dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
pci_irq_vector(pdev, i),
i + 1);
break;
case IRQ_TYPE_MSIX:
dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
pci_irq_vector(pdev, i),
i + 1);
break;
}
return false;
}
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
enum pci_barno barno)
{
@ -179,6 +284,9 @@ static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
u32 val;
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
IRQ_TYPE_LEGACY);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
COMMAND_RAISE_LEGACY_IRQ);
val = wait_for_completion_timeout(&test->irq_raised,
@ -190,14 +298,18 @@ static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
}
static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
u8 msi_num)
u16 msi_num, bool msix)
{
u32 val;
struct pci_dev *pdev = test->pdev;
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
msix == false ? IRQ_TYPE_MSI :
IRQ_TYPE_MSIX);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
msi_num << MSI_NUMBER_SHIFT |
COMMAND_RAISE_MSI_IRQ);
msix == false ? COMMAND_RAISE_MSI_IRQ :
COMMAND_RAISE_MSIX_IRQ);
val = wait_for_completion_timeout(&test->irq_raised,
msecs_to_jiffies(1000));
if (!val)
@ -230,6 +342,11 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
if (size > SIZE_MAX - alignment)
goto err;
if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
orig_src_addr = dma_alloc_coherent(dev, size + alignment,
&orig_src_phys_addr, GFP_KERNEL);
if (!orig_src_addr) {
@ -281,8 +398,10 @@ static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
size);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
1 << MSI_NUMBER_SHIFT | COMMAND_COPY);
COMMAND_COPY);
wait_for_completion(&test->irq_raised);
@ -318,6 +437,11 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
if (size > SIZE_MAX - alignment)
goto err;
if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
GFP_KERNEL);
if (!orig_addr) {
@ -348,8 +472,10 @@ static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
1 << MSI_NUMBER_SHIFT | COMMAND_READ);
COMMAND_READ);
wait_for_completion(&test->irq_raised);
@ -379,6 +505,11 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
if (size > SIZE_MAX - alignment)
goto err;
if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
GFP_KERNEL);
if (!orig_addr) {
@ -403,8 +534,10 @@ static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
1 << MSI_NUMBER_SHIFT | COMMAND_WRITE);
COMMAND_WRITE);
wait_for_completion(&test->irq_raised);
@ -417,6 +550,38 @@ err:
return ret;
}
static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
int req_irq_type)
{
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
return false;
}
if (irq_type == req_irq_type)
return true;
pci_endpoint_test_release_irq(test);
pci_endpoint_test_free_irq_vectors(test);
if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
goto err;
if (!pci_endpoint_test_request_irq(test))
goto err;
irq_type = req_irq_type;
return true;
err:
pci_endpoint_test_free_irq_vectors(test);
irq_type = IRQ_TYPE_UNDEFINED;
return false;
}
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@ -436,7 +601,8 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
ret = pci_endpoint_test_legacy_irq(test);
break;
case PCITEST_MSI:
ret = pci_endpoint_test_msi_irq(test, arg);
case PCITEST_MSIX:
ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
break;
case PCITEST_WRITE:
ret = pci_endpoint_test_write(test, arg);
@ -447,6 +613,12 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
case PCITEST_COPY:
ret = pci_endpoint_test_copy(test, arg);
break;
case PCITEST_SET_IRQTYPE:
ret = pci_endpoint_test_set_irq(test, arg);
break;
case PCITEST_GET_IRQTYPE:
ret = irq_type;
break;
}
ret:
@ -462,9 +634,7 @@ static const struct file_operations pci_endpoint_test_fops = {
static int pci_endpoint_test_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int i;
int err;
int irq = 0;
int id;
char name[20];
enum pci_barno bar;
@ -486,11 +656,14 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
test->alignment = 0;
test->pdev = pdev;
if (no_msi)
irq_type = IRQ_TYPE_LEGACY;
data = (struct pci_endpoint_test_data *)ent->driver_data;
if (data) {
test_reg_bar = data->test_reg_bar;
test->alignment = data->alignment;
no_msi = data->no_msi;
irq_type = data->irq_type;
}
init_completion(&test->irq_raised);
@ -510,28 +683,11 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
pci_set_master(pdev);
if (!no_msi) {
irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
if (irq < 0)
dev_err(dev, "Failed to get MSI interrupts\n");
test->num_irqs = irq;
}
if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
goto err_disable_irq;
err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
IRQF_SHARED, DRV_MODULE_NAME, test);
if (err) {
dev_err(dev, "Failed to request IRQ %d\n", pdev->irq);
goto err_disable_msi;
}
for (i = 1; i < irq; i++) {
err = devm_request_irq(dev, pci_irq_vector(pdev, i),
pci_endpoint_test_irqhandler,
IRQF_SHARED, DRV_MODULE_NAME, test);
if (err)
dev_err(dev, "failed to request IRQ %d for MSI %d\n",
pci_irq_vector(pdev, i), i + 1);
}
if (!pci_endpoint_test_request_irq(test))
goto err_disable_irq;
for (bar = BAR_0; bar <= BAR_5; bar++) {
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
@ -590,12 +746,10 @@ err_iounmap:
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
pci_endpoint_test_release_irq(test);
for (i = 0; i < irq; i++)
devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), test);
err_disable_msi:
pci_disable_msi(pdev);
err_disable_irq:
pci_endpoint_test_free_irq_vectors(test);
pci_release_regions(pdev);
err_disable_pdev:
@ -607,7 +761,6 @@ err_disable_pdev:
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
int id;
int i;
enum pci_barno bar;
struct pci_endpoint_test *test = pci_get_drvdata(pdev);
struct miscdevice *misc_device = &test->miscdev;
@ -624,9 +777,10 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
for (i = 0; i < test->num_irqs; i++)
devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), test);
pci_disable_msi(pdev);
pci_endpoint_test_release_irq(test);
pci_endpoint_test_free_irq_vectors(test);
pci_release_regions(pdev);
pci_disable_device(pdev);
}


@ -22,7 +22,6 @@
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>


@ -18,7 +18,6 @@
#include <linux/nl80211.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/module.h>
#include "ath9k.h"


@ -72,7 +72,6 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/acpi.h>
#include "fw/acpi.h"


@ -27,12 +27,6 @@
#include <linux/io.h>
#include <linux/pci.h>
/*
* This variable should be used with the
* pci_driver structure initialization.
*/
#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops)
/*
* PCI driver handlers.
*/
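The local macro above is dropped because <linux/pci.h> now provides a
PCI_DEVICE_DATA() that also fills in the vendor/device/subsystem IDs. A
sketch of an ID-table entry using the core macro (my_ops is an illustrative
driver-data pointer; the arguments expand to PCI_VENDOR_ID_REALTEK and
PCI_DEVICE_ID_REALTEK_8139 from <linux/pci_ids.h>):

	static const struct pci_device_id my_pci_ids[] = {
		{ PCI_DEVICE_DATA(REALTEK, 8139, &my_ops) },
		{ }
	};
	MODULE_DEVICE_TABLE(pci, my_pci_ids);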


@ -19,6 +19,7 @@
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/ntb.h>
#include <linux/pci.h>
MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
MODULE_VERSION("0.1");
@ -1487,7 +1488,7 @@ static int switchtec_ntb_add(struct device *dev,
stdev->sndev = NULL;
if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
if (stdev->pdev->class != (PCI_CLASS_BRIDGE_OTHER << 8))
return -ENODEV;
sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));


@ -273,6 +273,9 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
if (WARN_ON(pdev->pasid_enabled))
return -EBUSY;
if (!pdev->eetlp_prefix_path)
return -EINVAL;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
if (!pos)
return -EINVAL;


@ -102,7 +102,7 @@ config PCI_HOST_GENERIC
config PCIE_XILINX
bool "Xilinx AXI PCIe host bridge support"
depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC) || COMPILE_TEST
depends on OF || COMPILE_TEST
help
Say 'Y' here if you want the kernel to support the Xilinx AXI PCIe
Host Bridge driver.
@ -239,6 +239,16 @@ config PCIE_MEDIATEK
Say Y here if you want to enable PCIe controller support on
MediaTek SoCs.
config PCIE_MOBIVEIL
bool "Mobiveil AXI PCIe controller"
depends on ARCH_ZYNQMP || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
help
Say Y here if you want to enable support for the Mobiveil AXI PCIe
Soft IP. It has up to 8 outbound and inbound windows
for address translation and it is a PCIe Gen4 IP.
config PCIE_TANGO_SMP8759
bool "Tango SMP8759 PCIe controller (DANGEROUS)"
depends on ARCH_TANGO && PCI_MSI && OF


@ -26,6 +26,7 @@ obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
obj-$(CONFIG_VMD) += vmd.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW


@ -370,7 +370,7 @@ static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
}
static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
enum pci_epc_irq_type type, u8 interrupt_num)
enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);


@ -421,7 +421,6 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
}
}
pp->root_bus_nr = -1;
pp->ops = &exynos_pcie_host_ops;
ret = dw_pcie_host_init(pp);


@ -667,7 +667,6 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
}
}
pp->root_bus_nr = -1;
pp->ops = &imx6_pcie_host_ops;
ret = dw_pcie_host_init(pp);

View File

@ -347,7 +347,6 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
}
}
pp->root_bus_nr = -1;
pp->ops = &keystone_pcie_host_ops;
ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
if (ret) {


@ -172,7 +172,6 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
struct device *dev = &pdev->dev;
int ret;
pp->root_bus_nr = -1;
pp->ops = &armada8k_pcie_host_ops;
pp->irq = platform_get_irq(pdev, 0);


@ -399,7 +399,6 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
}
}
pp->root_bus_nr = -1;
pp->ops = &artpec6_pcie_host_ops;
ret = dw_pcie_host_init(pp);
@ -428,7 +427,7 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
}
static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
enum pci_epc_irq_type type, u8 interrupt_num)
enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);


@ -40,6 +40,39 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
__dw_pcie_ep_reset_bar(pci, bar, 0);
}
static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
u8 cap)
{
u8 cap_id, next_cap_ptr;
u16 reg;
reg = dw_pcie_readw_dbi(pci, cap_ptr);
next_cap_ptr = (reg & 0xff00) >> 8;
cap_id = (reg & 0x00ff);
if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
return 0;
if (cap_id == cap)
return cap_ptr;
return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
}
static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap)
{
u8 next_cap_ptr;
u16 reg;
reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
next_cap_ptr = (reg & 0x00ff);
if (!next_cap_ptr)
return 0;
return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
}
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
struct pci_epf_header *hdr)
{
@ -213,36 +246,84 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
{
int val;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 val, reg;
val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
if (!(val & MSI_CAP_MSI_EN_MASK))
if (!ep->msi_cap)
return -EINVAL;
val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
reg = ep->msi_cap + PCI_MSI_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
if (!(val & PCI_MSI_FLAGS_ENABLE))
return -EINVAL;
val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
return val;
}
static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 encode_int)
static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
{
int val;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 val, reg;
val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
val &= ~MSI_CAP_MMC_MASK;
val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK;
if (!ep->msi_cap)
return -EINVAL;
reg = ep->msi_cap + PCI_MSI_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
val &= ~PCI_MSI_FLAGS_QMASK;
val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
dw_pcie_writew_dbi(pci, reg, val);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 val, reg;
if (!ep->msix_cap)
return -EINVAL;
reg = ep->msix_cap + PCI_MSIX_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
if (!(val & PCI_MSIX_FLAGS_ENABLE))
return -EINVAL;
val &= PCI_MSIX_FLAGS_QSIZE;
return val;
}
static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 val, reg;
if (!ep->msix_cap)
return -EINVAL;
reg = ep->msix_cap + PCI_MSIX_FLAGS;
val = dw_pcie_readw_dbi(pci, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
val |= interrupts;
dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writew_dbi(pci, reg, val);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
enum pci_epc_irq_type type, u8 interrupt_num)
enum pci_epc_irq_type type, u16 interrupt_num)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@ -282,32 +363,52 @@ static const struct pci_epc_ops epc_ops = {
.unmap_addr = dw_pcie_ep_unmap_addr,
.set_msi = dw_pcie_ep_set_msi,
.get_msi = dw_pcie_ep_get_msi,
.set_msix = dw_pcie_ep_set_msix,
.get_msix = dw_pcie_ep_get_msix,
.raise_irq = dw_pcie_ep_raise_irq,
.start = dw_pcie_ep_start,
.stop = dw_pcie_ep_stop,
};
int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
dev_err(dev, "EP cannot trigger legacy IRQs\n");
return -EINVAL;
}
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct pci_epc *epc = ep->epc;
u16 msg_ctrl, msg_data;
u32 msg_addr_lower, msg_addr_upper;
u32 msg_addr_lower, msg_addr_upper, reg;
u64 msg_addr;
bool has_upper;
int ret;
if (!ep->msi_cap)
return -EINVAL;
/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
msg_ctrl = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
reg = ep->msi_cap + PCI_MSI_FLAGS;
msg_ctrl = dw_pcie_readw_dbi(pci, reg);
has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
msg_addr_lower = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
if (has_upper) {
msg_addr_upper = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);
msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_64);
reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
reg = ep->msi_cap + PCI_MSI_DATA_64;
msg_data = dw_pcie_readw_dbi(pci, reg);
} else {
msg_addr_upper = 0;
msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_32);
reg = ep->msi_cap + PCI_MSI_DATA_32;
msg_data = dw_pcie_readw_dbi(pci, reg);
}
msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
@ -322,6 +423,64 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct pci_epc *epc = ep->epc;
u16 tbl_offset, bir;
u32 bar_addr_upper, bar_addr_lower;
u32 msg_addr_upper, msg_addr_lower;
u32 reg, msg_data, vec_ctrl;
u64 tbl_addr, msg_addr, reg_u64;
void __iomem *msix_tbl;
int ret;
reg = ep->msix_cap + PCI_MSIX_TABLE;
tbl_offset = dw_pcie_readl_dbi(pci, reg);
bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
tbl_offset >>= 3;
reg = PCI_BASE_ADDRESS_0 + (4 * bir);
bar_addr_upper = 0;
bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
PCI_MSIX_ENTRY_SIZE);
if (!msix_tbl)
return -EINVAL;
msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);
iounmap(msix_tbl);
if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)
return -EPERM;
ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
epc->mem->page_size);
if (ret)
return ret;
writel(msg_data, ep->msi_mem);
dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
return 0;
}
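
For reference, the Table Offset/BIR register decoded above packs a 3-bit BAR Indicator (PCI_MSIX_TABLE_BIR) into its low bits; the remaining bits, masked with PCI_MSIX_TABLE_OFFSET, are already the byte offset of the table within that BAR, and each vector occupies one 16-byte entry. A hypothetical helper making the arithmetic explicit:

static u64 example_msix_entry_offset(u32 table_off_bir, u16 interrupt_num)
{
	u32 offset = table_off_bir & PCI_MSIX_TABLE_OFFSET; /* 0xfffffff8 */

	return offset + (u64)(interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE;
}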
void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
struct pci_epc *epc = ep->epc;
@@ -386,15 +545,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return -ENOMEM;
ep->outbound_addr = addr;
if (ep->ops->ep_init)
ep->ops->ep_init(ep);
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
dev_err(dev, "Failed to create epc device\n");
return PTR_ERR(epc);
}
ep->epc = epc;
epc_set_drvdata(epc, ep);
if (ep->ops->ep_init)
ep->ops->ep_init(ep);
ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
if (ret < 0)
epc->max_functions = 1;
@@ -409,15 +571,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
epc->mem->page_size);
if (!ep->msi_mem) {
dev_err(dev, "Failed to reserve memory for MSI\n");
dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
return -ENOMEM;
}
ep->msi_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSI);
epc->features = EPC_FEATURE_NO_LINKUP_NOTIFIER;
EPC_FEATURE_SET_BAR(epc->features, BAR_0);
ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX);
ep->epc = epc;
epc_set_drvdata(epc, ep);
dw_pcie_setup(pci);
return 0;


@@ -70,24 +70,29 @@ static const struct dw_pcie_ops dw_pcie_ops = {
static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct pci_epc *epc = ep->epc;
enum pci_barno bar;
for (bar = BAR_0; bar <= BAR_5; bar++)
dw_pcie_ep_reset_bar(pci, bar);
epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
epc->features |= EPC_FEATURE_MSIX_AVAILABLE;
}
static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
enum pci_epc_irq_type type,
u8 interrupt_num)
u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
case PCI_EPC_IRQ_LEGACY:
dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
return -EINVAL;
return dw_pcie_ep_raise_legacy_irq(ep, func_no);
case PCI_EPC_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
case PCI_EPC_IRQ_MSIX:
return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
}
@@ -118,7 +123,6 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
return pp->msi_irq;
}
pp->root_bus_nr = -1;
pp->ops = &dw_plat_pcie_host_ops;
ret = dw_pcie_host_init(pp);


@@ -96,17 +96,6 @@
#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
((0x3 << 20) | ((region) << 9) | (0x1 << 8))
#define MSI_MESSAGE_CONTROL 0x52
#define MSI_CAP_MMC_SHIFT 1
#define MSI_CAP_MMC_MASK (7 << MSI_CAP_MMC_SHIFT)
#define MSI_CAP_MME_SHIFT 4
#define MSI_CAP_MSI_EN_MASK 0x1
#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT)
#define MSI_MESSAGE_ADDR_L32 0x54
#define MSI_MESSAGE_ADDR_U32 0x58
#define MSI_MESSAGE_DATA_32 0x58
#define MSI_MESSAGE_DATA_64 0x5C
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
@@ -191,7 +180,7 @@ enum dw_pcie_as_type {
struct dw_pcie_ep_ops {
void (*ep_init)(struct dw_pcie_ep *ep);
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
enum pci_epc_irq_type type, u8 interrupt_num);
enum pci_epc_irq_type type, u16 interrupt_num);
};
struct dw_pcie_ep {
@@ -208,6 +197,8 @@ struct dw_pcie_ep {
u32 num_ob_windows;
void __iomem *msi_mem;
phys_addr_t msi_mem_phys;
u8 msi_cap; /* MSI capability offset */
u8 msix_cap; /* MSI-X capability offset */
};
struct dw_pcie_ops {
@@ -357,8 +348,11 @@ static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num);
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num);
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
#else
static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
@@ -374,12 +368,23 @@ static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
}
static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
{
return 0;
}
static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
return 0;
}
static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
return 0;
}
static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
}


@@ -420,7 +420,6 @@ static int histb_pcie_probe(struct platform_device *pdev)
phy_init(hipcie->phy);
}
pp->root_bus_nr = -1;
pp->ops = &histb_pcie_host_ops;
platform_set_drvdata(pdev, hipcie);


@@ -430,6 +430,9 @@ static int kirin_pcie_host_init(struct pcie_port *pp)
{
kirin_pcie_establish_link(pp);
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
return 0;
}
@@ -445,9 +448,34 @@ static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
.host_init = kirin_pcie_host_init,
};
static int kirin_pcie_add_msi(struct dw_pcie *pci,
struct platform_device *pdev)
{
int irq;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev,
"failed to get MSI IRQ (%d)\n", irq);
return irq;
}
pci->pp.msi_irq = irq;
}
return 0;
}
static int __init kirin_add_pcie_port(struct dw_pcie *pci,
struct platform_device *pdev)
{
int ret;
ret = kirin_pcie_add_msi(pci, pdev);
if (ret)
return ret;
pci->pp.ops = &kirin_pcie_host_ops;
return dw_pcie_host_init(&pci->pp);


@@ -1251,7 +1251,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
pp->root_bus_nr = -1;
pp->ops = &qcom_pcie_dw_ops;
if (IS_ENABLED(CONFIG_PCI_MSI)) {


@@ -210,7 +210,6 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
return ret;
}
pp->root_bus_nr = -1;
pp->ops = &spear13xx_pcie_host_ops;
ret = dw_pcie_host_init(pp);


@@ -111,24 +111,6 @@
#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
/* PCIe window configuration */
#define OB_WIN_BASE_ADDR 0x4c00
#define OB_WIN_BLOCK_SIZE 0x20
#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
OB_WIN_BLOCK_SIZE * (win) + \
(offset))
#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
/* PCIe window types */
#define OB_PCIE_MEM 0x0
#define OB_PCIE_IO 0x4
/* LMI registers base address and register offsets */
#define LMI_BASE_ADDR 0x6000
#define CFG_REG (LMI_BASE_ADDR + 0x0)
@@ -247,34 +229,9 @@ static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
return -ETIMEDOUT;
}
/*
* Set PCIe address window register which could be used for memory
* mapping.
*/
static void advk_pcie_set_ob_win(struct advk_pcie *pcie,
u32 win_num, u32 match_ms,
u32 match_ls, u32 mask_ms,
u32 mask_ls, u32 remap_ms,
u32 remap_ls, u32 action)
{
advk_writel(pcie, match_ls, OB_WIN_MATCH_LS(win_num));
advk_writel(pcie, match_ms, OB_WIN_MATCH_MS(win_num));
advk_writel(pcie, mask_ms, OB_WIN_MASK_MS(win_num));
advk_writel(pcie, mask_ls, OB_WIN_MASK_LS(win_num));
advk_writel(pcie, remap_ms, OB_WIN_REMAP_MS(win_num));
advk_writel(pcie, remap_ls, OB_WIN_REMAP_LS(win_num));
advk_writel(pcie, action, OB_WIN_ACTIONS(win_num));
advk_writel(pcie, match_ls | BIT(0), OB_WIN_MATCH_LS(win_num));
}
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
u32 reg;
int i;
/* Point PCIe unit MBUS decode windows to DRAM space */
for (i = 0; i < 8; i++)
advk_pcie_set_ob_win(pcie, i, 0, 0, 0, 0, 0, 0, 0);
/* Set to Direct mode */
reg = advk_readl(pcie, CTRL_CONFIG_REG);
@@ -433,6 +390,15 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
return -ETIMEDOUT;
}
static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
int devfn)
{
if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
return false;
return true;
}
static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
{
@@ -440,7 +406,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
u32 reg;
int ret;
if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) {
if (!advk_pcie_valid_device(pcie, bus, devfn)) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
@@ -494,7 +460,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int offset;
int ret;
if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
if (!advk_pcie_valid_device(pcie, bus, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
if (where % size)
@@ -843,12 +809,6 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
switch (resource_type(res)) {
case IORESOURCE_IO:
advk_pcie_set_ob_win(pcie, 1,
upper_32_bits(res->start),
lower_32_bits(res->start),
0, 0xF8000000, 0,
lower_32_bits(res->start),
OB_PCIE_IO);
err = devm_pci_remap_iospace(dev, res, iobase);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
@@ -857,12 +817,6 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
}
break;
case IORESOURCE_MEM:
advk_pcie_set_ob_win(pcie, 0,
upper_32_bits(res->start),
lower_32_bits(res->start),
0x0, 0xF8000000, 0,
lower_32_bits(res->start),
(2 << 20) | OB_PCIE_MEM);
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
break;
case IORESOURCE_BUS:
@@ -889,7 +843,6 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct advk_pcie *pcie;
struct resource *res;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
int ret, irq;
@@ -943,21 +896,13 @@ static int advk_pcie_probe(struct platform_device *pdev)
bridge->map_irq = of_irq_parse_and_map_pci;
bridge->swizzle_irq = pci_common_swizzle;
ret = pci_scan_root_bus_bridge(bridge);
ret = pci_host_probe(bridge);
if (ret < 0) {
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
return ret;
}
bus = bridge->bus;
pci_bus_assign_resources(bus);
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
}


@@ -1546,7 +1546,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
unsigned long flags;
int ret;
hpdev = kzalloc(sizeof(*hpdev), GFP_ATOMIC);
hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
if (!hpdev)
return NULL;


@@ -125,6 +125,7 @@ struct mvebu_pcie {
struct platform_device *pdev;
struct mvebu_pcie_port *ports;
struct msi_controller *msi;
struct list_head resources;
struct resource io;
struct resource realio;
struct resource mem;
@@ -800,7 +801,7 @@ static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
struct mvebu_pcie *pcie = bus->sysdata;
struct mvebu_pcie_port *port;
int ret;
@@ -826,7 +827,7 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
struct mvebu_pcie *pcie = bus->sysdata;
struct mvebu_pcie_port *port;
int ret;
@@ -857,36 +858,6 @@ static struct pci_ops mvebu_pcie_ops = {
.write = mvebu_pcie_wr_conf,
};
static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct mvebu_pcie *pcie = sys_to_pcie(sys);
int err, i;
pcie->mem.name = "PCI MEM";
pcie->realio.name = "PCI I/O";
if (resource_size(&pcie->realio) != 0)
pci_add_resource_offset(&sys->resources, &pcie->realio,
sys->io_offset);
pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
pci_add_resource(&sys->resources, &pcie->busn);
err = devm_request_pci_bus_resources(&pcie->pdev->dev, &sys->resources);
if (err)
return 0;
for (i = 0; i < pcie->nports; i++) {
struct mvebu_pcie_port *port = &pcie->ports[i];
if (!port->base)
continue;
mvebu_pcie_setup_hw(port);
}
return 1;
}
static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
const struct resource *res,
resource_size_t start,
@@ -917,31 +888,6 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
return start;
}
static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
{
struct hw_pci hw;
memset(&hw, 0, sizeof(hw));
#ifdef CONFIG_PCI_MSI
hw.msi_ctrl = pcie->msi;
#endif
hw.nr_controllers = 1;
hw.private_data = (void **)&pcie;
hw.setup = mvebu_pcie_setup;
hw.map_irq = of_irq_parse_and_map_pci;
hw.ops = &mvebu_pcie_ops;
hw.align_resource = mvebu_pcie_align_resource;
pci_common_init_dev(&pcie->pdev->dev, &hw);
}
/*
* Looks up the list of register addresses encoded into the reg =
* <...> property for one that matches the given port/lane. Once
* found, maps it.
*/
static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
struct device_node *np,
struct mvebu_pcie_port *port)
@@ -1190,38 +1136,19 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
clk_disable_unprepare(port->clk);
}
static int mvebu_pcie_probe(struct platform_device *pdev)
/*
* We can't use devm_of_pci_get_host_bridge_resources() because we
* need to parse our special DT properties encoding the MEM and IO
* apertures.
*/
static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
{
struct device *dev = &pdev->dev;
struct mvebu_pcie *pcie;
struct device *dev = &pcie->pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *child;
int num, i, ret;
unsigned int i;
int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
pcie->pdev = pdev;
platform_set_drvdata(pdev, pcie);
/* Get the PCIe memory and I/O aperture */
mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
if (resource_size(&pcie->mem) == 0) {
dev_err(dev, "invalid memory aperture size\n");
return -EINVAL;
}
mvebu_mbus_get_pcie_io_aperture(&pcie->io);
if (resource_size(&pcie->io) != 0) {
pcie->realio.flags = pcie->io.flags;
pcie->realio.start = PCIBIOS_MIN_IO;
pcie->realio.end = min_t(resource_size_t,
IO_SPACE_LIMIT,
resource_size(&pcie->io));
} else
pcie->realio = pcie->io;
INIT_LIST_HEAD(&pcie->resources);
/* Get the bus range */
ret = of_pci_parse_bus_range(np, &pcie->busn);
@@ -1229,6 +1156,58 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
dev_err(dev, "failed to parse bus-range property: %d\n", ret);
return ret;
}
pci_add_resource(&pcie->resources, &pcie->busn);
/* Get the PCIe memory aperture */
mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
if (resource_size(&pcie->mem) == 0) {
dev_err(dev, "invalid memory aperture size\n");
return -EINVAL;
}
pcie->mem.name = "PCI MEM";
pci_add_resource(&pcie->resources, &pcie->mem);
/* Get the PCIe IO aperture */
mvebu_mbus_get_pcie_io_aperture(&pcie->io);
if (resource_size(&pcie->io) != 0) {
pcie->realio.flags = pcie->io.flags;
pcie->realio.start = PCIBIOS_MIN_IO;
pcie->realio.end = min_t(resource_size_t,
IO_SPACE_LIMIT - SZ_64K,
resource_size(&pcie->io) - 1);
pcie->realio.name = "PCI I/O";
for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
pci_ioremap_io(i, pcie->io.start + i);
pci_add_resource(&pcie->resources, &pcie->realio);
}
return devm_request_pci_bus_resources(dev, &pcie->resources);
}
static int mvebu_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mvebu_pcie *pcie;
struct pci_host_bridge *bridge;
struct device_node *np = dev->of_node;
struct device_node *child;
int num, i, ret;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie));
if (!bridge)
return -ENOMEM;
pcie = pci_host_bridge_priv(bridge);
pcie->pdev = pdev;
platform_set_drvdata(pdev, pcie);
ret = mvebu_pcie_parse_request_resources(pcie);
if (ret)
return ret;
num = of_get_available_child_count(np);
@@ -1272,20 +1251,24 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
continue;
}
mvebu_pcie_setup_hw(port);
mvebu_pcie_set_local_dev_nr(port, 1);
mvebu_sw_pci_bridge_init(port);
}
pcie->nports = i;
for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K)
pci_ioremap_io(i, pcie->io.start + i);
list_splice_init(&pcie->resources, &bridge->windows);
bridge->dev.parent = dev;
bridge->sysdata = pcie;
bridge->busnr = 0;
bridge->ops = &mvebu_pcie_ops;
bridge->map_irq = of_irq_parse_and_map_pci;
bridge->swizzle_irq = pci_common_swizzle;
bridge->align_resource = mvebu_pcie_align_resource;
bridge->msi = pcie->msi;
mvebu_pcie_enable(pcie);
platform_set_drvdata(pdev, pcie);
return 0;
return pci_host_probe(bridge);
}
static const struct of_device_id mvebu_pcie_of_match_table[] = {


@@ -238,7 +238,7 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mmc, mme;
u16 flags, mme;
/* Validate that the MSI feature is actually enabled. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
@@ -249,7 +249,6 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
* Get the Multiple Message Enable bitfield from the Message Control
* register.
*/
mmc = (flags & PCI_MSI_FLAGS_QMASK) >> 1;
mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
return mme;
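
The MME value returned here is log2-encoded, as defined by the MSI capability layout, and pci_epc_get_msi() converts it to a vector count. An illustrative decode:

static unsigned int example_msi_vectors(u16 flags)
{
	u16 mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;

	return 1U << mme;	/* 1, 2, 4, ..., 32 vectors */
}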
@@ -363,7 +362,8 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
}
static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
enum pci_epc_irq_type type, u8 interrupt_num)
enum pci_epc_irq_type type,
u16 interrupt_num)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -439,6 +439,7 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
struct pci_epc *epc;
struct resource *res;
int ret;
int phy_count;
ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
if (!ep)
@@ -473,6 +474,12 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
if (!ep->ob_addr)
return -ENOMEM;
ret = cdns_pcie_init_phy(dev, pcie);
if (ret) {
dev_err(dev, "failed to init phy\n");
return ret;
}
platform_set_drvdata(pdev, pcie);
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
@@ -521,6 +528,10 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
err_get_sync:
pm_runtime_disable(dev);
cdns_pcie_disable_phy(pcie);
phy_count = pcie->phy_count;
while (phy_count--)
device_link_del(pcie->link[phy_count]);
return ret;
}
@@ -528,6 +539,7 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cdns_pcie *pcie = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_put_sync(dev);
@@ -536,13 +548,14 @@ static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
pm_runtime_disable(dev);
/* The PCIe controller can't be disabled. */
cdns_pcie_disable_phy(pcie);
}
static struct platform_driver cdns_pcie_ep_driver = {
.driver = {
.name = "cdns-pcie-ep",
.of_match_table = cdns_pcie_ep_of_match,
.pm = &cdns_pcie_pm_ops,
},
.probe = cdns_pcie_ep_probe,
.shutdown = cdns_pcie_ep_shutdown,


@@ -58,6 +58,11 @@ static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
return pcie->reg_base + (where & 0xfff);
}
/* Check that the link is up */
if (!(cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1))
return NULL;
/* Clear AXI link-down status */
cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0);
/* Update Output registers for AXI region 0. */
addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
@@ -239,6 +244,7 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
struct cdns_pcie *pcie;
struct resource *res;
int ret;
int phy_count;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
if (!bridge)
@@ -290,6 +296,13 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
}
pcie->mem_res = res;
ret = cdns_pcie_init_phy(dev, pcie);
if (ret) {
dev_err(dev, "failed to init phy\n");
return ret;
}
platform_set_drvdata(pdev, pcie);
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
@@ -322,15 +335,35 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
err_get_sync:
pm_runtime_disable(dev);
cdns_pcie_disable_phy(pcie);
phy_count = pcie->phy_count;
while (phy_count--)
device_link_del(pcie->link[phy_count]);
return ret;
}
static void cdns_pcie_shutdown(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cdns_pcie *pcie = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_put_sync(dev);
if (ret < 0)
dev_dbg(dev, "pm_runtime_put_sync failed\n");
pm_runtime_disable(dev);
cdns_pcie_disable_phy(pcie);
}
static struct platform_driver cdns_pcie_host_driver = {
.driver = {
.name = "cdns-pcie-host",
.of_match_table = cdns_pcie_host_of_match,
.pm = &cdns_pcie_pm_ops,
},
.probe = cdns_pcie_host_probe,
.shutdown = cdns_pcie_shutdown,
};
builtin_platform_driver(cdns_pcie_host_driver);


@@ -124,3 +124,126 @@ void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}
void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
{
int i = pcie->phy_count;
while (i--) {
phy_power_off(pcie->phy[i]);
phy_exit(pcie->phy[i]);
}
}
int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
{
int ret;
int i;
for (i = 0; i < pcie->phy_count; i++) {
ret = phy_init(pcie->phy[i]);
if (ret < 0)
goto err_phy;
ret = phy_power_on(pcie->phy[i]);
if (ret < 0) {
phy_exit(pcie->phy[i]);
goto err_phy;
}
}
return 0;
err_phy:
while (--i >= 0) {
phy_power_off(pcie->phy[i]);
phy_exit(pcie->phy[i]);
}
return ret;
}
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
{
struct device_node *np = dev->of_node;
int phy_count;
struct phy **phy;
struct device_link **link;
int i;
int ret;
const char *name;
phy_count = of_property_count_strings(np, "phy-names");
if (phy_count < 1) {
dev_err(dev, "no phy-names. PHY will not be initialized\n");
pcie->phy_count = 0;
return 0;
}
phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
if (!phy)
return -ENOMEM;
link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL);
if (!link)
return -ENOMEM;
for (i = 0; i < phy_count; i++) {
of_property_read_string_index(np, "phy-names", i, &name);
phy[i] = devm_phy_optional_get(dev, name);
if (IS_ERR(phy[i]))
return PTR_ERR(phy[i]);
link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
if (!link[i]) {
ret = -EINVAL;
goto err_link;
}
}
pcie->phy_count = phy_count;
pcie->phy = phy;
pcie->link = link;
ret = cdns_pcie_enable_phy(pcie);
if (ret)
goto err_link;
return 0;
err_link:
while (--i >= 0)
device_link_del(link[i]);
return ret;
}
#ifdef CONFIG_PM_SLEEP
static int cdns_pcie_suspend_noirq(struct device *dev)
{
struct cdns_pcie *pcie = dev_get_drvdata(dev);
cdns_pcie_disable_phy(pcie);
return 0;
}
static int cdns_pcie_resume_noirq(struct device *dev)
{
struct cdns_pcie *pcie = dev_get_drvdata(dev);
int ret;
ret = cdns_pcie_enable_phy(pcie);
if (ret) {
dev_err(dev, "failed to enable phy\n");
return ret;
}
return 0;
}
#endif
const struct dev_pm_ops cdns_pcie_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
cdns_pcie_resume_noirq)
};


@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
/*
* Local Management Registers
@@ -165,6 +166,9 @@
#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
(CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)
/* AXI link down register */
#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
enum cdns_pcie_rp_bar {
RP_BAR0,
RP_BAR1,
@@ -229,6 +233,9 @@ struct cdns_pcie {
struct resource *mem_res;
bool is_rc;
u8 bus;
int phy_count;
struct phy **phy;
struct device_link **link;
};
/* Register access */
@@ -279,7 +286,7 @@ static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
}
static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
u32 reg, u16 value)
u32 reg, u32 value)
{
writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}
@@ -307,5 +314,9 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn,
u32 r, u64 cpu_addr);
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
void cdns_pcie_disable_phy(struct cdns_pcie *pcie);
int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
extern const struct dev_pm_ops cdns_pcie_pm_ops;
#endif /* _PCIE_CADENCE_H */


@@ -85,6 +85,8 @@
#define IMAP_VALID_SHIFT 0
#define IMAP_VALID BIT(IMAP_VALID_SHIFT)
#define IPROC_PCI_PM_CAP 0x48
#define IPROC_PCI_PM_CAP_MASK 0xffff
#define IPROC_PCI_EXP_CAP 0xac
#define IPROC_PCIE_REG_INVALID 0xffff
@@ -375,6 +377,17 @@ static const u16 iproc_pcie_reg_paxc_v2[] = {
[IPROC_PCIE_CFG_DATA] = 0x1fc,
};
/*
* List of device IDs of controllers that have a corrupted capability list
* and require a SW fixup
*/
static const u16 iproc_pcie_corrupt_cap_did[] = {
0x16cd,
0x16f0,
0xd802,
0xd804
};
static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
{
struct iproc_pcie *pcie = bus->sysdata;
@@ -495,6 +508,49 @@ static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p)
return data;
}
static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
{
u32 i, dev_id;
switch (where & ~0x3) {
case PCI_VENDOR_ID:
dev_id = *val >> 16;
/*
* Activate fixup for those controllers that have corrupted
* capability list registers
*/
for (i = 0; i < ARRAY_SIZE(iproc_pcie_corrupt_cap_did); i++)
if (dev_id == iproc_pcie_corrupt_cap_did[i])
pcie->fix_paxc_cap = true;
break;
case IPROC_PCI_PM_CAP:
if (pcie->fix_paxc_cap) {
/* advertise PM, force next capability to PCIe */
*val &= ~IPROC_PCI_PM_CAP_MASK;
*val |= IPROC_PCI_EXP_CAP << 8 | PCI_CAP_ID_PM;
}
break;
case IPROC_PCI_EXP_CAP:
if (pcie->fix_paxc_cap) {
/* advertise root port, version 2, terminate here */
*val = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2) << 16 |
PCI_CAP_ID_EXP;
}
break;
case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
/* Don't advertise CRS SV support */
*val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
break;
default:
break;
}
}
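
The fixup works because every conventional capability starts with a one-byte Capability ID followed by a one-byte pointer to the next capability (0x00 terminates the list), so rewriting those two bytes is enough to splice or truncate the chain. A hypothetical encoder mirroring the branches above:

static u32 example_cap_header(u8 cap_id, u8 next_ptr, u16 cap_specific)
{
	/* byte 0: Capability ID, byte 1: next capability pointer,
	 * upper 16 bits: capability-specific contents
	 */
	return (u32)cap_specific << 16 | (u32)next_ptr << 8 | cap_id;
}

With this, the IPROC_PCI_PM_CAP branch amounts to replacing the low 16 bits with example_cap_header(PCI_CAP_ID_PM, IPROC_PCI_EXP_CAP, 0) while preserving the PM-specific upper half.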
static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
@@ -509,13 +565,10 @@ static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
/* root complex access */
if (busno == 0) {
ret = pci_generic_config_read32(bus, devfn, where, size, val);
if (ret != PCIBIOS_SUCCESSFUL)
return ret;
if (ret == PCIBIOS_SUCCESSFUL)
iproc_pcie_fix_cap(pcie, where, val);
/* Don't advertise CRS SV support */
if ((where & ~0x3) == IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL)
*val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
return PCIBIOS_SUCCESSFUL;
return ret;
}
cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
@@ -529,6 +582,25 @@ static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
if (size <= 2)
*val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
/*
* For PAXC and PAXCv2, the total number of PFs that one can enumerate
* depends on the firmware configuration. Unfortunately, due to an ASIC
* bug, unconfigured PFs cannot be properly hidden from the root
* complex. As a result, write access to these PFs will cause a bus
* lockup on the embedded processor.
*
* Since all unconfigured PFs are left with an incorrect, stale device
* ID of 0x168e (PCI_DEVICE_ID_NX2_57810), we try to catch those
* accesses early here and reject them all.
*/
#define DEVICE_ID_MASK 0xffff0000
#define DEVICE_ID_SHIFT 16
if (pcie->rej_unconfig_pf &&
(where & CFG_ADDR_REG_NUM_MASK) == PCI_VENDOR_ID)
if ((*val & DEVICE_ID_MASK) ==
(PCI_DEVICE_ID_NX2_57810 << DEVICE_ID_SHIFT))
return PCIBIOS_FUNC_NOT_SUPPORTED;
return PCIBIOS_SUCCESSFUL;
}
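
A config read at PCI_VENDOR_ID returns the vendor ID in the low 16 bits and the device ID in the high 16, which is all the DEVICE_ID_MASK/DEVICE_ID_SHIFT test above relies on. A hypothetical predicate:

static bool example_stale_unconfig_pf(u32 vendor_dev)
{
	/* unconfigured PAXC PFs read back the stale NetXtreme II ID */
	return (vendor_dev >> 16) == PCI_DEVICE_ID_NX2_57810;
}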
@@ -628,7 +700,7 @@ static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
struct iproc_pcie *pcie = iproc_data(bus);
iproc_pcie_apb_err_disable(bus, true);
if (pcie->type == IPROC_PCIE_PAXB_V2)
if (pcie->iproc_cfg_read)
ret = iproc_pcie_config_read(bus, devfn, where, size, val);
else
ret = pci_generic_config_read32(bus, devfn, where, size, val);
@@ -808,14 +880,14 @@ static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx,
writel(lower_32_bits(pci_addr), pcie->base + omap_offset);
writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4);
dev_info(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
window_idx, oarr_offset, &axi_addr, &pci_addr);
dev_info(dev, "oarr lo 0x%x oarr hi 0x%x\n",
readl(pcie->base + oarr_offset),
readl(pcie->base + oarr_offset + 4));
dev_info(dev, "omap lo 0x%x omap hi 0x%x\n",
readl(pcie->base + omap_offset),
readl(pcie->base + omap_offset + 4));
dev_dbg(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n",
window_idx, oarr_offset, &axi_addr, &pci_addr);
dev_dbg(dev, "oarr lo 0x%x oarr hi 0x%x\n",
readl(pcie->base + oarr_offset),
readl(pcie->base + oarr_offset + 4));
dev_dbg(dev, "omap lo 0x%x omap hi 0x%x\n",
readl(pcie->base + omap_offset),
readl(pcie->base + omap_offset + 4));
return 0;
}
@@ -982,8 +1054,8 @@ static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
iproc_pcie_reg_is_invalid(imap_offset))
return -EINVAL;
dev_info(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
region_idx, iarr_offset, &axi_addr, &pci_addr);
dev_dbg(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n",
region_idx, iarr_offset, &axi_addr, &pci_addr);
/*
* Program the IARR registers. The upper 32-bit IARR register is
@@ -993,9 +1065,9 @@ static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
pcie->base + iarr_offset);
writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4);
dev_info(dev, "iarr lo 0x%x iarr hi 0x%x\n",
readl(pcie->base + iarr_offset),
readl(pcie->base + iarr_offset + 4));
dev_dbg(dev, "iarr lo 0x%x iarr hi 0x%x\n",
readl(pcie->base + iarr_offset),
readl(pcie->base + iarr_offset + 4));
/*
* Now program the IMAP registers. Each IARR region may have one or
@@ -1009,10 +1081,10 @@ static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx,
writel(upper_32_bits(axi_addr),
pcie->base + imap_offset + ib_map->imap_addr_offset);
dev_info(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
window_idx, readl(pcie->base + imap_offset),
readl(pcie->base + imap_offset +
ib_map->imap_addr_offset));
dev_dbg(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
window_idx, readl(pcie->base + imap_offset),
readl(pcie->base + imap_offset +
ib_map->imap_addr_offset));
imap_offset += ib_map->imap_window_offset;
axi_addr += size;
@@ -1144,10 +1216,22 @@ static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
return ret;
}
static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr)
static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr,
bool enable)
{
u32 val;
if (!enable) {
/*
* Disable PAXC MSI steering. All write transfers will be
* treated as non-MSI transfers
*/
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
val &= ~MSI_ENABLE_CFG;
iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
return;
}
/*
* Program bits [43:13] of address of GITS_TRANSLATER register into
* bits [30:0] of the MSI base address register. In fact, in all iProc
@@ -1201,7 +1285,7 @@ static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
return ret;
break;
case IPROC_PCIE_PAXC_V2:
iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr);
iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr, true);
break;
default:
return -EINVAL;
@@ -1271,6 +1355,7 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
break;
case IPROC_PCIE_PAXB:
regs = iproc_pcie_reg_paxb;
pcie->iproc_cfg_read = true;
pcie->has_apb_err_disable = true;
if (pcie->need_ob_cfg) {
pcie->ob_map = paxb_ob_map;
@@ -1293,10 +1378,14 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
case IPROC_PCIE_PAXC:
regs = iproc_pcie_reg_paxc;
pcie->ep_is_internal = true;
pcie->iproc_cfg_read = true;
pcie->rej_unconfig_pf = true;
break;
case IPROC_PCIE_PAXC_V2:
regs = iproc_pcie_reg_paxc_v2;
pcie->ep_is_internal = true;
pcie->iproc_cfg_read = true;
pcie->rej_unconfig_pf = true;
pcie->need_msi_steer = true;
break;
default:
@@ -1427,6 +1516,24 @@ int iproc_pcie_remove(struct iproc_pcie *pcie)
}
EXPORT_SYMBOL(iproc_pcie_remove);
/*
* The MSI parsing logic in certain revisions of Broadcom PAXC-based root
* complexes does not work and needs to be disabled
*/
static void quirk_paxc_disable_msi_parsing(struct pci_dev *pdev)
{
struct iproc_pcie *pcie = iproc_data(pdev->bus);
if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
iproc_pcie_paxc_v2_msi_steer(pcie, 0, false);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0,
quirk_paxc_disable_msi_parsing);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802,
quirk_paxc_disable_msi_parsing);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804,
quirk_paxc_disable_msi_parsing);
MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
MODULE_LICENSE("GPL v2");


@@ -58,8 +58,13 @@ struct iproc_msi;
* @phy: optional PHY device that controls the Serdes
* @map_irq: function callback to map interrupts
* @ep_is_internal: indicates an internal emulated endpoint device is connected
* @iproc_cfg_read: indicates the iProc config read function should be used
* @rej_unconfig_pf: indicates the root complex needs to detect and reject
* enumeration against unconfigured physical functions emulated in the ASIC
* @has_apb_err_disable: indicates the controller can be configured to prevent
* unsupported requests from being forwarded as an APB bus error
* @fix_paxc_cap: indicates the controller has a corrupted capability list in
* its config space registers and requires a SW-based fixup
*
* @need_ob_cfg: indicates SW needs to configure the outbound mapping window
* @ob: outbound mapping related parameters
@@ -84,7 +89,10 @@ struct iproc_pcie {
struct phy *phy;
int (*map_irq)(const struct pci_dev *, u8, u8);
bool ep_is_internal;
bool iproc_cfg_read;
bool rej_unconfig_pf;
bool has_apb_err_disable;
bool fix_paxc_cap;
bool need_ob_cfg;
struct iproc_pcie_ob ob;


@@ -23,6 +23,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "../pci.h"
/* register offsets and bit positions */
/*
@@ -130,7 +132,7 @@ struct mobiveil_pcie {
void __iomem *config_axi_slave_base; /* endpoint config base */
void __iomem *csr_axi_slave_base; /* root port config base */
void __iomem *apb_csr_base; /* MSI register base */
void __iomem *pcie_reg_base; /* Physical PCIe Controller Base */
phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
struct irq_domain *intx_domain;
raw_spinlock_t intx_mask_lock;
int irq;


@@ -472,7 +472,7 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
enum pci_epc_irq_type type,
u8 interrupt_num)
u16 interrupt_num)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);


@@ -197,9 +197,20 @@ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *d
int i, best = 1;
unsigned long flags;
if (pci_is_bridge(msi_desc_to_pci_dev(desc)) || vmd->msix_count == 1)
if (vmd->msix_count == 1)
return &vmd->irqs[0];
/*
* White list for fast-interrupt handlers. All others will share the
* "slow" interrupt vector.
*/
switch (msi_desc_to_pci_dev(desc)->class) {
case PCI_CLASS_STORAGE_EXPRESS:
break;
default:
return &vmd->irqs[0];
}
raw_spin_lock_irqsave(&list_lock, flags);
for (i = 1; i < vmd->msix_count; i++)
if (vmd->irqs[i].count < vmd->irqs[best].count)


@@ -18,13 +18,16 @@
#include <linux/pci-epf.h>
#include <linux/pci_regs.h>
#define IRQ_TYPE_LEGACY 0
#define IRQ_TYPE_MSI 1
#define IRQ_TYPE_MSIX 2
#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
#define MSI_NUMBER_SHIFT 2
#define MSI_NUMBER_MASK (0x3f << MSI_NUMBER_SHIFT)
#define COMMAND_READ BIT(8)
#define COMMAND_WRITE BIT(9)
#define COMMAND_COPY BIT(10)
#define COMMAND_RAISE_MSIX_IRQ BIT(2)
#define COMMAND_READ BIT(3)
#define COMMAND_WRITE BIT(4)
#define COMMAND_COPY BIT(5)
#define STATUS_READ_SUCCESS BIT(0)
#define STATUS_READ_FAIL BIT(1)
@@ -45,6 +48,7 @@ struct pci_epf_test {
struct pci_epf *epf;
enum pci_barno test_reg_bar;
bool linkup_notifier;
bool msix_available;
struct delayed_work cmd_handler;
};
@@ -56,6 +60,8 @@ struct pci_epf_test_reg {
u64 dst_addr;
u32 size;
u32 checksum;
u32 irq_type;
u32 irq_number;
} __packed;
static struct pci_epf_header test_header = {
@@ -244,31 +250,42 @@ err:
return ret;
}
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq)
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
u16 irq)
{
u8 msi_count;
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
reg->status |= STATUS_IRQ_RAISED;
msi_count = pci_epc_get_msi(epc, epf->func_no);
if (irq > msi_count || msi_count <= 0)
switch (irq_type) {
case IRQ_TYPE_LEGACY:
pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
else
break;
case IRQ_TYPE_MSI:
pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
break;
case IRQ_TYPE_MSIX:
pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
break;
default:
dev_err(dev, "Failed to raise IRQ, unknown type\n");
break;
}
}
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
int ret;
u8 irq;
u8 msi_count;
int count;
u32 command;
struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
cmd_handler.work);
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
@@ -280,7 +297,10 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->command = 0;
reg->status = 0;
irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
if (reg->irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Failed to detect IRQ type\n");
goto reset_handler;
}
if (command & COMMAND_RAISE_LEGACY_IRQ) {
reg->status = STATUS_IRQ_RAISED;
@@ -294,7 +314,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->status |= STATUS_WRITE_FAIL;
else
reg->status |= STATUS_WRITE_SUCCESS;
pci_epf_test_raise_irq(epf_test, irq);
pci_epf_test_raise_irq(epf_test, reg->irq_type,
reg->irq_number);
goto reset_handler;
}
@@ -304,7 +325,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->status |= STATUS_READ_SUCCESS;
else
reg->status |= STATUS_READ_FAIL;
pci_epf_test_raise_irq(epf_test, irq);
pci_epf_test_raise_irq(epf_test, reg->irq_type,
reg->irq_number);
goto reset_handler;
}
@@ -314,16 +336,28 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->status |= STATUS_COPY_SUCCESS;
else
reg->status |= STATUS_COPY_FAIL;
pci_epf_test_raise_irq(epf_test, irq);
pci_epf_test_raise_irq(epf_test, reg->irq_type,
reg->irq_number);
goto reset_handler;
}
if (command & COMMAND_RAISE_MSI_IRQ) {
msi_count = pci_epc_get_msi(epc, epf->func_no);
if (irq > msi_count || msi_count <= 0)
count = pci_epc_get_msi(epc, epf->func_no);
if (reg->irq_number > count || count <= 0)
goto reset_handler;
reg->status = STATUS_IRQ_RAISED;
pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
reg->irq_number);
goto reset_handler;
}
if (command & COMMAND_RAISE_MSIX_IRQ) {
count = pci_epc_get_msix(epc, epf->func_no);
if (reg->irq_number > count || count <= 0)
goto reset_handler;
reg->status = STATUS_IRQ_RAISED;
pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
reg->irq_number);
goto reset_handler;
}
@@ -440,6 +474,8 @@ static int pci_epf_test_bind(struct pci_epf *epf)
else
epf_test->linkup_notifier = true;
epf_test->msix_available = epc->features & EPC_FEATURE_MSIX_AVAILABLE;
epf_test->test_reg_bar = EPC_FEATURE_GET_BAR(epc->features);
ret = pci_epc_write_header(epc, epf->func_no, header);
@@ -457,8 +493,18 @@ static int pci_epf_test_bind(struct pci_epf *epf)
return ret;
ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
if (ret)
if (ret) {
dev_err(dev, "MSI configuration failed\n");
return ret;
}
if (epf_test->msix_available) {
ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts);
if (ret) {
dev_err(dev, "MSI-X configuration failed\n");
return ret;
}
}
if (!epf_test->linkup_notifier)
queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
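
When the EPC cannot raise a link-up notification, the command handler is queued immediately and keeps itself alive by re-arming its delayed work (the driver re-queues with a 1 ms period). A sketch of that polling shape, with the body elided:

static void example_cmd_poll(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* ... read reg->command, act on it, raise the requested IRQ ... */

	queue_delayed_work(kpcitest_workqueue, dwork, msecs_to_jiffies(1));
}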


@@ -286,6 +286,28 @@ static ssize_t pci_epf_msi_interrupts_show(struct config_item *item,
to_pci_epf_group(item)->epf->msi_interrupts);
}
static ssize_t pci_epf_msix_interrupts_store(struct config_item *item,
const char *page, size_t len)
{
u16 val;
int ret;
ret = kstrtou16(page, 0, &val);
if (ret)
return ret;
to_pci_epf_group(item)->epf->msix_interrupts = val;
return len;
}
static ssize_t pci_epf_msix_interrupts_show(struct config_item *item,
char *page)
{
return sprintf(page, "%d\n",
to_pci_epf_group(item)->epf->msix_interrupts);
}
PCI_EPF_HEADER_R(vendorid)
PCI_EPF_HEADER_W_u16(vendorid)
@@ -327,6 +349,7 @@ CONFIGFS_ATTR(pci_epf_, subsys_vendor_id);
CONFIGFS_ATTR(pci_epf_, subsys_id);
CONFIGFS_ATTR(pci_epf_, interrupt_pin);
CONFIGFS_ATTR(pci_epf_, msi_interrupts);
CONFIGFS_ATTR(pci_epf_, msix_interrupts);
static struct configfs_attribute *pci_epf_attrs[] = {
&pci_epf_attr_vendorid,
@@ -340,6 +363,7 @@ static struct configfs_attribute *pci_epf_attrs[] = {
&pci_epf_attr_subsys_id,
&pci_epf_attr_interrupt_pin,
&pci_epf_attr_msi_interrupts,
&pci_epf_attr_msix_interrupts,
NULL,
};


@@ -131,13 +131,13 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
* pci_epc_raise_irq() - interrupt the host system
* @epc: the EPC device which has to interrupt the host
* @func_no: the endpoint function number in the EPC device
* @type: specify the type of interrupt; legacy or MSI
* @interrupt_num: the MSI interrupt number
* @type: specify the type of interrupt; legacy, MSI or MSI-X
* @interrupt_num: the MSI or MSI-X interrupt number
*
* Invoke to raise an MSI or legacy interrupt
* Invoke to raise a legacy, MSI or MSI-X interrupt
*/
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
enum pci_epc_irq_type type, u8 interrupt_num)
enum pci_epc_irq_type type, u16 interrupt_num)
{
int ret;
unsigned long flags;
@@ -201,7 +201,8 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
u8 encode_int;
unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
interrupts > 32)
return -EINVAL;
if (!epc->ops->set_msi)
@@ -217,6 +218,63 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
}
EXPORT_SYMBOL_GPL(pci_epc_set_msi);
/**
* pci_epc_get_msix() - get the number of MSI-X interrupts allocated
* @epc: the EPC device to which MSI-X interrupts were requested
* @func_no: the endpoint function number in the EPC device
*
* Invoke to get the number of MSI-X interrupts allocated by the RC
*/
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no)
{
int interrupt;
unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
return 0;
if (!epc->ops->get_msix)
return 0;
spin_lock_irqsave(&epc->lock, flags);
interrupt = epc->ops->get_msix(epc, func_no);
spin_unlock_irqrestore(&epc->lock, flags);
if (interrupt < 0)
return 0;
return interrupt + 1;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msix);
/**
* pci_epc_set_msix() - set the number of MSI-X interrupts required
* @epc: the EPC device on which MSI-X has to be configured
* @func_no: the endpoint function number in the EPC device
* @interrupts: number of MSI-X interrupts required by the EPF
*
* Invoke to set the required number of MSI-X interrupts.
*/
int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
{
int ret;
unsigned long flags;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
interrupts < 1 || interrupts > 2048)
return -EINVAL;
if (!epc->ops->set_msix)
return 0;
spin_lock_irqsave(&epc->lock, flags);
ret = epc->ops->set_msix(epc, func_no, interrupts - 1);
spin_unlock_irqrestore(&epc->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msix);
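
Note the symmetry between the two helpers: set_msix() stores interrupts - 1 (the raw Table Size encoding) and get_msix() returns interrupt + 1, so for any n in [1, 2048] a set followed by a get is the identity. An illustrative check:

static void example_msix_roundtrip(struct pci_epc *epc, u8 func_no)
{
	int n = 16;	/* any value in [1, 2048] */

	if (pci_epc_set_msix(epc, func_no, n))	/* stores n - 1 */
		return;
	WARN_ON(pci_epc_get_msix(epc, func_no) != n);	/* reads back n */
}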
/**
* pci_epc_unmap_addr() - unmap CPU address from PCI address
* @epc: the EPC device on which address is allocated


@@ -73,20 +73,6 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
acpi_handle chandle, handle;
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
/*
* Per PCI firmware specification, we should run the ACPI _OSC
* method to get control of hotplug hardware before using it. If
* an _OSC is missing, we look for an OSHP to do the same thing.
* To handle different BIOS behavior, we look for _OSC on a root
* bridge preferentially (according to PCI fw spec). Later for
* OSHP within the scope of the hotplug controller and its parents,
* up to the host bridge under which this controller exists.
*/
if (shpchp_is_native(pdev))
return 0;
/* If _OSC exists, we should not evaluate OSHP */
/*
* If there's no ACPI host bridge (i.e., ACPI support is compiled
* into the kernel but the hardware platform doesn't support ACPI),
@@ -97,9 +83,25 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
if (!root)
return 0;
if (root->osc_support_set)
goto no_control;
/*
* If _OSC exists, it determines whether we're allowed to manage
* the SHPC. We executed it while enumerating the host bridge.
*/
if (root->osc_support_set) {
if (host->native_shpc_hotplug)
return 0;
return -ENODEV;
}
/*
* In the absence of _OSC, we're always allowed to manage the SHPC.
* However, if an OSHP method is present, we must execute it so the
* firmware can transfer control to the OS, e.g., direct interrupts
* to the OS instead of to the firmware.
*
* N.B. The PCI Firmware Spec (r3.2, sec 4.8) does not endorse
* searching up the ACPI hierarchy, so the loops below are suspect.
*/
handle = ACPI_HANDLE(&pdev->dev);
if (!handle) {
/*
@@ -128,7 +130,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
if (ACPI_FAILURE(status))
break;
}
no_control:
pci_info(pdev, "Cannot get control of SHPC hotplug\n");
kfree(string.pointer);
return -ENODEV;


@@ -254,20 +254,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
/**
* release_slot - free up the memory used by a slot
* @hotplug_slot: slot to free
*/
static void release_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
kfree(slot->hotplug_slot);
kfree(slot);
}
/* callback routine to initialize 'struct slot' for each slot */
int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
unsigned int sun)
@@ -287,7 +273,6 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
slot->hotplug_slot->info = &slot->info;
slot->hotplug_slot->private = slot;
slot->hotplug_slot->release = &release_slot;
slot->hotplug_slot->ops = &acpi_hotplug_slot_ops;
slot->acpi_slot = acpiphp_slot;
@@ -324,13 +309,12 @@ error:
void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
{
struct slot *slot = acpiphp_slot->slot;
int retval = 0;
pr_info("Slot [%s] unregistered\n", slot_name(slot));
retval = pci_hp_deregister(slot->hotplug_slot);
if (retval)
pr_err("pci_hp_deregister failed with error %d\n", retval);
pci_hp_deregister(slot->hotplug_slot);
kfree(slot->hotplug_slot);
kfree(slot);
}


@@ -195,10 +195,8 @@
return 0;
}
static void release_slot(struct hotplug_slot *hotplug_slot)
static void release_slot(struct slot *slot)
{
struct slot *slot = hotplug_slot->private;
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot);
pci_dev_put(slot->dev);
@@ -253,7 +251,6 @@
snprintf(name, SLOT_NAME_SIZE, "%02x:%02x", bus->number, i);
hotplug_slot->private = slot;
hotplug_slot->release = &release_slot;
hotplug_slot->ops = &cpci_hotplug_slot_ops;
/*
@@ -308,12 +305,8 @@
slots--;
dbg("deregistering slot %s", slot_name(slot));
status = pci_hp_deregister(slot->hotplug_slot);
if (status) {
err("pci_hp_deregister failed with error %d",
status);
break;
}
pci_hp_deregister(slot->hotplug_slot);
release_slot(slot);
}
}
up_write(&list_rwsem);
@@ -623,6 +616,7 @@
list_for_each_entry_safe(slot, tmp, &slot_list, slot_list) {
list_del(&slot->slot_list);
pci_hp_deregister(slot->hotplug_slot);
release_slot(slot);
}
cleanup_null:
up_write(&list_rwsem);


@@ -266,17 +266,6 @@ static void __iomem *get_SMBIOS_entry(void __iomem *smbios_start,
return previous;
}
static void release_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot);
kfree(slot);
}
static int ctrl_slot_cleanup(struct controller *ctrl)
{
struct slot *old_slot, *next_slot;
@@ -285,9 +274,11 @@ static int ctrl_slot_cleanup(struct controller *ctrl)
ctrl->slot = NULL;
while (old_slot) {
/* memory will be freed by the release_slot callback */
next_slot = old_slot->next;
pci_hp_deregister(old_slot->hotplug_slot);
kfree(old_slot->hotplug_slot->info);
kfree(old_slot->hotplug_slot);
kfree(old_slot);
old_slot = next_slot;
}
@@ -678,7 +669,6 @@ static int ctrl_slot_setup(struct controller *ctrl,
((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
/* register this slot with the hotplug pci core */
hotplug_slot->release = &release_slot;
hotplug_slot->private = slot;
snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
hotplug_slot->ops = &cpqphp_hotplug_slot_ops;


@@ -673,7 +673,20 @@ static void free_slots(void)
list_for_each_entry_safe(slot_cur, next, &ibmphp_slot_head,
ibm_slot_list) {
pci_hp_deregister(slot_cur->hotplug_slot);
pci_hp_del(slot_cur->hotplug_slot);
slot_cur->ctrl = NULL;
slot_cur->bus_on = NULL;
/*
* We don't want to actually remove the resources,
* since ibmphp_free_resources() will do just that.
*/
ibmphp_unconfigure_card(&slot_cur, -1);
pci_hp_destroy(slot_cur->hotplug_slot);
kfree(slot_cur->hotplug_slot->info);
kfree(slot_cur->hotplug_slot);
kfree(slot_cur);
}
debug("%s -- exit\n", __func__);
}


@@ -699,25 +699,6 @@ static int fillslotinfo(struct hotplug_slot *hotplug_slot)
return rc;
}
static void release_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot;
if (!hotplug_slot || !hotplug_slot->private)
return;
slot = hotplug_slot->private;
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot);
slot->ctrl = NULL;
slot->bus_on = NULL;
/* we don't want to actually remove the resources, since free_resources will do just that */
ibmphp_unconfigure_card(&slot, -1);
kfree(slot);
}
static struct pci_driver ibmphp_driver;
/*
@@ -941,7 +922,6 @@ static int __init ebda_rsrc_controller(void)
tmp_slot->hotplug_slot = hp_slot_ptr;
hp_slot_ptr->private = tmp_slot;
hp_slot_ptr->release = release_slot;
rc = fillslotinfo(hp_slot_ptr);
if (rc)


@@ -396,8 +396,9 @@ static struct hotplug_slot *get_slot_from_name(const char *name)
* @owner: caller module owner
* @mod_name: caller module name
*
* Registers a hotplug slot with the pci hotplug subsystem, which will allow
* userspace interaction to the slot.
* Prepares a hotplug slot for in-kernel use and immediately publishes it to
* user space in one go. Drivers may alternatively carry out the two steps
* separately by invoking pci_hp_initialize() and pci_hp_add().
*
* Returns 0 if successful, anything else for an error.
*/
@@ -406,45 +407,91 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
struct module *owner, const char *mod_name)
{
int result;
result = __pci_hp_initialize(slot, bus, devnr, name, owner, mod_name);
if (result)
return result;
result = pci_hp_add(slot);
if (result)
pci_hp_destroy(slot);
return result;
}
EXPORT_SYMBOL_GPL(__pci_hp_register);
/**
* __pci_hp_initialize - prepare hotplug slot for in-kernel use
* @slot: pointer to the &struct hotplug_slot to initialize
* @bus: bus this slot is on
* @devnr: slot number
* @name: name registered with kobject core
* @owner: caller module owner
* @mod_name: caller module name
*
* Allocate and fill in a PCI slot for use by a hotplug driver. Once this has
* been called, the driver may invoke hotplug_slot_name() to get the slot's
* unique name. The driver must be prepared to handle a ->reset_slot callback
* from this point on.
*
* Returns 0 on success or a negative int on error.
*/
int __pci_hp_initialize(struct hotplug_slot *slot, struct pci_bus *bus,
int devnr, const char *name, struct module *owner,
const char *mod_name)
{
struct pci_slot *pci_slot;
if (slot == NULL)
return -ENODEV;
if ((slot->info == NULL) || (slot->ops == NULL))
return -EINVAL;
if (slot->release == NULL) {
dbg("Why are you trying to register a hotplug slot without a proper release function?\n");
return -EINVAL;
}
slot->ops->owner = owner;
slot->ops->mod_name = mod_name;
mutex_lock(&pci_hp_mutex);
/*
* No problems if we call this interface from both ACPI_PCI_SLOT
* driver and call it here again. If we've already created the
* pci_slot, the interface will simply bump the refcount.
*/
pci_slot = pci_create_slot(bus, devnr, name, slot);
if (IS_ERR(pci_slot)) {
result = PTR_ERR(pci_slot);
goto out;
}
if (IS_ERR(pci_slot))
return PTR_ERR(pci_slot);
slot->pci_slot = pci_slot;
pci_slot->hotplug = slot;
return 0;
}
EXPORT_SYMBOL_GPL(__pci_hp_initialize);
list_add(&slot->slot_list, &pci_hotplug_slot_list);
/**
* pci_hp_add - publish hotplug slot to user space
* @slot: pointer to the &struct hotplug_slot to publish
*
* Make a hotplug slot's sysfs interface available and inform user space of its
* addition by sending a uevent. The hotplug driver must be prepared to handle
* all &struct hotplug_slot_ops callbacks from this point on.
*
* Returns 0 on success or a negative int on error.
*/
int pci_hp_add(struct hotplug_slot *slot)
{
struct pci_slot *pci_slot = slot->pci_slot;
int result;
result = fs_add_slot(pci_slot);
if (result)
return result;
kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
dbg("Added slot %s to the list\n", name);
out:
mutex_lock(&pci_hp_mutex);
list_add(&slot->slot_list, &pci_hotplug_slot_list);
mutex_unlock(&pci_hp_mutex);
return result;
dbg("Added slot %s to the list\n", hotplug_slot_name(slot));
return 0;
}
EXPORT_SYMBOL_GPL(__pci_hp_register);
EXPORT_SYMBOL_GPL(pci_hp_add);
/**
* pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem
@@ -455,35 +502,62 @@ EXPORT_SYMBOL_GPL(__pci_hp_register);
*/
int pci_hp_deregister(struct hotplug_slot *slot)
void pci_hp_deregister(struct hotplug_slot *slot)
{
pci_hp_del(slot);
pci_hp_destroy(slot);
}
EXPORT_SYMBOL_GPL(pci_hp_deregister);
/**
* pci_hp_del - unpublish hotplug slot from user space
* @slot: pointer to the &struct hotplug_slot to unpublish
*
* Remove a hotplug slot's sysfs interface.
*/
void pci_hp_del(struct hotplug_slot *slot)
{
struct hotplug_slot *temp;
struct pci_slot *pci_slot;
if (!slot)
return -ENODEV;
if (WARN_ON(!slot))
return;
mutex_lock(&pci_hp_mutex);
temp = get_slot_from_name(hotplug_slot_name(slot));
if (temp != slot) {
if (WARN_ON(temp != slot)) {
mutex_unlock(&pci_hp_mutex);
return -ENODEV;
return;
}
list_del(&slot->slot_list);
pci_slot = slot->pci_slot;
fs_remove_slot(pci_slot);
mutex_unlock(&pci_hp_mutex);
dbg("Removed slot %s from the list\n", hotplug_slot_name(slot));
fs_remove_slot(slot->pci_slot);
}
EXPORT_SYMBOL_GPL(pci_hp_del);
slot->release(slot);
/**
* pci_hp_destroy - remove hotplug slot from in-kernel use
* @slot: pointer to the &struct hotplug_slot to destroy
*
* Destroy a PCI slot used by a hotplug driver. Once this has been called,
* the driver may no longer invoke hotplug_slot_name() to get the slot's
* unique name. The driver no longer needs to handle a ->reset_slot callback
* from this point on.
*
 */
void pci_hp_destroy(struct hotplug_slot *slot)
{
	struct pci_slot *pci_slot = slot->pci_slot;

	slot->pci_slot = NULL;
	pci_slot->hotplug = NULL;
	pci_destroy_slot(pci_slot);
}
EXPORT_SYMBOL_GPL(pci_hp_destroy);
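Teardown mirrors the split registration. A hedged sketch of the counterpart (hypothetical driver code; my_quiesce_controller() is a placeholder), following the same ordering pciehp_remove() uses below, where notification shutdown happens between pci_hp_del() and pci_hp_destroy():

static void my_unregister_slot(struct hotplug_slot *slot)
{
	/* Unpublish from user space: sysfs interface goes away first */
	pci_hp_del(slot);

	/* Safe point to free the IRQ and flush outstanding work */
	my_quiesce_controller(slot->private);

	/* End in-kernel use; hotplug_slot_name() must not be called afterwards */
	pci_hp_destroy(slot);
}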
/**
* pci_hp_change_slot_info - changes the slot's information structure in the core


@ -21,6 +21,7 @@
#include <linux/delay.h>
#include <linux/sched/signal.h> /* signal_pending() */
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include "../pcie/portdrv.h"
@ -57,49 +58,111 @@ do { \
dev_warn(&ctrl->pcie->device, format, ## arg)
#define SLOT_NAME_SIZE 10
/**
* struct slot - PCIe hotplug slot
* @state: current state machine position
* @ctrl: pointer to the slot's controller structure
* @hotplug_slot: pointer to the structure registered with the PCI hotplug core
* @work: work item to turn the slot on or off after 5 seconds in response to
* an Attention Button press
* @lock: protects reads and writes of @state;
* protects scheduling, execution and cancellation of @work
*/
struct slot {
u8 state;
struct controller *ctrl;
struct hotplug_slot *hotplug_slot;
	struct delayed_work work;
	struct mutex lock;
};
/**
* struct controller - PCIe hotplug controller
* @ctrl_lock: serializes writes to the Slot Control register
* @pcie: pointer to the controller's PCIe port service device
* @reset_lock: prevents access to the Data Link Layer Link Active bit in the
* Link Status register and to the Presence Detect State bit in the Slot
* Status register during a slot reset which may cause them to flap
* @slot: pointer to the controller's slot structure
* @queue: wait queue to wake up on reception of a Command Completed event,
* used for synchronous writes to the Slot Control register
* @slot_cap: cached copy of the Slot Capabilities register
* @slot_ctrl: cached copy of the Slot Control register
* @poll_thread: thread to poll for slot events if no IRQ is available,
* enabled with pciehp_poll_mode module parameter
* @cmd_started: jiffies when the Slot Control register was last written;
* the next write is allowed 1 second later, absent a Command Completed
* interrupt (PCIe r4.0, sec 6.7.3.2)
* @cmd_busy: flag set on Slot Control register write, cleared by IRQ handler
* on reception of a Command Completed event
* @link_active_reporting: cached copy of Data Link Layer Link Active Reporting
* Capable bit in Link Capabilities register; if this bit is zero, the
* Data Link Layer Link Active bit in the Link Status register will never
* be set and the driver is thus confined to wait 1 second before assuming
* the link to a hotplugged device is up and accessing it
* @notification_enabled: whether the IRQ was requested successfully
* @power_fault_detected: whether a power fault was detected by the hardware
* that has not yet been cleared by the user
* @pending_events: used by the IRQ handler to save events retrieved from the
* Slot Status register for later consumption by the IRQ thread
* @request_result: result of last user request submitted to the IRQ thread
* @requester: wait queue to wake up on completion of user request,
* used for synchronous slot enable/disable request via sysfs
*/
struct controller {
	struct mutex ctrl_lock;
	struct pcie_device *pcie;
	struct rw_semaphore reset_lock;
	struct slot *slot;
	wait_queue_head_t queue;
	u32 slot_cap;
	u16 slot_ctrl;
	struct task_struct *poll_thread;
unsigned long cmd_started; /* jiffies */
unsigned int cmd_busy:1;
unsigned int link_active_reporting:1;
unsigned int notification_enabled:1;
unsigned int power_fault_detected;
atomic_t pending_events;
int request_result;
wait_queue_head_t requester;
};
/**
* DOC: Slot state
*
* @OFF_STATE: slot is powered off, no subordinate devices are enumerated
* @BLINKINGON_STATE: slot will be powered on after the 5 second delay,
* green led is blinking
* @BLINKINGOFF_STATE: slot will be powered off after the 5 second delay,
* green led is blinking
* @POWERON_STATE: slot is currently powering on
* @POWEROFF_STATE: slot is currently powering off
* @ON_STATE: slot is powered on, subordinate devices have been enumerated
*/
#define OFF_STATE 0
#define BLINKINGON_STATE 1
#define BLINKINGOFF_STATE 2
#define POWERON_STATE 3
#define POWEROFF_STATE 4
#define ON_STATE 5
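For orientation, a summary of how these states chain together, derived from the handlers in the controller logic further down (illustrative, not part of the patch):

/*
 * Illustrative state flow (derived from the handlers, not part of the patch):
 *
 *   button press:     ON_STATE  -> BLINKINGOFF_STATE -(5 s)-> POWEROFF_STATE -> OFF_STATE
 *                     OFF_STATE -> BLINKINGON_STATE  -(5 s)-> POWERON_STATE  -> ON_STATE
 *   surprise removal: ON_STATE  -> POWEROFF_STATE -> OFF_STATE
 *
 * A second button press within the 5 second window cancels the blinking
 * state and returns the slot to ON_STATE or OFF_STATE.
 */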
/**
* DOC: Flags to request an action from the IRQ thread
*
* These are stored together with events read from the Slot Status register,
* hence must be greater than its 16-bit width.
*
* %DISABLE_SLOT: Disable the slot in response to a user request via sysfs or
* an Attention Button press after the 5 second delay
* %RERUN_ISR: Used by the IRQ handler to inform the IRQ thread that the
* hotplug port was inaccessible when the interrupt occurred, requiring
* that the IRQ handler is rerun by the IRQ thread after it has made the
* hotplug port accessible by runtime resuming its parents to D0
*/
#define DISABLE_SLOT (1 << 16)
#define RERUN_ISR (1 << 17)
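To make the width constraint concrete (an illustration, not part of the patch), the request flags sit above the 16 bits that carry raw Slot Status events in ctrl->pending_events:

/*
 * Illustration: one atomic word holds both kinds of flags.
 *
 *   bits 0-15: Slot Status events, e.g. PCI_EXP_SLTSTA_PDC (0x0008)
 *              and PCI_EXP_SLTSTA_DLLSC (0x0100)
 *   bit  16:   DISABLE_SLOT (software request)
 *   bit  17:   RERUN_ISR (software request)
 */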
#define ATTN_BUTTN(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_ABP)
#define POWER_CTRL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_PCP)
@ -113,15 +176,17 @@ struct controller {
int pciehp_sysfs_enable_slot(struct slot *slot);
int pciehp_sysfs_disable_slot(struct slot *slot);
void pciehp_request(struct controller *ctrl, int action);
void pciehp_handle_button_press(struct slot *slot);
void pciehp_handle_disable_request(struct slot *slot);
void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events);
int pciehp_configure_device(struct slot *p_slot);
void pciehp_unconfigure_device(struct slot *p_slot);
void pciehp_queue_pushbutton_work(struct work_struct *work);
struct controller *pcie_init(struct pcie_device *dev);
int pcie_init_notification(struct controller *ctrl);
void pcie_shutdown_notification(struct controller *ctrl);
void pcie_clear_hotplug_events(struct controller *ctrl);
int pciehp_power_on_slot(struct slot *slot);
void pciehp_power_off_slot(struct slot *slot);
void pciehp_get_power_status(struct slot *slot, u8 *status);


@ -26,11 +26,12 @@
#include <linux/interrupt.h>
#include <linux/time.h>
#include "../pci.h"
/* Global variables */
bool pciehp_debug;
bool pciehp_poll_mode;
int pciehp_poll_time;
/*
* not really modular, but the easiest way to keep compat with existing
@ -39,11 +40,9 @@ static bool pciehp_force;
module_param(pciehp_debug, bool, 0644);
module_param(pciehp_poll_mode, bool, 0644);
module_param(pciehp_poll_time, int, 0644);
MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
#define PCIE_MODULE_NAME "pciehp"
@ -56,17 +55,6 @@ static int get_latch_status(struct hotplug_slot *slot, u8 *value);
static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
static int reset_slot(struct hotplug_slot *slot, int probe);
/**
* release_slot - free up the memory used by a slot
* @hotplug_slot: slot to free
*/
static void release_slot(struct hotplug_slot *hotplug_slot)
{
kfree(hotplug_slot->ops);
kfree(hotplug_slot->info);
kfree(hotplug_slot);
}
static int init_slot(struct controller *ctrl)
{
struct slot *slot = ctrl->slot;
@ -107,15 +95,14 @@ static int init_slot(struct controller *ctrl)
	/* register this slot with the hotplug pci core */
	hotplug->info = info;
	hotplug->private = slot;
	hotplug->ops = ops;
	slot->hotplug_slot = hotplug;
	snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl));

	retval = pci_hp_initialize(hotplug,
				   ctrl->pcie->port->subordinate, 0, name);
	if (retval)
		ctrl_err(ctrl, "pci_hp_initialize failed: error %d\n", retval);
out:
if (retval) {
kfree(ops);
@ -127,7 +114,12 @@ out:
static void cleanup_slot(struct controller *ctrl)
{
	struct hotplug_slot *hotplug_slot = ctrl->slot->hotplug_slot;

	pci_hp_destroy(hotplug_slot);
	kfree(hotplug_slot->ops);
	kfree(hotplug_slot->info);
	kfree(hotplug_slot);
}
/*
@ -136,8 +128,11 @@ static void cleanup_slot(struct controller *ctrl)
static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
{
struct slot *slot = hotplug_slot->private;
struct pci_dev *pdev = slot->ctrl->pcie->port;
pci_config_pm_runtime_get(pdev);
pciehp_set_attention_status(slot, status);
pci_config_pm_runtime_put(pdev);
return 0;
}
@ -160,8 +155,11 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
struct pci_dev *pdev = slot->ctrl->pcie->port;
pci_config_pm_runtime_get(pdev);
pciehp_get_power_status(slot, value);
pci_config_pm_runtime_put(pdev);
return 0;
}
@ -176,16 +174,22 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
struct pci_dev *pdev = slot->ctrl->pcie->port;
pci_config_pm_runtime_get(pdev);
pciehp_get_latch_status(slot, value);
pci_config_pm_runtime_put(pdev);
return 0;
}
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
struct pci_dev *pdev = slot->ctrl->pcie->port;
pci_config_pm_runtime_get(pdev);
pciehp_get_adapter_status(slot, value);
pci_config_pm_runtime_put(pdev);
return 0;
}
@ -196,12 +200,40 @@ static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
return pciehp_reset_slot(slot, probe);
}
/**
* pciehp_check_presence() - synthesize event if presence has changed
*
* On probe and resume, an explicit presence check is necessary to bring up an
* occupied slot or bring down an unoccupied slot. This can't be triggered by
* events in the Slot Status register, they may be stale and are therefore
* cleared. Secondly, sending an interrupt for "events that occur while
* interrupt generation is disabled [when] interrupt generation is subsequently
* enabled" is optional per PCIe r4.0, sec 6.7.3.4.
*/
static void pciehp_check_presence(struct controller *ctrl)
{
struct slot *slot = ctrl->slot;
u8 occupied;
down_read(&ctrl->reset_lock);
mutex_lock(&slot->lock);
pciehp_get_adapter_status(slot, &occupied);
if ((occupied && (slot->state == OFF_STATE ||
slot->state == BLINKINGON_STATE)) ||
(!occupied && (slot->state == ON_STATE ||
slot->state == BLINKINGOFF_STATE)))
pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
mutex_unlock(&slot->lock);
up_read(&ctrl->reset_lock);
}
static int pciehp_probe(struct pcie_device *dev)
{
int rc;
struct controller *ctrl;
struct slot *slot;
/* If this is not a "hotplug" service, we have no business here. */
if (dev->service != PCIE_PORT_SERVICE_HP)
@ -238,21 +270,20 @@ static int pciehp_probe(struct pcie_device *dev)
goto err_out_free_ctrl_slot;
}
	/* Publish to user space */
	slot = ctrl->slot;
	rc = pci_hp_add(slot->hotplug_slot);
	if (rc) {
		ctrl_err(ctrl, "Publication to user space failed (%d)\n", rc);
		goto err_out_shutdown_notification;
	}

	pciehp_check_presence(ctrl);

	return 0;

err_out_shutdown_notification:
	pcie_shutdown_notification(ctrl);
err_out_free_ctrl_slot:
	cleanup_slot(ctrl);
err_out_release_ctlr:
@ -264,6 +295,8 @@ static void pciehp_remove(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
pci_hp_del(ctrl->slot->hotplug_slot);
pcie_shutdown_notification(ctrl);
cleanup_slot(ctrl);
pciehp_release_ctrl(ctrl);
}
@ -274,27 +307,28 @@ static int pciehp_suspend(struct pcie_device *dev)
return 0;
}
static int pciehp_resume_noirq(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
struct slot *slot = ctrl->slot;
/* pci_restore_state() just wrote to the Slot Control register */
ctrl->cmd_started = jiffies;
ctrl->cmd_busy = true;
/* clear spurious events from rediscovery of inserted card */
if (slot->state == ON_STATE || slot->state == BLINKINGOFF_STATE)
pcie_clear_hotplug_events(ctrl);
return 0;
}
static int pciehp_resume(struct pcie_device *dev)
{
	struct controller *ctrl = get_service_data(dev);

	pciehp_check_presence(ctrl);

	return 0;
}
#endif /* PM */
@ -309,6 +343,7 @@ static struct pcie_port_service_driver hpdriver_portdrv = {
#ifdef CONFIG_PM
.suspend = pciehp_suspend,
.resume_noirq = pciehp_resume_noirq,
.resume = pciehp_resume,
#endif /* PM */
};


@ -17,28 +17,11 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include "../pci.h"
#include "pciehp.h"
static void interrupt_event_handler(struct work_struct *work);
void pciehp_queue_interrupt_event(struct slot *p_slot, u32 event_type)
{
struct event_info *info;
info = kmalloc(sizeof(*info), GFP_ATOMIC);
if (!info) {
ctrl_err(p_slot->ctrl, "dropped event %d (ENOMEM)\n", event_type);
return;
}
INIT_WORK(&info->work, interrupt_event_handler);
info->event_type = event_type;
info->p_slot = p_slot;
queue_work(p_slot->wq, &info->work);
}
/* The following routines constitute the bulk of the
hotplug controller logic
*/
@ -119,14 +102,11 @@ err_exit:
* remove_board - Turns off slot and LEDs
* @p_slot: slot where board is being removed
*/
static void remove_board(struct slot *p_slot)
{
	struct controller *ctrl = p_slot->ctrl;

	pciehp_unconfigure_device(p_slot);
if (POWER_CTRL(ctrl)) {
pciehp_power_off_slot(p_slot);
@ -141,86 +121,30 @@ static int remove_board(struct slot *p_slot)
/* turn off Green LED */
pciehp_green_led_off(p_slot);
}
static int pciehp_enable_slot(struct slot *slot);
static int pciehp_disable_slot(struct slot *slot);
void pciehp_request(struct controller *ctrl, int action)
{
	atomic_or(action, &ctrl->pending_events);
	if (!pciehp_poll_mode)
		irq_wake_thread(ctrl->pcie->irq, ctrl);
}
void pciehp_queue_pushbutton_work(struct work_struct *work)
{
struct slot *p_slot = container_of(work, struct slot, work.work);
struct controller *ctrl = p_slot->ctrl;
mutex_lock(&p_slot->lock);
switch (p_slot->state) {
case BLINKINGOFF_STATE:
pciehp_request(ctrl, DISABLE_SLOT);
break;
case BLINKINGON_STATE:
pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
break;
default:
break;
@ -228,18 +152,15 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
mutex_unlock(&p_slot->lock);
}
void pciehp_handle_button_press(struct slot *p_slot)
{
	struct controller *ctrl = p_slot->ctrl;

	mutex_lock(&p_slot->lock);
	switch (p_slot->state) {
	case OFF_STATE:
	case ON_STATE:
		if (p_slot->state == ON_STATE) {
p_slot->state = BLINKINGOFF_STATE;
ctrl_info(ctrl, "Slot(%s): Powering off due to button press\n",
slot_name(p_slot));
@ -251,7 +172,7 @@ static void handle_button_press_event(struct slot *p_slot)
/* blink green LED and turn off amber */
pciehp_green_led_blink(p_slot);
pciehp_set_attention_status(p_slot, 0);
schedule_delayed_work(&p_slot->work, 5 * HZ);
break;
case BLINKINGOFF_STATE:
case BLINKINGON_STATE:
@ -262,118 +183,104 @@ static void handle_button_press_event(struct slot *p_slot)
*/
ctrl_info(ctrl, "Slot(%s): Button cancel\n", slot_name(p_slot));
cancel_delayed_work(&p_slot->work);
		if (p_slot->state == BLINKINGOFF_STATE) {
			p_slot->state = ON_STATE;
			pciehp_green_led_on(p_slot);
		} else {
			p_slot->state = OFF_STATE;
			pciehp_green_led_off(p_slot);
		}
		pciehp_set_attention_status(p_slot, 0);
		ctrl_info(ctrl, "Slot(%s): Action canceled due to button press\n",
			  slot_name(p_slot));
break;
case POWEROFF_STATE:
case POWERON_STATE:
		/*
		 * Ignore if the slot is in the power-on or power-off state;
		 * the previous attention button action to hot-add or
		 * hot-remove is still in progress.
		 */
ctrl_info(ctrl, "Slot(%s): Button ignored\n",
slot_name(p_slot));
break;
default:
ctrl_err(ctrl, "Slot(%s): Ignoring invalid state %#x\n",
slot_name(p_slot), p_slot->state);
break;
	}
	mutex_unlock(&p_slot->lock);
}
/*
* Note: This function must be called with slot->lock held
*/
static void handle_link_event(struct slot *p_slot, u32 event)
{
struct controller *ctrl = p_slot->ctrl;
switch (p_slot->state) {
case BLINKINGON_STATE:
case BLINKINGOFF_STATE:
cancel_delayed_work(&p_slot->work);
/* Fall through */
case STATIC_STATE:
pciehp_queue_power_work(p_slot, event == INT_LINK_UP ?
ENABLE_REQ : DISABLE_REQ);
break;
case POWERON_STATE:
if (event == INT_LINK_UP) {
ctrl_info(ctrl, "Slot(%s): Link Up event ignored; already powering on\n",
slot_name(p_slot));
} else {
ctrl_info(ctrl, "Slot(%s): Link Down event queued; currently getting powered on\n",
slot_name(p_slot));
pciehp_queue_power_work(p_slot, DISABLE_REQ);
}
break;
case POWEROFF_STATE:
if (event == INT_LINK_UP) {
ctrl_info(ctrl, "Slot(%s): Link Up event queued; currently getting powered off\n",
slot_name(p_slot));
pciehp_queue_power_work(p_slot, ENABLE_REQ);
} else {
ctrl_info(ctrl, "Slot(%s): Link Down event ignored; already powering off\n",
slot_name(p_slot));
}
break;
default:
ctrl_err(ctrl, "Slot(%s): Ignoring invalid state %#x\n",
slot_name(p_slot), p_slot->state);
break;
}
}
static void interrupt_event_handler(struct work_struct *work)
{
struct event_info *info = container_of(work, struct event_info, work);
struct slot *p_slot = info->p_slot;
struct controller *ctrl = p_slot->ctrl;
mutex_lock(&p_slot->lock);
switch (info->event_type) {
case INT_BUTTON_PRESS:
handle_button_press_event(p_slot);
break;
case INT_POWER_FAULT:
if (!POWER_CTRL(ctrl))
break;
pciehp_set_attention_status(p_slot, 1);
pciehp_green_led_off(p_slot);
break;
case INT_PRESENCE_ON:
pciehp_queue_power_work(p_slot, ENABLE_REQ);
break;
case INT_PRESENCE_OFF:
/*
* Regardless of surprise capability, we need to
* definitely remove a card that has been pulled out!
*/
pciehp_queue_power_work(p_slot, DISABLE_REQ);
break;
case INT_LINK_UP:
case INT_LINK_DOWN:
handle_link_event(p_slot, info->event_type);
break;
default:
break;
}
mutex_unlock(&p_slot->lock);
kfree(info);
}
void pciehp_handle_disable_request(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
mutex_lock(&slot->lock);
switch (slot->state) {
case BLINKINGON_STATE:
case BLINKINGOFF_STATE:
cancel_delayed_work(&slot->work);
break;
}
slot->state = POWEROFF_STATE;
mutex_unlock(&slot->lock);
ctrl->request_result = pciehp_disable_slot(slot);
}
void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events)
{
struct controller *ctrl = slot->ctrl;
bool link_active;
u8 present;
/*
* If the slot is on and presence or link has changed, turn it off.
* Even if it's occupied again, we cannot assume the card is the same.
*/
mutex_lock(&slot->lock);
switch (slot->state) {
case BLINKINGOFF_STATE:
cancel_delayed_work(&slot->work);
/* fall through */
case ON_STATE:
slot->state = POWEROFF_STATE;
mutex_unlock(&slot->lock);
if (events & PCI_EXP_SLTSTA_DLLSC)
ctrl_info(ctrl, "Slot(%s): Link Down\n",
slot_name(slot));
if (events & PCI_EXP_SLTSTA_PDC)
ctrl_info(ctrl, "Slot(%s): Card not present\n",
slot_name(slot));
pciehp_disable_slot(slot);
break;
default:
mutex_unlock(&slot->lock);
break;
}
/* Turn the slot on if it's occupied or link is up */
mutex_lock(&slot->lock);
pciehp_get_adapter_status(slot, &present);
link_active = pciehp_check_link_active(ctrl);
if (!present && !link_active) {
mutex_unlock(&slot->lock);
return;
}
switch (slot->state) {
case BLINKINGON_STATE:
cancel_delayed_work(&slot->work);
/* fall through */
case OFF_STATE:
slot->state = POWERON_STATE;
mutex_unlock(&slot->lock);
if (present)
ctrl_info(ctrl, "Slot(%s): Card present\n",
slot_name(slot));
if (link_active)
ctrl_info(ctrl, "Slot(%s): Link Up\n",
slot_name(slot));
ctrl->request_result = pciehp_enable_slot(slot);
break;
default:
mutex_unlock(&slot->lock);
break;
}
}
static int __pciehp_enable_slot(struct slot *p_slot)
{
u8 getstatus = 0;
struct controller *ctrl = p_slot->ctrl;
@ -404,17 +311,29 @@ int pciehp_enable_slot(struct slot *p_slot)
return board_added(p_slot);
}
static int pciehp_enable_slot(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
int ret;
pm_runtime_get_sync(&ctrl->pcie->port->dev);
ret = __pciehp_enable_slot(slot);
if (ret && ATTN_BUTTN(ctrl))
pciehp_green_led_off(slot); /* may be blinking */
pm_runtime_put(&ctrl->pcie->port->dev);
mutex_lock(&slot->lock);
slot->state = ret ? OFF_STATE : ON_STATE;
mutex_unlock(&slot->lock);
return ret;
}
static int __pciehp_disable_slot(struct slot *p_slot)
{
u8 getstatus = 0;
struct controller *ctrl = p_slot->ctrl;
if (POWER_CTRL(p_slot->ctrl)) {
pciehp_get_power_status(p_slot, &getstatus);
if (!getstatus) {
@ -424,32 +343,50 @@ int pciehp_disable_slot(struct slot *p_slot)
}
}
	remove_board(p_slot);
	return 0;
}
static int pciehp_disable_slot(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
int ret;
pm_runtime_get_sync(&ctrl->pcie->port->dev);
ret = __pciehp_disable_slot(slot);
pm_runtime_put(&ctrl->pcie->port->dev);
mutex_lock(&slot->lock);
slot->state = OFF_STATE;
mutex_unlock(&slot->lock);
return ret;
}
int pciehp_sysfs_enable_slot(struct slot *p_slot)
{
struct controller *ctrl = p_slot->ctrl;
mutex_lock(&p_slot->lock);
switch (p_slot->state) {
case BLINKINGON_STATE:
case OFF_STATE:
mutex_unlock(&p_slot->lock);
/*
* The IRQ thread becomes a no-op if the user pulls out the
* card before the thread wakes up, so initialize to -ENODEV.
*/
ctrl->request_result = -ENODEV;
pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
wait_event(ctrl->requester,
!atomic_read(&ctrl->pending_events));
return ctrl->request_result;
case POWERON_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering on state\n",
slot_name(p_slot));
break;
case BLINKINGOFF_STATE:
case ON_STATE:
case POWEROFF_STATE:
ctrl_info(ctrl, "Slot(%s): Already enabled\n",
slot_name(p_slot));
@ -461,32 +398,28 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
}
mutex_unlock(&p_slot->lock);
return -ENODEV;
}
int pciehp_sysfs_disable_slot(struct slot *p_slot)
{
struct controller *ctrl = p_slot->ctrl;
mutex_lock(&p_slot->lock);
switch (p_slot->state) {
case BLINKINGOFF_STATE:
case ON_STATE:
mutex_unlock(&p_slot->lock);
pciehp_request(ctrl, DISABLE_SLOT);
wait_event(ctrl->requester,
!atomic_read(&ctrl->pending_events));
return ctrl->request_result;
case POWEROFF_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering off state\n",
slot_name(p_slot));
break;
case BLINKINGON_STATE:
case OFF_STATE:
case POWERON_STATE:
ctrl_info(ctrl, "Slot(%s): Already disabled\n",
slot_name(p_slot));
@ -498,5 +431,5 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot)
}
mutex_unlock(&p_slot->lock);
return -ENODEV;
}


@ -17,8 +17,9 @@
#include <linux/types.h>
#include <linux/signal.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/slab.h>
@ -31,47 +32,24 @@ static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
return ctrl->pcie->port;
}
static irqreturn_t pcie_isr(int irq, void *dev_id);
static void start_int_poll_timer(struct controller *ctrl, int sec);
/* This is the interrupt polling timeout function. */
static void int_poll_timeout(struct timer_list *t)
{
struct controller *ctrl = from_timer(ctrl, t, poll_timer);
/* Poll for interrupt events. regs == NULL => polling */
pcie_isr(0, ctrl);
if (!pciehp_poll_time)
pciehp_poll_time = 2; /* default polling interval is 2 sec */
start_int_poll_timer(ctrl, pciehp_poll_time);
}
/* This function starts the interrupt polling timer. */
static void start_int_poll_timer(struct controller *ctrl, int sec)
{
/* Clamp to sane value */
if ((sec <= 0) || (sec > 60))
sec = 2;
ctrl->poll_timer.expires = jiffies + sec * HZ;
add_timer(&ctrl->poll_timer);
}
static irqreturn_t pciehp_isr(int irq, void *dev_id);
static irqreturn_t pciehp_ist(int irq, void *dev_id);
static int pciehp_poll(void *data);
static inline int pciehp_request_irq(struct controller *ctrl)
{
int retval, irq = ctrl->pcie->irq;
	if (pciehp_poll_mode) {
		ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
						"pciehp_poll-%s",
						slot_name(ctrl->slot));
		return PTR_ERR_OR_ZERO(ctrl->poll_thread);
	}

	/* Installs the interrupt handler */
	retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist,
				      IRQF_SHARED, MY_NAME, ctrl);
if (retval)
ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
irq);
@ -81,7 +59,7 @@ static inline int pciehp_request_irq(struct controller *ctrl)
static inline void pciehp_free_irq(struct controller *ctrl)
{
if (pciehp_poll_mode)
kthread_stop(ctrl->poll_thread);
else
free_irq(ctrl->pcie->irq, ctrl);
}
@ -293,6 +271,11 @@ int pciehp_check_link_status(struct controller *ctrl)
found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
PCI_DEVFN(0, 0));
/* ignore link or presence changes up to this point */
if (found)
atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
&ctrl->pending_events);
pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
@ -339,7 +322,9 @@ int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
struct pci_dev *pdev = ctrl_dev(slot->ctrl);
u16 slot_ctrl;
pci_config_pm_runtime_get(pdev);
pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
pci_config_pm_runtime_put(pdev);
*status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
return 0;
}
@ -350,7 +335,9 @@ void pciehp_get_attention_status(struct slot *slot, u8 *status)
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_ctrl;
pci_config_pm_runtime_get(pdev);
pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
pci_config_pm_runtime_put(pdev);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
@ -425,9 +412,12 @@ int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
{
struct slot *slot = hotplug_slot->private;
struct controller *ctrl = slot->ctrl;
struct pci_dev *pdev = ctrl_dev(ctrl);
pci_config_pm_runtime_get(pdev);
pcie_write_cmd_nowait(ctrl, status << 6,
PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
pci_config_pm_runtime_put(pdev);
return 0;
}
@ -539,20 +529,35 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
{
struct controller *ctrl = (struct controller *)dev_id;
struct pci_dev *pdev = ctrl_dev(ctrl);
	struct device *parent = pdev->dev.parent;
	u16 status, events;

	/*
	 * Interrupts only occur in D3hot or shallower (PCIe r4.0, sec 6.7.3.4).
	 */
if (pdev->current_state == PCI_D3cold)
return IRQ_NONE;
/*
* Keep the port accessible by holding a runtime PM ref on its parent.
* Defer resume of the parent to the IRQ thread if it's suspended.
* Mask the interrupt until then.
*/
if (parent) {
pm_runtime_get_noresume(parent);
if (!pm_runtime_active(parent)) {
pm_runtime_put(parent);
disable_irq_nosync(irq);
atomic_or(RERUN_ISR, &ctrl->pending_events);
return IRQ_WAKE_THREAD;
}
}
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
if (status == (u16) ~0) {
ctrl_info(ctrl, "%s: no response from device\n", __func__);
if (parent)
pm_runtime_put(parent);
return IRQ_NONE;
}
@ -571,86 +576,119 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
if (ctrl->power_fault_detected)
events &= ~PCI_EXP_SLTSTA_PFD;
	if (!events) {
		if (parent)
			pm_runtime_put(parent);
		return IRQ_NONE;
	}
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
if (parent)
pm_runtime_put(parent);
/*
* Command Completed notifications are not deferred to the
* IRQ thread because it may be waiting for their arrival.
*/
if (events & PCI_EXP_SLTSTA_CC) {
ctrl->cmd_busy = 0;
smp_mb();
wake_up(&ctrl->queue);
if (events == PCI_EXP_SLTSTA_CC)
return IRQ_HANDLED;
events &= ~PCI_EXP_SLTSTA_CC;
}
if (pdev->ignore_hotplug) {
ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events);
return IRQ_HANDLED;
}
/* Save pending events for consumption by IRQ thread. */
atomic_or(events, &ctrl->pending_events);
return IRQ_WAKE_THREAD;
}
static irqreturn_t pciehp_ist(int irq, void *dev_id)
{
struct controller *ctrl = (struct controller *)dev_id;
struct pci_dev *pdev = ctrl_dev(ctrl);
struct slot *slot = ctrl->slot;
irqreturn_t ret;
u32 events;
pci_config_pm_runtime_get(pdev);
/* rerun pciehp_isr() if the port was inaccessible on interrupt */
if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
ret = pciehp_isr(irq, dev_id);
enable_irq(irq);
if (ret != IRQ_WAKE_THREAD) {
pci_config_pm_runtime_put(pdev);
return ret;
}
}
synchronize_hardirq(irq);
events = atomic_xchg(&ctrl->pending_events, 0);
if (!events) {
pci_config_pm_runtime_put(pdev);
return IRQ_NONE;
}
/* Check Attention Button Pressed */
if (events & PCI_EXP_SLTSTA_ABP) {
ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
slot_name(slot));
pciehp_handle_button_press(slot);
}
/*
	 * Disable requests have higher priority than Presence Detect Changed
	 * or Data Link Layer State Changed events.
	 */
down_read(&ctrl->reset_lock);
if (events & DISABLE_SLOT)
pciehp_handle_disable_request(slot);
else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
pciehp_handle_presence_or_link_change(slot, events);
up_read(&ctrl->reset_lock);
/* Check Power Fault Detected */
if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
ctrl->power_fault_detected = 1;
ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
pciehp_set_attention_status(slot, 1);
pciehp_green_led_off(slot);
}
pci_config_pm_runtime_put(pdev);
wake_up(&ctrl->requester);
return IRQ_HANDLED;
}
static int pciehp_poll(void *data)
{
	struct controller *ctrl = data;

	schedule_timeout_idle(10 * HZ); /* start with 10 sec delay */
while (!kthread_should_stop()) {
/* poll for interrupt events or user requests */
while (pciehp_isr(IRQ_NOTCONNECTED, ctrl) == IRQ_WAKE_THREAD ||
atomic_read(&ctrl->pending_events))
pciehp_ist(IRQ_NOTCONNECTED, ctrl);
if (pciehp_poll_time <= 0 || pciehp_poll_time > 60)
pciehp_poll_time = 2; /* clamp to sane value */
schedule_timeout_idle(pciehp_poll_time * HZ);
}
return 0;
}
static void pcie_enable_notification(struct controller *ctrl)
@ -691,17 +729,6 @@ static void pcie_enable_notification(struct controller *ctrl)
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
void pcie_reenable_notification(struct controller *ctrl)
{
/*
* Clear both Presence and Data Link Layer Changed to make sure
* those events still fire after we have re-enabled them.
*/
pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
pcie_enable_notification(ctrl);
}
static void pcie_disable_notification(struct controller *ctrl)
{
u16 mask;
@ -715,6 +742,12 @@ static void pcie_disable_notification(struct controller *ctrl)
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
}
void pcie_clear_hotplug_events(struct controller *ctrl)
{
pcie_capability_write_word(ctrl_dev(ctrl), PCI_EXP_SLTSTA,
PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
}
/*
* pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
* bus reset of the bridge, but at the same time we want to ensure that it is
@ -728,10 +761,13 @@ int pciehp_reset_slot(struct slot *slot, int probe)
struct controller *ctrl = slot->ctrl;
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 stat_mask = 0, ctrl_mask = 0;
int rc;
if (probe)
return 0;
down_write(&ctrl->reset_lock);
if (!ATTN_BUTTN(ctrl)) {
ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
stat_mask |= PCI_EXP_SLTSTA_PDC;
@ -742,18 +778,16 @@ int pciehp_reset_slot(struct slot *slot, int probe)
pcie_write_cmd(ctrl, 0, ctrl_mask);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
up_write(&ctrl->reset_lock);
return rc;
}
int pcie_init_notification(struct controller *ctrl)
@ -765,7 +799,7 @@ int pcie_init_notification(struct controller *ctrl)
return 0;
}
void pcie_shutdown_notification(struct controller *ctrl)
{
if (ctrl->notification_enabled) {
pcie_disable_notification(ctrl);
@ -776,32 +810,29 @@ static void pcie_shutdown_notification(struct controller *ctrl)
static int pcie_init_slot(struct controller *ctrl)
{
struct pci_bus *subordinate = ctrl_dev(ctrl)->subordinate;
struct slot *slot;
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
return -ENOMEM;
down_read(&pci_bus_sem);
slot->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
up_read(&pci_bus_sem);
slot->ctrl = ctrl;
mutex_init(&slot->lock);
INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
ctrl->slot = slot;
return 0;
}
static void pcie_cleanup_slot(struct controller *ctrl)
{
struct slot *slot = ctrl->slot;
cancel_delayed_work_sync(&slot->work);
kfree(slot);
}
@ -826,6 +857,7 @@ struct controller *pcie_init(struct pcie_device *dev)
{
struct controller *ctrl;
u32 slot_cap, link_cap;
u8 occupied, poweron;
struct pci_dev *pdev = dev->port;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
@ -847,6 +879,8 @@ struct controller *pcie_init(struct pcie_device *dev)
ctrl->slot_cap = slot_cap;
mutex_init(&ctrl->ctrl_lock);
init_rwsem(&ctrl->reset_lock);
init_waitqueue_head(&ctrl->requester);
init_waitqueue_head(&ctrl->queue);
dbg_ctrl(ctrl);
@ -855,16 +889,11 @@ struct controller *pcie_init(struct pcie_device *dev)
if (link_cap & PCI_EXP_LNKCAP_DLLLARC)
ctrl->link_active_reporting = 1;
	/* Clear all remaining event bits in Slot Status register. */
	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
				   PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
				   PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
				   PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);
ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c LLActRep%c%s\n",
(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
@ -883,6 +912,19 @@ struct controller *pcie_init(struct pcie_device *dev)
if (pcie_init_slot(ctrl))
goto abort_ctrl;
/*
* If empty slot's power status is on, turn power off. The IRQ isn't
* requested yet, so avoid triggering a notification with this command.
*/
if (POWER_CTRL(ctrl)) {
pciehp_get_adapter_status(ctrl->slot, &occupied);
pciehp_get_power_status(ctrl->slot, &poweron);
if (!occupied && poweron) {
pcie_disable_notification(ctrl);
pciehp_power_off_slot(ctrl->slot);
}
}
return ctrl;
abort_ctrl:
@ -893,7 +935,6 @@ abort:
void pciehp_release_ctrl(struct controller *ctrl)
{
pcie_cleanup_slot(ctrl);
kfree(ctrl);
}


@ -62,9 +62,8 @@ int pciehp_configure_device(struct slot *p_slot)
return ret;
}
void pciehp_unconfigure_device(struct slot *p_slot)
{
u8 presence = 0;
struct pci_dev *dev, *temp;
struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
@ -107,5 +106,4 @@ int pciehp_unconfigure_device(struct slot *p_slot)
}
pci_unlock_rescan_remove();
}


@ -1,348 +0,0 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* PCI Hot Plug Controller Skeleton Driver - 0.3
*
* Copyright (C) 2001,2003 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2001,2003 IBM Corp.
*
* All rights reserved.
*
* This driver is to be used as a skeleton driver to show how to interface
* with the pci hotplug core easily.
*
* Send feedback to <greg@kroah.com>
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/init.h>
#define SLOT_NAME_SIZE 10
struct slot {
u8 number;
struct hotplug_slot *hotplug_slot;
struct list_head slot_list;
char name[SLOT_NAME_SIZE];
};
static LIST_HEAD(slot_list);
#define MY_NAME "pcihp_skeleton"
#define dbg(format, arg...) \
do { \
if (debug) \
printk(KERN_DEBUG "%s: " format "\n", \
MY_NAME, ## arg); \
} while (0)
#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME, ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME, ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME, ## arg)
/* local variables */
static bool debug;
static int num_slots;
#define DRIVER_VERSION "0.3"
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>"
#define DRIVER_DESC "Hot Plug PCI Controller Skeleton Driver"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
static int enable_slot(struct hotplug_slot *slot);
static int disable_slot(struct hotplug_slot *slot);
static int set_attention_status(struct hotplug_slot *slot, u8 value);
static int hardware_test(struct hotplug_slot *slot, u32 value);
static int get_power_status(struct hotplug_slot *slot, u8 *value);
static int get_attention_status(struct hotplug_slot *slot, u8 *value);
static int get_latch_status(struct hotplug_slot *slot, u8 *value);
static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops skel_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
.hardware_test = hardware_test,
.get_power_status = get_power_status,
.get_attention_status = get_attention_status,
.get_latch_status = get_latch_status,
.get_adapter_status = get_adapter_status,
};
static int enable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
int retval = 0;
dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
/*
* Fill in code here to enable the specified slot
*/
return retval;
}
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
int retval = 0;
dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
/*
* Fill in code here to disable the specified slot
*/
return retval;
}
static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
{
struct slot *slot = hotplug_slot->private;
int retval = 0;
dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
switch (status) {
case 0:
/*
* Fill in code here to turn light off
*/
break;
case 1:
default:
/*
* Fill in code here to turn light on
*/
break;
}
return retval;
}
static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
{
struct slot *slot = hotplug_slot->private;
int retval = 0;
dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
switch (value) {
case 0:
/* Specify a test here */
break;
case 1:
/* Specify another test here */
break;
}
return retval;
}
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
int retval = 0;
dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
/*
* Fill in logic to get the current power status of the specific
* slot and store it in the *value location.
*/
return retval;
}
static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
int retval = 0;
dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
/*
* Fill in logic to get the current attention status of the specific
* slot and store it in the *value location.
*/
return retval;
}
static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
int retval = 0;
dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
/*
* Fill in logic to get the current latch status of the specific
* slot and store it in the *value location.
*/
return retval;
}
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
int retval = 0;
dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
/*
* Fill in logic to get the current adapter status of the specific
* slot and store it in the *value location.
*/
return retval;
}
static void release_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot);
kfree(slot);
}
static void make_slot_name(struct slot *slot)
{
/*
* Stupid way to make a filename out of the slot name.
* replace this if your hardware provides a better way to name slots.
*/
snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d", slot->number);
}
/**
* init_slots - initialize 'struct slot' structures for each slot
*
*/
static int __init init_slots(void)
{
struct slot *slot;
struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *info;
int retval;
int i;
/*
* Create a structure for each slot, and register that slot
* with the pci_hotplug subsystem.
*/
for (i = 0; i < num_slots; ++i) {
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot) {
retval = -ENOMEM;
goto error;
}
hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
if (!hotplug_slot) {
retval = -ENOMEM;
goto error_slot;
}
slot->hotplug_slot = hotplug_slot;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
retval = -ENOMEM;
goto error_hpslot;
}
hotplug_slot->info = info;
slot->number = i;
hotplug_slot->name = slot->name;
hotplug_slot->private = slot;
hotplug_slot->release = &release_slot;
make_slot_name(slot);
hotplug_slot->ops = &skel_hotplug_slot_ops;
/*
* Initialize the slot info structure with some known
* good values.
*/
get_power_status(hotplug_slot, &info->power_status);
get_attention_status(hotplug_slot, &info->attention_status);
get_latch_status(hotplug_slot, &info->latch_status);
get_adapter_status(hotplug_slot, &info->adapter_status);
dbg("registering slot %d\n", i);
retval = pci_hp_register(slot->hotplug_slot);
if (retval) {
err("pci_hp_register failed with error %d\n", retval);
goto error_info;
}
/* add slot to our internal list */
list_add(&slot->slot_list, &slot_list);
}
return 0;
error_info:
kfree(info);
error_hpslot:
kfree(hotplug_slot);
error_slot:
kfree(slot);
error:
return retval;
}
static void __exit cleanup_slots(void)
{
struct slot *slot, *next;
/*
* Unregister all of our slots with the pci_hotplug subsystem.
* Memory will be freed in release_slot() callback after slot's
* lifespan is finished.
*/
list_for_each_entry_safe(slot, next, &slot_list, slot_list) {
list_del(&slot->slot_list);
pci_hp_deregister(slot->hotplug_slot);
}
}
static int __init pcihp_skel_init(void)
{
int retval;
info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
/*
* Do specific initialization stuff for your driver here
* like initializing your controller hardware (if any) and
* determining the number of slots you have in the system
* right now.
*/
num_slots = 5;
return init_slots();
}
static void __exit pcihp_skel_exit(void)
{
/*
* Clean everything up.
*/
cleanup_slots();
}
module_init(pcihp_skel_init);
module_exit(pcihp_skel_exit);


@ -538,9 +538,8 @@ static struct hotplug_slot_ops php_slot_ops = {
.disable_slot = pnv_php_disable_slot,
};
static void pnv_php_release(struct pnv_php_slot *php_slot)
{
unsigned long flags;
/* Remove from global or child list */
@ -596,7 +595,6 @@ static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn)
php_slot->power_state_check = false;
php_slot->slot.ops = &php_slot_ops;
php_slot->slot.info = &php_slot->slot_info;
php_slot->slot.private = php_slot;
INIT_LIST_HEAD(&php_slot->children);
@ -924,6 +922,7 @@ static void pnv_php_unregister_one(struct device_node *dn)
php_slot->state = PNV_PHP_STATE_OFFLINE;
pci_hp_deregister(&php_slot->slot);
pnv_php_release(php_slot);
pnv_php_put_slot(php_slot);
}


@ -404,13 +404,13 @@ static void __exit cleanup_slots(void)
/*
* Unregister all of our slots with the pci_hotplug subsystem,
* and free up all memory that we had allocated.
*/
list_for_each_entry_safe(slot, next, &rpaphp_slot_head,
rpaphp_slot_list) {
list_del(&slot->rpaphp_slot_list);
pci_hp_deregister(slot->hotplug_slot);
dealloc_slot_struct(slot);
}
return;
}


@ -19,12 +19,6 @@
#include "rpaphp.h"
/* free up the memory used by a slot */
static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = (struct slot *) hotplug_slot->private;
dealloc_slot_struct(slot);
}
void dealloc_slot_struct(struct slot *slot)
{
kfree(slot->hotplug_slot->info);
@ -56,7 +50,6 @@ struct slot *alloc_slot_struct(struct device_node *dn,
slot->power_domain = power_domain;
slot->hotplug_slot->private = slot;
slot->hotplug_slot->ops = &rpaphp_hotplug_slot_ops;
return (slot);
@ -90,10 +83,8 @@ int rpaphp_deregister_slot(struct slot *slot)
__func__, slot->name);
list_del(&slot->rpaphp_slot_list);
pci_hp_deregister(php_slot);
dealloc_slot_struct(slot);
dbg("%s - Exit: rc[%d]\n", __func__, retval);
return retval;


@ -130,15 +130,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
static void release_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot);
kfree(slot);
}
static struct hotplug_slot_ops s390_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
@ -175,7 +166,6 @@ int zpci_init_slot(struct zpci_dev *zdev)
hotplug_slot->info = info;
hotplug_slot->ops = &s390_hotplug_slot_ops;
get_power_status(hotplug_slot, &info->power_status);
get_adapter_status(hotplug_slot, &info->adapter_status);
@ -209,5 +199,8 @@ void zpci_exit_slot(struct zpci_dev *zdev)
continue;
list_del(&slot->slot_list);
pci_hp_deregister(slot->hotplug_slot);
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot);
kfree(slot);
}
}


@ -628,7 +628,6 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
goto alloc_err;
}
bss_hotplug_slot->ops = &sn_hotplug_slot_ops;
rc = pci_hp_register(bss_hotplug_slot, pci_bus, device, name);
if (rc)
@ -656,8 +655,10 @@ alloc_err:
sn_release_slot(bss_hotplug_slot);
/* destroy anything else on the list */
while ((bss_hotplug_slot = sn_hp_destroy())) {
pci_hp_deregister(bss_hotplug_slot);
sn_release_slot(bss_hotplug_slot);
}
return rc;
}
@ -703,8 +704,10 @@ static void __exit sn_pci_hotplug_exit(void)
{
struct hotplug_slot *bss_hotplug_slot;
while ((bss_hotplug_slot = sn_hp_destroy())) {
pci_hp_deregister(bss_hotplug_slot);
sn_release_slot(bss_hotplug_slot);
}
if (!list_empty(&sn_hp_list))
printk(KERN_ERR "%s: internal list is not empty\n", __FILE__);


@ -61,22 +61,6 @@ static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
.get_adapter_status = get_adapter_status,
};
/**
* release_slot - free up the memory used by a slot
* @hotplug_slot: slot to free
*/
static void release_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot);
kfree(slot);
}
static int init_slots(struct controller *ctrl)
{
struct slot *slot;
@ -125,7 +109,6 @@ static int init_slots(struct controller *ctrl)
/* register this slot with the hotplug pci core */
hotplug_slot->private = slot;
snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
hotplug_slot->ops = &shpchp_hotplug_slot_ops;
@ -171,6 +154,9 @@ void cleanup_slots(struct controller *ctrl)
cancel_delayed_work(&slot->work);
destroy_workqueue(slot->wq);
pci_hp_deregister(slot->hotplug_slot);
kfree(slot->hotplug_slot->info);
kfree(slot->hotplug_slot);
kfree(slot);
}
}
@ -270,11 +256,30 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
static bool shpc_capable(struct pci_dev *bridge)
{
/*
* It is assumed that AMD GOLAM chips support SHPC but they do not
* have SHPC capability.
*/
if (bridge->vendor == PCI_VENDOR_ID_AMD &&
bridge->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
return true;
if (pci_find_capability(bridge, PCI_CAP_ID_SHPC))
return true;
return false;
}
static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int rc;
struct controller *ctrl;
if (!shpc_capable(pdev))
return -ENODEV;
if (acpi_get_hp_hw_control_from_firmware(pdev))
return -ENODEV;
@ -303,6 +308,7 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto err_cleanup_slots;
pdev->shpc_managed = 1;
return 0;
err_cleanup_slots:
@ -319,6 +325,7 @@ static void shpc_remove(struct pci_dev *dev)
{
struct controller *ctrl = pci_get_drvdata(dev);
dev->shpc_managed = 0;
shpchp_remove_ctrl_files(ctrl);
ctrl->hpc_ops->release_ctlr(ctrl);
kfree(ctrl);


@ -654,6 +654,7 @@ int shpchp_sysfs_enable_slot(struct slot *p_slot)
switch (p_slot->state) {
case BLINKINGON_STATE:
cancel_delayed_work(&p_slot->work);
/* fall through */
case STATIC_STATE:
p_slot->state = POWERON_STATE;
mutex_unlock(&p_slot->lock);
@ -689,6 +690,7 @@ int shpchp_sysfs_disable_slot(struct slot *p_slot)
switch (p_slot->state) {
case BLINKINGOFF_STATE:
cancel_delayed_work(&p_slot->work);
/* fall through */
case STATIC_STATE:
p_slot->state = POWEROFF_STATE;
mutex_unlock(&p_slot->lock);

View File

@ -818,15 +818,15 @@ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{
if (!dev->is_physfn)
return -ENOSYS;
if (numvfs > dev->sriov->total_VFs)
return -EINVAL;
/* Shouldn't change if VFs already enabled */
if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
return -EBUSY;
else
dev->sriov->driver_max_VFs = numvfs;
dev->sriov->driver_max_VFs = numvfs;
return 0;
}
EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);

View File

@ -86,13 +86,17 @@ int pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler,
va_list ap;
int ret;
char *devname;
unsigned long irqflags = IRQF_SHARED;
if (!handler)
irqflags |= IRQF_ONESHOT;
va_start(ap, fmt);
devname = kvasprintf(GFP_KERNEL, fmt, ap);
va_end(ap);
ret = request_threaded_irq(pci_irq_vector(dev, nr), handler, thread_fn,
IRQF_SHARED, devname, dev_id);
irqflags, devname, dev_id);
if (ret)
kfree(devname);
return ret;
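This matters most for threaded-only users: with no hard handler, the line must stay masked until the thread has run, which is exactly what IRQF_ONESHOT enforces. A minimal calling sketch (driver names here are hypothetical):

	/* Thread function only -- pci_request_irq() now ORs in IRQF_ONESHOT
	 * itself, so the line stays masked until my_thread_fn() returns.
	 */
	static irqreturn_t my_thread_fn(int irq, void *dev_id)
	{
		struct my_priv *priv = dev_id;	/* hypothetical private data */

		my_process_events(priv);	/* hypothetical; may sleep */
		return IRQ_HANDLED;
	}

	/* in probe, after pci_alloc_irq_vectors(): */
	ret = pci_request_irq(pdev, 0, NULL, my_thread_fn, priv,
			      "my_drv[%s]", pci_name(pdev));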

View File

@ -1446,6 +1446,9 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
info->flags |= MSI_FLAG_MUST_REACTIVATE;
/* PCI-MSI is oneshot-safe */
info->chip->flags |= IRQCHIP_ONESHOT_SAFE;
domain = msi_create_irq_domain(fwnode, info, parent);
if (!domain)
return NULL;

View File

@ -266,7 +266,7 @@ int devm_of_pci_get_host_bridge_resources(struct device *dev,
struct list_head *resources, resource_size_t *io_base)
{
struct device_node *dev_node = dev->of_node;
struct resource *res;
struct resource *res, tmp_res;
struct resource *bus_range;
struct of_pci_range range;
struct of_pci_range_parser parser;
@ -320,18 +320,16 @@ int devm_of_pci_get_host_bridge_resources(struct device *dev,
if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
continue;
res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL);
err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
if (err)
continue;
res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
if (!res) {
err = -ENOMEM;
goto failed;
}
err = of_pci_range_to_resource(&range, dev_node, res);
if (err) {
devm_kfree(dev, res);
continue;
}
if (resource_type(res) == IORESOURCE_IO) {
if (!io_base) {
dev_err(dev, "I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",

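The shape of this cleanup, reduced to its essentials (the parser name below is illustrative): parse into a stack temporary first, and duplicate into managed memory only once parsing has succeeded, so the error path has nothing to unwind.

	struct resource tmp_res, *res;
	int err;

	err = parse_one_range(&range, &tmp_res);	/* illustrative parser */
	if (err)
		return err;				/* nothing allocated yet */

	res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;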
View File

@ -403,24 +403,7 @@ bool pciehp_is_native(struct pci_dev *bridge)
*/
bool shpchp_is_native(struct pci_dev *bridge)
{
const struct pci_host_bridge *host;
if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_SHPC))
return false;
/*
* It is assumed that AMD GOLAM chips support SHPC but they do not
* have SHPC capability.
*/
if (bridge->vendor == PCI_VENDOR_ID_AMD &&
bridge->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
return true;
if (!pci_find_capability(bridge, PCI_CAP_ID_SHPC))
return false;
host = pci_find_host_bridge(bridge->bus);
return host->native_shpc_hotplug;
return bridge->shpc_managed;
}
/**

View File

@ -1668,7 +1668,7 @@ static int __init pci_driver_init(void)
if (ret)
return ret;
#endif
dma_debug_add_bus(&pci_bus_type);
return 0;
}
postcore_initcall(pci_driver_init);

View File

@ -23,7 +23,6 @@
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/security.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/pm_runtime.h>
@ -1449,7 +1448,9 @@ static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
if (val != 1)
return -EINVAL;
pm_runtime_get_sync(dev);
result = pci_reset_function(pdev);
pm_runtime_put(dev);
if (result < 0)
return result;
@ -1746,6 +1747,9 @@ static const struct attribute_group *pci_dev_attr_groups[] = {
#endif
&pci_bridge_attr_group,
&pcie_dev_attr_group,
#ifdef CONFIG_PCIEAER
&aer_stats_attr_group,
#endif
NULL,
};

View File

@ -23,7 +23,6 @@
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
@ -115,6 +114,9 @@ static bool pcie_ari_disabled;
/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;
/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;
bool pci_ats_disabled(void)
{
return pcie_ats_disabled;
@ -191,6 +193,168 @@ void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif
/**
* pci_dev_str_match_path - test if a path string matches a device
* @dev: the PCI device to test
* @path: string to match the device against
* @endptr: pointer to the string after the match
*
* Test if a string (typically from a kernel parameter) formatted as a
* path of device/function addresses matches a PCI device. The string must
* be of the form:
*
* [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
*
* A path for a device can be obtained using 'lspci -t'. Using a path
* is more robust against bus renumbering than using only a single bus,
* device and function address.
*
* Returns 1 if the string matches the device, 0 if it does not and
* a negative error code if it fails to parse the string.
*/
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
const char **endptr)
{
int ret;
int seg, bus, slot, func;
char *wpath, *p;
char end;
*endptr = strchrnul(path, ';');
wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
if (!wpath)
return -ENOMEM;
while (1) {
p = strrchr(wpath, '/');
if (!p)
break;
ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
if (ret != 2) {
ret = -EINVAL;
goto free_and_exit;
}
if (dev->devfn != PCI_DEVFN(slot, func)) {
ret = 0;
goto free_and_exit;
}
/*
* Note: we don't need to get a reference to the upstream
* bridge because we hold a reference to the top level
* device which should hold a reference to the bridge,
* and so on.
*/
dev = pci_upstream_bridge(dev);
if (!dev) {
ret = 0;
goto free_and_exit;
}
*p = 0;
}
ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
&func, &end);
if (ret != 4) {
seg = 0;
ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
if (ret != 3) {
ret = -EINVAL;
goto free_and_exit;
}
}
ret = (seg == pci_domain_nr(dev->bus) &&
bus == dev->bus->number &&
dev->devfn == PCI_DEVFN(slot, func));
free_and_exit:
kfree(wpath);
return ret;
}
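A worked example on a hypothetical topology: matching "0000:00:1c.0/00.0" against an endpoint first checks that the endpoint itself is device 00, function 0, then walks up via pci_upstream_bridge() and checks that the remaining prefix "0000:00:1c.0" names its upstream port's domain, bus and devfn. Matching leaf-to-root this way means only the root segment of the path depends on the bus numbers assigned at enumeration time.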
/**
* pci_dev_str_match - test if a string matches a device
* @dev: the PCI device to test
* @p: string to match the device against
* @endptr: pointer to the string after the match
*
* Test if a string (typically from a kernel parameter) matches a specified
* PCI device. The string may be of one of the following formats:
*
* [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
* pci:<vendor>:<device>[:<subvendor>:<subdevice>]
*
* The first format specifies a PCI bus/device/function address which
* may change if new hardware is inserted, if motherboard firmware changes,
* or if kernel parameters change. If the domain is
* left unspecified, it is taken to be 0. In order to be robust against
* bus renumbering issues, a path of PCI device/function numbers may be used
* to address the specific device. The path for a device can be determined
* through the use of 'lspci -t'.
*
* The second format matches devices using IDs in the configuration
* space which may match multiple devices in the system. A value of 0
* for any field will match all devices. (Note: this differs from
* in-kernel code that uses PCI_ANY_ID which is ~0; this is for
* legacy reasons and convenience so users don't have to specify
* FFFFFFFFs on the command line.)
*
* Returns 1 if the string matches the device, 0 if it does not and
* a negative error code if the string cannot be parsed.
*/
static int pci_dev_str_match(struct pci_dev *dev, const char *p,
const char **endptr)
{
int ret;
int count;
unsigned short vendor, device, subsystem_vendor, subsystem_device;
if (strncmp(p, "pci:", 4) == 0) {
/* PCI vendor/device (subvendor/subdevice) IDs are specified */
p += 4;
ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
&subsystem_vendor, &subsystem_device, &count);
if (ret != 4) {
ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
if (ret != 2)
return -EINVAL;
subsystem_vendor = 0;
subsystem_device = 0;
}
p += count;
if ((!vendor || vendor == dev->vendor) &&
(!device || device == dev->device) &&
(!subsystem_vendor ||
subsystem_vendor == dev->subsystem_vendor) &&
(!subsystem_device ||
subsystem_device == dev->subsystem_device))
goto found;
} else {
/*
* PCI Bus, Device, Function IDs are specified
* (optionally, may include a path of devfns following it)
*/
ret = pci_dev_str_match_path(dev, p, &p);
if (ret < 0)
return ret;
else if (ret)
goto found;
}
*endptr = p;
return 0;
found:
*endptr = p;
return 1;
}
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
u8 pos, int cap, int *ttl)
@ -1171,6 +1335,33 @@ static void pci_restore_config_space(struct pci_dev *pdev)
}
}
static void pci_restore_rebar_state(struct pci_dev *pdev)
{
unsigned int pos, nbars, i;
u32 ctrl;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
if (!pos)
return;
pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
PCI_REBAR_CTRL_NBAR_SHIFT;
for (i = 0; i < nbars; i++, pos += 8) {
struct resource *res;
int bar_idx, size;
pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
res = pdev->resource + bar_idx;
size = order_base_2((resource_size(res) >> 20) | 1) - 1;
ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
}
}
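Worked example of the size computation above: for a hypothetical 256 MB BAR, resource_size() >> 20 is 256; OR-ing with 1 gives 257, order_base_2(257) is 9, and subtracting 1 yields 8, which is precisely the Resizable BAR encoding for 256 MB (the encoding is log2 of the size in MB). The "| 1" both keeps order_base_2() away from 0 for sub-megabyte regions and makes exact powers of two round up to the correct encoding.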
/**
* pci_restore_state - Restore the saved state of a PCI device
* @dev: - PCI device that we're dealing with
@ -1186,6 +1377,7 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_pri_state(dev);
pci_restore_ats_state(dev);
pci_restore_vc_state(dev);
pci_restore_rebar_state(dev);
pci_cleanup_aer_error_status_regs(dev);
@ -2045,6 +2237,7 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
case PCI_D2:
if (pci_no_d1d2(dev))
break;
/* else: fall through */
default:
target_state = state;
}
@ -2290,7 +2483,7 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev)
* @bridge: Bridge to check
*
* This function checks if it is possible to move the bridge to D3.
* Currently we only allow D3 for recent enough PCIe ports.
* Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
*/
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
@ -2305,18 +2498,27 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
return false;
/*
* Hotplug interrupts cannot be delivered if the link is down,
* so parents of a hotplug port must stay awake. In addition,
* hotplug ports handled by firmware in System Management Mode
* Hotplug ports handled by firmware in System Management Mode
* may not be put into D3 by the OS (Thunderbolt on non-Macs).
* For simplicity, disallow in general for now.
*/
if (bridge->is_hotplug_bridge)
if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
return false;
if (pci_bridge_d3_force)
return true;
/* Even the oldest 2010 Thunderbolt controller supports D3. */
if (bridge->is_thunderbolt)
return true;
/*
* Hotplug ports handled natively by the OS were not validated
* by vendors for runtime D3 at least until 2018 because there
* was no OS support.
*/
if (bridge->is_hotplug_bridge)
return false;
/*
* It should be safe to put PCIe ports from 2015 or newer
* to D3.
@ -2820,6 +3022,66 @@ void pci_request_acs(void)
pci_acs_enable = 1;
}
static const char *disable_acs_redir_param;
/**
* pci_disable_acs_redir - disable ACS redirect capabilities
* @dev: the PCI device
*
* Disables the ACS redirect bits only on devices that match the
* disable_acs_redir kernel parameter.
*/
static void pci_disable_acs_redir(struct pci_dev *dev)
{
int ret = 0;
const char *p;
int pos;
u16 ctrl;
if (!disable_acs_redir_param)
return;
p = disable_acs_redir_param;
while (*p) {
ret = pci_dev_str_match(dev, p, &p);
if (ret < 0) {
pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
disable_acs_redir_param);
break;
} else if (ret == 1) {
/* Found a match */
break;
}
if (*p != ';' && *p != ',') {
/* End of param or invalid format */
break;
}
p++;
}
if (ret != 1)
return;
if (!pci_dev_specific_disable_acs_redir(dev))
return;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
if (!pos) {
pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
return;
}
pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
/* P2P Request & Completion Redirect */
ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
pci_info(dev, "disabled ACS redirect\n");
}
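For instance (addresses and IDs illustrative), ACS redirection could be disabled on one device by path and on any matching device by ID with:

	pci=disable_acs_redir=0000:00:1c.0/00.0;pci:8086:1533

using the same match syntax described at pci_dev_str_match() above.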
/**
* pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
* @dev: the PCI device
@ -2859,12 +3121,22 @@ static void pci_std_enable_acs(struct pci_dev *dev)
void pci_enable_acs(struct pci_dev *dev)
{
if (!pci_acs_enable)
return;
goto disable_acs_redir;
if (!pci_dev_specific_enable_acs(dev))
return;
goto disable_acs_redir;
pci_std_enable_acs(dev);
disable_acs_redir:
/*
* Note: pci_disable_acs_redir() must be called even if ACS was not
* enabled by the kernel because it may have been enabled by
* platform firmware. So if we are told to disable it, we should
* always disable it after setting the kernel's default
* preferences.
*/
pci_disable_acs_redir(dev);
}
static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
@ -3070,7 +3342,7 @@ int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
return pos;
pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> 8;
return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
}
/**
@ -3093,7 +3365,7 @@ int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
ctrl |= size << 8;
ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
return 0;
}
@ -4077,7 +4349,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
* Returns true if the device advertises support for PCIe function level
* resets.
*/
static bool pcie_has_flr(struct pci_dev *dev)
bool pcie_has_flr(struct pci_dev *dev)
{
u32 cap;
@ -4087,6 +4359,7 @@ static bool pcie_has_flr(struct pci_dev *dev)
pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
return cap & PCI_EXP_DEVCAP_FLR;
}
EXPORT_SYMBOL_GPL(pcie_has_flr);
/**
* pcie_flr - initiate a PCIe function level reset
@ -4262,19 +4535,18 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
}
/**
* pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
* pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
* @dev: Bridge device
*
* Use the bridge control register to assert reset on the secondary bus.
* Devices on the secondary bus are left in power-on state.
*/
int pci_reset_bridge_secondary_bus(struct pci_dev *dev)
int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
{
pcibios_reset_secondary_bus(dev);
return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
}
EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
@ -4291,9 +4563,7 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
if (probe)
return 0;
pci_reset_bridge_secondary_bus(dev->bus->self);
return 0;
return pci_bridge_secondary_bus_reset(dev->bus->self);
}
static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
@ -4825,7 +5095,7 @@ int pci_probe_reset_slot(struct pci_slot *slot)
EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
/**
* pci_reset_slot - reset a PCI slot
* __pci_reset_slot - Try to reset a PCI slot
* @slot: PCI slot to reset
*
* A PCI bus may host multiple slots, each slot may support a reset mechanism
@ -4837,33 +5107,9 @@ EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
* through this function. PCI config space of all devices in the slot and
* behind the slot is saved before and restored after reset.
*
* Return 0 on success, non-zero on error.
*/
int pci_reset_slot(struct pci_slot *slot)
{
int rc;
rc = pci_slot_reset(slot, 1);
if (rc)
return rc;
pci_slot_save_and_disable(slot);
rc = pci_slot_reset(slot, 0);
pci_slot_restore(slot);
return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_slot);
/**
* pci_try_reset_slot - Try to reset a PCI slot
* @slot: PCI slot to reset
*
* Same as above except return -EAGAIN if the slot cannot be locked
*/
int pci_try_reset_slot(struct pci_slot *slot)
static int __pci_reset_slot(struct pci_slot *slot)
{
int rc;
@ -4884,10 +5130,11 @@ int pci_try_reset_slot(struct pci_slot *slot)
return rc;
}
EXPORT_SYMBOL_GPL(pci_try_reset_slot);
static int pci_bus_reset(struct pci_bus *bus, int probe)
{
int ret;
if (!bus->self || !pci_bus_resetable(bus))
return -ENOTTY;
@ -4898,11 +5145,11 @@ static int pci_bus_reset(struct pci_bus *bus, int probe)
might_sleep();
pci_reset_bridge_secondary_bus(bus->self);
ret = pci_bridge_secondary_bus_reset(bus->self);
pci_bus_unlock(bus);
return 0;
return ret;
}
/**
@ -4918,39 +5165,12 @@ int pci_probe_reset_bus(struct pci_bus *bus)
EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
/**
* pci_reset_bus - reset a PCI bus
* @bus: top level PCI bus to reset
*
* Do a bus reset on the given bus and any subordinate buses, saving
* and restoring state of all devices.
*
* Return 0 on success, non-zero on error.
*/
int pci_reset_bus(struct pci_bus *bus)
{
int rc;
rc = pci_bus_reset(bus, 1);
if (rc)
return rc;
pci_bus_save_and_disable(bus);
rc = pci_bus_reset(bus, 0);
pci_bus_restore(bus);
return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_bus);
/**
* pci_try_reset_bus - Try to reset a PCI bus
* __pci_reset_bus - Try to reset a PCI bus
* @bus: top level PCI bus to reset
*
* Same as above except return -EAGAIN if the bus cannot be locked
*/
int pci_try_reset_bus(struct pci_bus *bus)
static int __pci_reset_bus(struct pci_bus *bus)
{
int rc;
@ -4962,7 +5182,7 @@ int pci_try_reset_bus(struct pci_bus *bus)
if (pci_bus_trylock(bus)) {
might_sleep();
pci_reset_bridge_secondary_bus(bus->self);
rc = pci_bridge_secondary_bus_reset(bus->self);
pci_bus_unlock(bus);
} else
rc = -EAGAIN;
@ -4971,7 +5191,19 @@ int pci_try_reset_bus(struct pci_bus *bus)
return rc;
}
EXPORT_SYMBOL_GPL(pci_try_reset_bus);
/**
* pci_reset_bus - Try to reset a PCI bus
* @pdev: top level PCI device to reset via slot/bus
*
* Same as above except return -EAGAIN if the bus cannot be locked
*/
int pci_reset_bus(struct pci_dev *pdev)
{
return (!pci_probe_reset_slot(pdev->slot)) ?
__pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
}
EXPORT_SYMBOL_GPL(pci_reset_bus);
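Callers now hand in the device and let the core choose: a slot reset when pci_probe_reset_slot() reports one is available, otherwise a secondary bus reset. A calling sketch:

	/* May return -EAGAIN if the slot/bus cannot be locked. */
	err = pci_reset_bus(pdev);
	if (err)
		pci_warn(pdev, "reset failed: %d\n", err);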
/**
* pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
@ -5304,14 +5536,16 @@ u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
}
/**
* pcie_print_link_status - Report the PCI device's link speed and width
* __pcie_print_link_status - Report the PCI device's link speed and width
* @dev: PCI device to query
* @verbose: Print info even when enough bandwidth is available
*
* Report the available bandwidth at the device. If this is less than the
* device is capable of, report the device's maximum possible bandwidth and
* the upstream link that limits its performance to less than that.
* If the available bandwidth at the device is less than the device is
* capable of, report the device's maximum possible bandwidth and the
* upstream link that limits its performance. If @verbose, always print
* the available bandwidth, even if the device isn't constrained.
*/
void pcie_print_link_status(struct pci_dev *dev)
void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
{
enum pcie_link_width width, width_cap;
enum pci_bus_speed speed, speed_cap;
@ -5321,11 +5555,11 @@ void pcie_print_link_status(struct pci_dev *dev)
bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
if (bw_avail >= bw_cap)
if (bw_avail >= bw_cap && verbose)
pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
bw_cap / 1000, bw_cap % 1000,
PCIE_SPEED2STR(speed_cap), width_cap);
else
else if (bw_avail < bw_cap)
pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
bw_avail / 1000, bw_avail % 1000,
PCIE_SPEED2STR(speed), width,
@ -5333,6 +5567,17 @@ void pcie_print_link_status(struct pci_dev *dev)
bw_cap / 1000, bw_cap % 1000,
PCIE_SPEED2STR(speed_cap), width_cap);
}
/**
* pcie_print_link_status - Report the PCI device's link speed and width
* @dev: PCI device to query
*
* Report the available bandwidth at the device.
*/
void pcie_print_link_status(struct pci_dev *dev)
{
__pcie_print_link_status(dev, true);
}
EXPORT_SYMBOL(pcie_print_link_status);
/**
@ -5427,8 +5672,19 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
* @dev: the PCI device for which alias is added
* @devfn: alias slot and function
*
* This helper encodes 8-bit devfn as bit number in dma_alias_mask.
* It should be called early, preferably as PCI fixup header quirk.
* This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
* which is used to program permissible bus-devfn source addresses for DMA
* requests in an IOMMU. These aliases factor into IOMMU group creation
* and are useful for devices generating DMA requests beyond or different
* from their logical bus-devfn. Examples include device quirks where the
* device simply uses the wrong devfn, as well as non-transparent bridges
* where the alias may be a proxy for devices in another domain.
*
* IOMMU group creation is performed during device discovery or addition,
* prior to any potential DMA mapping and therefore prior to driver probing
* (especially for userspace assigned devices where IOMMU group definition
* cannot be left as a userspace activity). DMA aliases should therefore
* be configured via quirks, such as the PCI fixup header quirk.
*/
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
{
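Wiring an alias up as a header fixup quirk keeps it ahead of IOMMU group creation; a sketch with placeholder IDs:

	/* Hypothetical device whose DMA requests carry function 1's devfn. */
	static void quirk_my_dma_alias(struct pci_dev *dev)
	{
		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1));
	}
	DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_my_dma_alias);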
@ -5494,10 +5750,10 @@ static DEFINE_SPINLOCK(resource_alignment_lock);
static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
bool *resize)
{
int seg, bus, slot, func, align_order, count;
unsigned short vendor, device, subsystem_vendor, subsystem_device;
int align_order, count;
resource_size_t align = pcibios_default_alignment();
char *p;
const char *p;
int ret;
spin_lock(&resource_alignment_lock);
p = resource_alignment_param;
@ -5517,58 +5773,21 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
} else {
align_order = -1;
}
if (strncmp(p, "pci:", 4) == 0) {
/* PCI vendor/device (subvendor/subdevice) ids are specified */
p += 4;
if (sscanf(p, "%hx:%hx:%hx:%hx%n",
&vendor, &device, &subsystem_vendor, &subsystem_device, &count) != 4) {
if (sscanf(p, "%hx:%hx%n", &vendor, &device, &count) != 2) {
printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: pci:%s\n",
p);
break;
}
subsystem_vendor = subsystem_device = 0;
}
p += count;
if ((!vendor || (vendor == dev->vendor)) &&
(!device || (device == dev->device)) &&
(!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
(!subsystem_device || (subsystem_device == dev->subsystem_device))) {
*resize = true;
if (align_order == -1)
align = PAGE_SIZE;
else
align = 1 << align_order;
/* Found */
break;
}
}
else {
if (sscanf(p, "%x:%x:%x.%x%n",
&seg, &bus, &slot, &func, &count) != 4) {
seg = 0;
if (sscanf(p, "%x:%x.%x%n",
&bus, &slot, &func, &count) != 3) {
/* Invalid format */
printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
p);
break;
}
}
p += count;
if (seg == pci_domain_nr(dev->bus) &&
bus == dev->bus->number &&
slot == PCI_SLOT(dev->devfn) &&
func == PCI_FUNC(dev->devfn)) {
*resize = true;
if (align_order == -1)
align = PAGE_SIZE;
else
align = 1 << align_order;
/* Found */
break;
}
ret = pci_dev_str_match(dev, p, &p);
if (ret == 1) {
*resize = true;
if (align_order == -1)
align = PAGE_SIZE;
else
align = 1 << align_order;
break;
} else if (ret < 0) {
pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
p);
break;
}
if (*p != ';' && *p != ',') {
/* End of param or invalid format */
break;
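With the shared parser, resource_alignment accepts the same device specifiers, e.g. (illustrative) "pci=resource_alignment=12@pci:8086:1533" requests 4 KB (2^12) alignment for every device with that vendor/device ID, while "pci=resource_alignment=0000:00:1f.2" applies the default PAGE_SIZE alignment to the device at that address.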
@ -5845,6 +6064,8 @@ static int __init pci_setup(char *str)
pcie_ats_disabled = true;
} else if (!strcmp(str, "noaer")) {
pci_no_aer();
} else if (!strcmp(str, "earlydump")) {
pci_early_dump = true;
} else if (!strncmp(str, "realloc=", 8)) {
pci_realloc_get_opt(str + 8);
} else if (!strncmp(str, "realloc", 7)) {
@ -5881,6 +6102,8 @@ static int __init pci_setup(char *str)
pcie_bus_config = PCIE_BUS_PEER2PEER;
} else if (!strncmp(str, "pcie_scan_all", 13)) {
pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
} else if (!strncmp(str, "disable_acs_redir=", 18)) {
disable_acs_redir_param = str + 18;
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
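Both new options ride the existing pci= parameter; since pci= splits its options on commas, a combined (illustrative) invocation looks like:

	pci=earlydump,disable_acs_redir=0000:00:1c.0/00.0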

View File

@ -7,6 +7,7 @@
#define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */
extern const unsigned char pcie_link_speed[];
extern bool pci_early_dump;
bool pcie_cap_has_lnkctl(const struct pci_dev *dev);
@ -33,6 +34,7 @@ int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai,
enum pci_mmap_api mmap_api);
int pci_probe_reset_function(struct pci_dev *dev);
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
/**
* struct pci_platform_pm_ops - Firmware PM callbacks
@ -225,6 +227,10 @@ enum pci_bar_type {
int pci_configure_extended_tags(struct pci_dev *dev, void *ign);
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
int crs_timeout);
bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
int crs_timeout);
int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int crs_timeout);
int pci_setup_device(struct pci_dev *dev);
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int reg);
@ -259,6 +265,7 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
enum pcie_link_width *width);
void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
/* Single Root I/O Virtualization */
struct pci_sriov {
@ -311,6 +318,34 @@ static inline bool pci_dev_is_added(const struct pci_dev *dev)
return test_bit(PCI_DEV_ADDED, &dev->priv_flags);
}
#ifdef CONFIG_PCIEAER
#include <linux/aer.h>
#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
struct aer_err_info {
struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
int error_dev_num;
unsigned int id:16;
unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */
unsigned int __pad1:5;
unsigned int multi_error_valid:1;
unsigned int first_error:5;
unsigned int __pad2:2;
unsigned int tlp_header_valid:1;
unsigned int status; /* COR/UNCOR Error Status */
unsigned int mask; /* COR/UNCOR Error Mask */
struct aer_header_log_regs tlp; /* TLP Header */
};
int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
#endif /* CONFIG_PCIEAER */
#ifdef CONFIG_PCI_ATS
void pci_restore_ats_state(struct pci_dev *dev);
#else
@ -367,6 +402,25 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
}
void pci_enable_acs(struct pci_dev *dev);
#ifdef CONFIG_PCI_QUIRKS
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
int pci_dev_specific_enable_acs(struct pci_dev *dev);
int pci_dev_specific_disable_acs_redir(struct pci_dev *dev);
#else
static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
u16 acs_flags)
{
return -ENOTTY;
}
static inline int pci_dev_specific_enable_acs(struct pci_dev *dev)
{
return -ENOTTY;
}
static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
{
return -ENOTTY;
}
#endif
/* PCI error reporting and recovery */
void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service);
@ -467,4 +521,19 @@ static inline int devm_of_pci_get_host_bridge_resources(struct device *dev,
}
#endif
#ifdef CONFIG_PCIEAER
void pci_no_aer(void);
void pci_aer_init(struct pci_dev *dev);
void pci_aer_exit(struct pci_dev *dev);
extern const struct attribute_group aer_stats_attr_group;
void pci_aer_clear_fatal_status(struct pci_dev *dev);
void pci_aer_clear_device_status(struct pci_dev *dev);
#else
static inline void pci_no_aer(void) { }
static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; }
static inline void pci_aer_exit(struct pci_dev *d) { }
static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { }
static inline void pci_aer_clear_device_status(struct pci_dev *dev) { }
#endif
#endif /* DRIVERS_PCI_H */

View File

@ -31,26 +31,9 @@
#include "portdrv.h"
#define AER_ERROR_SOURCES_MAX 100
#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
struct aer_err_info {
struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
int error_dev_num;
unsigned int id:16;
unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */
unsigned int __pad1:5;
unsigned int multi_error_valid:1;
unsigned int first_error:5;
unsigned int __pad2:2;
unsigned int tlp_header_valid:1;
unsigned int status; /* COR/UNCOR Error Status */
unsigned int mask; /* COR/UNCOR Error Mask */
struct aer_header_log_regs tlp; /* TLP Header */
};
#define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
#define AER_MAX_TYPEOF_UNCOR_ERRS 26 /* as per PCI_ERR_UNCOR_STATUS*/
struct aer_err_source {
unsigned int status;
@ -76,6 +59,42 @@ struct aer_rpc {
*/
};
/* AER stats for the device */
struct aer_stats {
/*
* Fields for all AER capable devices. They indicate the errors
* "as seen by this device". Note that this may mean that if an
* end point is causing problems, the AER counters may increment
* at its link partner (e.g. root port) because the errors will be
* "seen" by the link partner and not the problematic end point
* itself (which may report all counters as 0 as it never saw any
* problems).
*/
/* Counters for different type of correctable errors */
u64 dev_cor_errs[AER_MAX_TYPEOF_COR_ERRS];
/* Counters for different type of fatal uncorrectable errors */
u64 dev_fatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
/* Counters for different type of nonfatal uncorrectable errors */
u64 dev_nonfatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
/* Total number of ERR_COR sent by this device */
u64 dev_total_cor_errs;
/* Total number of ERR_FATAL sent by this device */
u64 dev_total_fatal_errs;
/* Total number of ERR_NONFATAL sent by this device */
u64 dev_total_nonfatal_errs;
/*
* Fields for Root ports & root complex event collectors only, these
* indicate the total number of ERR_COR, ERR_FATAL, and ERR_NONFATAL
* messages received by the root port / event collector, INCLUDING the
* ones that are generated internally (by the rootport itself)
*/
u64 rootport_total_cor_errs;
u64 rootport_total_fatal_errs;
u64 rootport_total_nonfatal_errs;
};
#define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \
PCI_ERR_UNC_ECRC| \
PCI_ERR_UNC_UNSUP| \
@ -303,12 +322,13 @@ int pcie_aer_get_firmware_first(struct pci_dev *dev)
if (!pci_is_pcie(dev))
return 0;
if (pcie_ports_native)
return 0;
if (!dev->__aer_firmware_first_valid)
aer_set_firmware_first(dev);
return dev->__aer_firmware_first;
}
#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
static bool aer_firmware_first;
@ -323,6 +343,9 @@ bool aer_acpi_firmware_first(void)
.firmware_first = 0,
};
if (pcie_ports_native)
return false;
if (!parsed) {
apei_hest_parse(aer_hest_parse, &info);
aer_firmware_first = info.firmware_first;
@ -357,16 +380,30 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
void pci_aer_clear_device_status(struct pci_dev *dev)
{
u16 sta;
pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}
int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
int pos;
u32 status;
u32 status, sev;
pos = dev->aer_cap;
if (!pos)
return -EIO;
if (pcie_aer_get_firmware_first(dev))
return -EIO;
/* Clear status bits for ERR_NONFATAL errors only */
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
status &= ~sev;
if (status)
pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
@ -374,6 +411,26 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
void pci_aer_clear_fatal_status(struct pci_dev *dev)
{
int pos;
u32 status, sev;
pos = dev->aer_cap;
if (!pos)
return;
if (pcie_aer_get_firmware_first(dev))
return;
/* Clear status bits for ERR_FATAL errors only */
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
status &= sev;
if (status)
pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
{
int pos;
@ -387,6 +444,9 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
if (!pos)
return -EIO;
if (pcie_aer_get_firmware_first(dev))
return -EIO;
port_type = pci_pcie_type(dev);
if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
@ -402,10 +462,20 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
return 0;
}
int pci_aer_init(struct pci_dev *dev)
void pci_aer_init(struct pci_dev *dev)
{
dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
return pci_cleanup_aer_error_status_regs(dev);
if (dev->aer_cap)
dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
pci_cleanup_aer_error_status_regs(dev);
}
void pci_aer_exit(struct pci_dev *dev)
{
kfree(dev->aer_stats);
dev->aer_stats = NULL;
}
#define AER_AGENT_RECEIVER 0
@ -458,52 +528,52 @@ static const char *aer_error_layer[] = {
"Transaction Layer"
};
static const char *aer_correctable_error_string[] = {
"Receiver Error", /* Bit Position 0 */
static const char *aer_correctable_error_string[AER_MAX_TYPEOF_COR_ERRS] = {
"RxErr", /* Bit Position 0 */
NULL,
NULL,
NULL,
NULL,
NULL,
"Bad TLP", /* Bit Position 6 */
"Bad DLLP", /* Bit Position 7 */
"RELAY_NUM Rollover", /* Bit Position 8 */
"BadTLP", /* Bit Position 6 */
"BadDLLP", /* Bit Position 7 */
"Rollover", /* Bit Position 8 */
NULL,
NULL,
NULL,
"Replay Timer Timeout", /* Bit Position 12 */
"Advisory Non-Fatal", /* Bit Position 13 */
"Corrected Internal Error", /* Bit Position 14 */
"Header Log Overflow", /* Bit Position 15 */
"Timeout", /* Bit Position 12 */
"NonFatalErr", /* Bit Position 13 */
"CorrIntErr", /* Bit Position 14 */
"HeaderOF", /* Bit Position 15 */
};
static const char *aer_uncorrectable_error_string[] = {
static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = {
"Undefined", /* Bit Position 0 */
NULL,
NULL,
NULL,
"Data Link Protocol", /* Bit Position 4 */
"Surprise Down Error", /* Bit Position 5 */
"DLP", /* Bit Position 4 */
"SDES", /* Bit Position 5 */
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
"Poisoned TLP", /* Bit Position 12 */
"Flow Control Protocol", /* Bit Position 13 */
"Completion Timeout", /* Bit Position 14 */
"Completer Abort", /* Bit Position 15 */
"Unexpected Completion", /* Bit Position 16 */
"Receiver Overflow", /* Bit Position 17 */
"Malformed TLP", /* Bit Position 18 */
"TLP", /* Bit Position 12 */
"FCP", /* Bit Position 13 */
"CmpltTO", /* Bit Position 14 */
"CmpltAbrt", /* Bit Position 15 */
"UnxCmplt", /* Bit Position 16 */
"RxOF", /* Bit Position 17 */
"MalfTLP", /* Bit Position 18 */
"ECRC", /* Bit Position 19 */
"Unsupported Request", /* Bit Position 20 */
"ACS Violation", /* Bit Position 21 */
"Uncorrectable Internal Error", /* Bit Position 22 */
"MC Blocked TLP", /* Bit Position 23 */
"AtomicOp Egress Blocked", /* Bit Position 24 */
"TLP Prefix Blocked Error", /* Bit Position 25 */
"UnsupReq", /* Bit Position 20 */
"ACSViol", /* Bit Position 21 */
"UncorrIntErr", /* Bit Position 22 */
"BlockedTLP", /* Bit Position 23 */
"AtomicOpBlocked", /* Bit Position 24 */
"TLPBlockedErr", /* Bit Position 25 */
};
static const char *aer_agent_string[] = {
@ -513,6 +583,144 @@ static const char *aer_agent_string[] = {
"Transmitter ID"
};
#define aer_stats_dev_attr(name, stats_array, strings_array, \
total_string, total_field) \
static ssize_t \
name##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
unsigned int i; \
char *str = buf; \
struct pci_dev *pdev = to_pci_dev(dev); \
u64 *stats = pdev->aer_stats->stats_array; \
\
for (i = 0; i < ARRAY_SIZE(strings_array); i++) { \
if (strings_array[i]) \
str += sprintf(str, "%s %llu\n", \
strings_array[i], stats[i]); \
else if (stats[i]) \
str += sprintf(str, #stats_array "_bit[%d] %llu\n",\
i, stats[i]); \
} \
str += sprintf(str, "TOTAL_%s %llu\n", total_string, \
pdev->aer_stats->total_field); \
return str-buf; \
} \
static DEVICE_ATTR_RO(name)
aer_stats_dev_attr(aer_dev_correctable, dev_cor_errs,
aer_correctable_error_string, "ERR_COR",
dev_total_cor_errs);
aer_stats_dev_attr(aer_dev_fatal, dev_fatal_errs,
aer_uncorrectable_error_string, "ERR_FATAL",
dev_total_fatal_errs);
aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs,
aer_uncorrectable_error_string, "ERR_NONFATAL",
dev_total_nonfatal_errs);
#define aer_stats_rootport_attr(name, field) \
static ssize_t \
name##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct pci_dev *pdev = to_pci_dev(dev); \
return sprintf(buf, "%llu\n", pdev->aer_stats->field); \
} \
static DEVICE_ATTR_RO(name)
aer_stats_rootport_attr(aer_rootport_total_err_cor,
rootport_total_cor_errs);
aer_stats_rootport_attr(aer_rootport_total_err_fatal,
rootport_total_fatal_errs);
aer_stats_rootport_attr(aer_rootport_total_err_nonfatal,
rootport_total_nonfatal_errs);
static struct attribute *aer_stats_attrs[] __ro_after_init = {
&dev_attr_aer_dev_correctable.attr,
&dev_attr_aer_dev_fatal.attr,
&dev_attr_aer_dev_nonfatal.attr,
&dev_attr_aer_rootport_total_err_cor.attr,
&dev_attr_aer_rootport_total_err_fatal.attr,
&dev_attr_aer_rootport_total_err_nonfatal.attr,
NULL
};
static umode_t aer_stats_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct pci_dev *pdev = to_pci_dev(dev);
if (!pdev->aer_stats)
return 0;
if ((a == &dev_attr_aer_rootport_total_err_cor.attr ||
a == &dev_attr_aer_rootport_total_err_fatal.attr ||
a == &dev_attr_aer_rootport_total_err_nonfatal.attr) &&
pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
return 0;
return a->mode;
}
const struct attribute_group aer_stats_attr_group = {
.attrs = aer_stats_attrs,
.is_visible = aer_stats_attrs_are_visible,
};
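Reading one of these attributes yields one line per named error bit plus a running total; a freshly booted Root Port might show (path and counts hypothetical):

	# cat /sys/bus/pci/devices/0000:00:1c.0/aer_dev_correctable
	RxErr 0
	BadTLP 1
	BadDLLP 0
	Rollover 0
	Timeout 0
	NonFatalErr 0
	CorrIntErr 0
	HeaderOF 0
	TOTAL_ERR_COR 1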
static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
struct aer_err_info *info)
{
int status, i, max = -1;
u64 *counter = NULL;
struct aer_stats *aer_stats = pdev->aer_stats;
if (!aer_stats)
return;
switch (info->severity) {
case AER_CORRECTABLE:
aer_stats->dev_total_cor_errs++;
counter = &aer_stats->dev_cor_errs[0];
max = AER_MAX_TYPEOF_COR_ERRS;
break;
case AER_NONFATAL:
aer_stats->dev_total_nonfatal_errs++;
counter = &aer_stats->dev_nonfatal_errs[0];
max = AER_MAX_TYPEOF_UNCOR_ERRS;
break;
case AER_FATAL:
aer_stats->dev_total_fatal_errs++;
counter = &aer_stats->dev_fatal_errs[0];
max = AER_MAX_TYPEOF_UNCOR_ERRS;
break;
}
status = (info->status & ~info->mask);
for (i = 0; i < max; i++)
if (status & (1 << i))
counter[i]++;
}
static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
struct aer_err_source *e_src)
{
struct aer_stats *aer_stats = pdev->aer_stats;
if (!aer_stats)
return;
if (e_src->status & PCI_ERR_ROOT_COR_RCV)
aer_stats->rootport_total_cor_errs++;
if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
aer_stats->rootport_total_fatal_errs++;
else
aer_stats->rootport_total_nonfatal_errs++;
}
}
static void __print_tlp_header(struct pci_dev *dev,
struct aer_header_log_regs *t)
{
@ -545,9 +753,10 @@ static void __aer_print_error(struct pci_dev *dev,
pci_err(dev, " [%2d] Unknown Error Bit%s\n",
i, info->first_error == i ? " (First)" : "");
}
pci_dev_aer_stats_incr(dev, info);
}
static void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
int layer, agent;
int id = ((dev->bus->number << 8) | dev->devfn);
@ -799,6 +1008,7 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
if (pos)
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
info->status);
pci_aer_clear_device_status(dev);
} else if (info->severity == AER_NONFATAL)
pcie_do_nonfatal_recovery(dev);
else if (info->severity == AER_FATAL)
@ -876,7 +1086,7 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
#endif
/**
* get_device_error_info - read error status from dev and store it to info
* aer_get_device_error_info - read error status from dev and store it to info
* @dev: pointer to the device expected to have a error record
* @info: pointer to structure to store the error record
*
@ -884,7 +1094,7 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
*
* Note that @info is reused among all error devices. Clear fields properly.
*/
static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
int pos, temp;
@ -942,11 +1152,11 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
/* Report all errors before handling them, so records are not lost to a reset etc. */
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
if (get_device_error_info(e_info->dev[i], e_info))
if (aer_get_device_error_info(e_info->dev[i], e_info))
aer_print_error(e_info->dev[i], e_info);
}
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
if (get_device_error_info(e_info->dev[i], e_info))
if (aer_get_device_error_info(e_info->dev[i], e_info))
handle_error_source(e_info->dev[i], e_info);
}
}
@ -962,6 +1172,8 @@ static void aer_isr_one_error(struct aer_rpc *rpc,
struct pci_dev *pdev = rpc->rpd;
struct aer_err_info *e_info = &rpc->e_info;
pci_rootport_aer_stats_incr(pdev, e_src);
/*
* Both a correctable error and an uncorrectable error may be
* logged. Report the correctable error first.
@ -1305,6 +1517,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
{
u32 reg32;
int pos;
int rc;
pos = dev->aer_cap;
@ -1313,7 +1526,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
pci_reset_bridge_secondary_bus(dev);
rc = pci_bridge_secondary_bus_reset(dev);
pci_printk(KERN_DEBUG, dev, "Root Port link has been reset\n");
/* Clear Root Error Status */
@ -1325,7 +1538,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
return PCI_ERS_RESULT_RECOVERED;
return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
/**
@ -1336,20 +1549,8 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
*/
static void aer_error_resume(struct pci_dev *dev)
{
int pos;
u32 status, mask;
u16 reg16;
/* Clean up Root device status */
pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &reg16);
pcie_capability_write_word(dev, PCI_EXP_DEVSTA, reg16);
/* Clean AER Root Error Status */
pos = dev->aer_cap;
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
status &= ~mask; /* Clear corresponding nonfatal bits */
pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
pci_aer_clear_device_status(dev);
pci_cleanup_aer_uncorrect_error_status(dev);
}
static struct pcie_port_service_driver aerdriver = {

View File

@ -1127,11 +1127,9 @@ static int pcie_aspm_set_policy(const char *val,
if (aspm_disabled)
return -EPERM;
for (i = 0; i < ARRAY_SIZE(policy_str); i++)
if (!strncmp(val, policy_str[i], strlen(policy_str[i])))
break;
if (i >= ARRAY_SIZE(policy_str))
return -EINVAL;
i = sysfs_match_string(policy_str, val);
if (i < 0)
return i;
if (i == aspm_policy)
return 0;
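sysfs_match_string() wraps the scan-and-compare loop and, via sysfs_streq(), also tolerates the trailing newline that sysfs writes carry. The idiom in miniature:

	static const char * const modes[] = { "off", "on", "auto" };

	/* Returns the matching index, or -EINVAL if val names no entry. */
	int idx = sysfs_match_string(modes, val);
	if (idx < 0)
		return idx;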

View File

@ -6,6 +6,7 @@
* Copyright (C) 2016 Intel Corp.
*/
#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@ -16,10 +17,8 @@
struct dpc_dev {
struct pcie_device *dev;
struct work_struct work;
u16 cap_pos;
bool rp_extensions;
u32 rp_pio_status;
u8 rp_log_size;
};
@ -65,19 +64,13 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
return 0;
}
static void dpc_wait_link_inactive(struct dpc_dev *dpc)
{
struct pci_dev *pdev = dpc->dev->port;
pcie_wait_for_link(pdev, false);
}
static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
struct dpc_dev *dpc;
struct pcie_device *pciedev;
struct device *devdpc;
u16 cap, ctl;
u16 cap;
/*
* DPC disables the Link automatically in hardware, so it has
@ -92,34 +85,17 @@ static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
* Wait until the Link is inactive, then clear DPC Trigger Status
* to allow the Port to leave DPC.
*/
dpc_wait_link_inactive(dpc);
pcie_wait_for_link(pdev, false);
if (dpc->rp_extensions && dpc_wait_rp_inactive(dpc))
return PCI_ERS_RESULT_DISCONNECT;
if (dpc->rp_extensions && dpc->rp_pio_status) {
pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS,
dpc->rp_pio_status);
dpc->rp_pio_status = 0;
}
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
PCI_EXP_DPC_STATUS_TRIGGER);
pci_read_config_word(pdev, cap + PCI_EXP_DPC_CTL, &ctl);
pci_write_config_word(pdev, cap + PCI_EXP_DPC_CTL,
ctl | PCI_EXP_DPC_CTL_INT_EN);
return PCI_ERS_RESULT_RECOVERED;
}
static void dpc_work(struct work_struct *work)
{
struct dpc_dev *dpc = container_of(work, struct dpc_dev, work);
struct pci_dev *pdev = dpc->dev->port;
/* We configure DPC so it only triggers on ERR_FATAL */
pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_DPC);
}
static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
{
@ -134,8 +110,6 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
dev_err(dev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n",
status, mask);
dpc->rp_pio_status = status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SEVERITY, &sev);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SYSERROR, &syserr);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_EXCEPTION, &exc);
@ -146,15 +120,14 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &dpc_status);
first_error = (dpc_status & 0x1f00) >> 8;
status &= ~mask;
for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) {
if (status & (1 << i))
if ((status & ~mask) & (1 << i))
dev_err(dev, "[%2d] %s%s\n", i, rp_pio_error_string[i],
first_error == i ? " (First)" : "");
}
if (dpc->rp_log_size < 4)
return;
goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
&dw0);
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 4,
@ -167,7 +140,7 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
dw0, dw1, dw2, dw3);
if (dpc->rp_log_size < 5)
return;
goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
dev_err(dev, "RP PIO ImpSpec Log %#010x\n", log);
@ -176,43 +149,26 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix);
dev_err(dev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
}
clear_status:
pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
}
static irqreturn_t dpc_irq(int irq, void *context)
static irqreturn_t dpc_handler(int irq, void *context)
{
struct dpc_dev *dpc = (struct dpc_dev *)context;
struct aer_err_info info;
struct dpc_dev *dpc = context;
struct pci_dev *pdev = dpc->dev->port;
struct device *dev = &dpc->dev->device;
u16 cap = dpc->cap_pos, ctl, status, source, reason, ext_reason;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_CTL, &ctl);
if (!(ctl & PCI_EXP_DPC_CTL_INT_EN) || ctl == (u16)(~0))
return IRQ_NONE;
u16 cap = dpc->cap_pos, status, source, reason, ext_reason;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT))
return IRQ_NONE;
if (!(status & PCI_EXP_DPC_STATUS_TRIGGER)) {
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
PCI_EXP_DPC_STATUS_INTERRUPT);
return IRQ_HANDLED;
}
pci_write_config_word(pdev, cap + PCI_EXP_DPC_CTL,
ctl & ~PCI_EXP_DPC_CTL_INT_EN);
pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID,
&source);
pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);
dev_info(dev, "DPC containment event, status:%#06x source:%#06x\n",
status, source);
status, source);
reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1;
ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5;
dev_warn(dev, "DPC %s detected, remove downstream devices\n",
(reason == 0) ? "unmasked uncorrectable error" :
(reason == 1) ? "ERR_NONFATAL" :
@ -220,15 +176,36 @@ static irqreturn_t dpc_irq(int irq, void *context)
(ext_reason == 0) ? "RP PIO error" :
(ext_reason == 1) ? "software trigger" :
"reserved error");
/* show RP PIO error detail information */
if (dpc->rp_extensions && reason == 3 && ext_reason == 0)
dpc_process_rp_pio_error(dpc);
else if (reason == 0 && aer_get_device_error_info(pdev, &info)) {
aer_print_error(pdev, &info);
pci_cleanup_aer_uncorrect_error_status(pdev);
}
/* We configure DPC so it only triggers on ERR_FATAL */
pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_DPC);
return IRQ_HANDLED;
}
static irqreturn_t dpc_irq(int irq, void *context)
{
struct dpc_dev *dpc = (struct dpc_dev *)context;
struct pci_dev *pdev = dpc->dev->port;
u16 cap = dpc->cap_pos, status;
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT) || status == (u16)(~0))
return IRQ_NONE;
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
PCI_EXP_DPC_STATUS_INTERRUPT);
schedule_work(&dpc->work);
if (status & PCI_EXP_DPC_STATUS_TRIGGER)
return IRQ_WAKE_THREAD;
return IRQ_HANDLED;
}
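The split follows the request_threaded_irq() contract: the hard handler only acknowledges PCI_EXP_DPC_STATUS_INTERRUPT and returns IRQ_WAKE_THREAD when the trigger bit indicates real work, deferring dpc_handler() to process context where the recovery path may sleep while waiting for the link to go down.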
@ -250,11 +227,11 @@ static int dpc_probe(struct pcie_device *dev)
dpc->cap_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);
dpc->dev = dev;
INIT_WORK(&dpc->work, dpc_work);
set_service_data(dev, dpc);
status = devm_request_irq(device, dev->irq, dpc_irq, IRQF_SHARED,
"pcie-dpc", dpc);
status = devm_request_threaded_irq(device, dev->irq, dpc_irq,
dpc_handler, IRQF_SHARED,
"pcie-dpc", dpc);
if (status) {
dev_warn(device, "request IRQ%d failed: %d\n", dev->irq,
status);

View File

@ -175,9 +175,11 @@ out:
*/
static pci_ers_result_t default_reset_link(struct pci_dev *dev)
{
pci_reset_bridge_secondary_bus(dev);
int rc;
rc = pci_bridge_secondary_bus_reset(dev);
pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
return PCI_ERS_RESULT_RECOVERED;
return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
static pci_ers_result_t reset_link(struct pci_dev *dev, u32 service)
@ -252,6 +254,7 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
dev->error_state = state;
pci_walk_bus(dev->subordinate, cb, &result_data);
if (cb == report_resume) {
pci_aer_clear_device_status(dev);
pci_cleanup_aer_uncorrect_error_status(dev);
dev->error_state = pci_channel_io_normal;
}
@ -259,15 +262,10 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
/*
* If the error is reported by an end point, we think this
* error is related to the upstream link of the end point.
* The error is non fatal so the bus is ok; just invoke
* the callback for the function that logged the error.
*/
if (state == pci_channel_io_normal)
/*
* the error is non fatal so the bus is ok, just invoke
* the callback for the function that logged the error.
*/
cb(dev, &result_data);
else
pci_walk_bus(dev->bus, cb, &result_data);
cb(dev, &result_data);
}
return result_data.result;
@ -317,7 +315,8 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
* do error recovery on all subordinates of the bridge instead
* of the bridge and clear the error status of the bridge.
*/
pci_cleanup_aer_uncorrect_error_status(dev);
pci_aer_clear_fatal_status(dev);
pci_aer_clear_device_status(dev);
}
if (result == PCI_ERS_RESULT_RECOVERED) {

View File

@ -50,6 +50,7 @@ struct pcie_port_service_driver {
int (*probe) (struct pcie_device *dev);
void (*remove) (struct pcie_device *dev);
int (*suspend) (struct pcie_device *dev);
int (*resume_noirq) (struct pcie_device *dev);
int (*resume) (struct pcie_device *dev);
/* Device driver may resume normal operations */
@ -82,6 +83,7 @@ extern struct bus_type pcie_port_bus_type;
int pcie_port_device_register(struct pci_dev *dev);
#ifdef CONFIG_PM
int pcie_port_device_suspend(struct device *dev);
int pcie_port_device_resume_noirq(struct device *dev);
int pcie_port_device_resume(struct device *dev);
#endif
void pcie_port_device_remove(struct pci_dev *dev);

View File

@ -353,14 +353,19 @@ error_disable:
}
#ifdef CONFIG_PM
static int suspend_iter(struct device *dev, void *data)
typedef int (*pcie_pm_callback_t)(struct pcie_device *);
static int pm_iter(struct device *dev, void *data)
{
struct pcie_port_service_driver *service_driver;
size_t offset = *(size_t *)data;
pcie_pm_callback_t cb;
if ((dev->bus == &pcie_port_bus_type) && dev->driver) {
service_driver = to_service_driver(dev->driver);
if (service_driver->suspend)
service_driver->suspend(to_pcie_device(dev));
cb = *(pcie_pm_callback_t *)((void *)service_driver + offset);
if (cb)
return cb(to_pcie_device(dev));
}
return 0;
}
@ -371,20 +376,14 @@ static int suspend_iter(struct device *dev, void *data)
*/
int pcie_port_device_suspend(struct device *dev)
{
return device_for_each_child(dev, NULL, suspend_iter);
size_t off = offsetof(struct pcie_port_service_driver, suspend);
return device_for_each_child(dev, &off, pm_iter);
}
static int resume_iter(struct device *dev, void *data)
int pcie_port_device_resume_noirq(struct device *dev)
{
struct pcie_port_service_driver *service_driver;
if ((dev->bus == &pcie_port_bus_type) &&
(dev->driver)) {
service_driver = to_service_driver(dev->driver);
if (service_driver->resume)
service_driver->resume(to_pcie_device(dev));
}
return 0;
size_t off = offsetof(struct pcie_port_service_driver, resume_noirq);
return device_for_each_child(dev, &off, pm_iter);
}
/**
@ -393,7 +392,8 @@ static int resume_iter(struct device *dev, void *data)
*/
int pcie_port_device_resume(struct device *dev)
{
return device_for_each_child(dev, NULL, resume_iter);
size_t off = offsetof(struct pcie_port_service_driver, resume);
return device_for_each_child(dev, &off, pm_iter);
}
#endif /* PM */
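The offsetof() dispatch generalizes to any struct of optional callbacks; in miniature (standalone C, hypothetical names):

	#include <stddef.h>

	struct ops {
		int (*suspend)(void *);
		int (*resume)(void *);
	};

	typedef int (*cb_t)(void *);

	/* Fetch and invoke the callback stored 'off' bytes into 'ops'. */
	static int call_at(const struct ops *ops, size_t off, void *arg)
	{
		cb_t cb = *(cb_t *)((const char *)ops + off);

		return cb ? cb(arg) : 0;
	}

	/* usage: call_at(ops, offsetof(struct ops, resume), dev); */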

View File

@ -42,17 +42,6 @@ __setup("pcie_ports=", pcie_port_setup);
/* global data */
static int pcie_portdrv_restore_config(struct pci_dev *dev)
{
int retval;
retval = pci_enable_device(dev);
if (retval)
return retval;
pci_set_master(dev);
return 0;
}
#ifdef CONFIG_PM
static int pcie_port_runtime_suspend(struct device *dev)
{
@ -76,10 +65,12 @@ static int pcie_port_runtime_idle(struct device *dev)
static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.suspend = pcie_port_device_suspend,
.resume_noirq = pcie_port_device_resume_noirq,
.resume = pcie_port_device_resume,
.freeze = pcie_port_device_suspend,
.thaw = pcie_port_device_resume,
.poweroff = pcie_port_device_suspend,
.restore_noirq = pcie_port_device_resume_noirq,
.restore = pcie_port_device_resume,
.runtime_suspend = pcie_port_runtime_suspend,
.runtime_resume = pcie_port_runtime_resume,
@ -160,19 +151,6 @@ static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev)
return PCI_ERS_RESULT_RECOVERED;
}
static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
{
/* If fatal, restore cfg space for possible link reset at upstream */
if (dev->error_state == pci_channel_io_frozen) {
dev->state_saved = true;
pci_restore_state(dev);
pcie_portdrv_restore_config(dev);
pci_enable_pcie_error_reporting(dev);
}
return PCI_ERS_RESULT_RECOVERED;
}
static int resume_iter(struct device *device, void *data)
{
struct pcie_device *pcie_device;
@ -208,7 +186,6 @@ static const struct pci_device_id port_pci_ids[] = { {
static const struct pci_error_handlers pcie_portdrv_err_handler = {
.error_detected = pcie_portdrv_error_detected,
.mmio_enabled = pcie_portdrv_mmio_enabled,
.slot_reset = pcie_portdrv_slot_reset,
.resume = pcie_portdrv_err_resume,
};

View File

@ -13,7 +13,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <linux/aer.h>
#include <linux/acpi.h>
#include <linux/hypervisor.h>
@ -1549,6 +1548,20 @@ static int pci_intx_mask_broken(struct pci_dev *dev)
return 0;
}
static void early_dump_pci_device(struct pci_dev *pdev)
{
u32 value[256 / 4];
int i;
pci_info(pdev, "config space:\n");
for (i = 0; i < 256; i += 4)
pci_read_config_dword(pdev, i, &value[i / 4]);
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
value, 256, false);
}
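
The dump above is gated by the pci_early_dump flag, set from the kernel command line (pci=earlydump in this series). For comparison, a user-space analogue that hex-dumps the same first 256 bytes of config space through sysfs (the device path is illustrative):

#include <stdio.h>

int main(void)
{
	unsigned char buf[256];
	FILE *f = fopen("/sys/bus/pci/devices/0000:00:00.0/config", "rb");

	if (!f || fread(buf, 1, sizeof(buf), f) != sizeof(buf))
		return 1;
	fclose(f);
	/* 16 bytes per row, like print_hex_dump(..., 16, 1, ...) above */
	for (int i = 0; i < 256; i++)
		printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
	return 0;
}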
/**
* pci_setup_device - Fill in class and map information of a device
* @dev: the device structure to fill
@ -1598,6 +1611,9 @@ int pci_setup_device(struct pci_dev *dev)
pci_printk(KERN_DEBUG, dev, "[%04x:%04x] type %02x class %#08x\n",
dev->vendor, dev->device, dev->hdr_type, dev->class);
if (pci_early_dump)
early_dump_pci_device(dev);
/* Need to have dev->class ready */
dev->cfg_size = pci_cfg_space_size(dev);
@ -1725,11 +1741,15 @@ int pci_setup_device(struct pci_dev *dev)
static void pci_configure_mps(struct pci_dev *dev)
{
struct pci_dev *bridge = pci_upstream_bridge(dev);
int mps, p_mps, rc;
int mps, mpss, p_mps, rc;
if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
return;
/* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
if (dev->is_virtfn)
return;
mps = pcie_get_mps(dev);
p_mps = pcie_get_mps(bridge);
@ -1749,6 +1769,14 @@ static void pci_configure_mps(struct pci_dev *dev)
if (pcie_bus_config != PCIE_BUS_DEFAULT)
return;
mpss = 128 << dev->pcie_mpss;
if (mpss < p_mps && pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
pcie_set_mps(bridge, mpss);
pci_info(dev, "Upstream bridge's Max Payload Size set to %d (was %d, max %d)\n",
mpss, p_mps, 128 << bridge->pcie_mpss);
p_mps = pcie_get_mps(bridge);
}
rc = pcie_set_mps(dev, p_mps);
if (rc) {
pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
@ -1757,7 +1785,7 @@ static void pci_configure_mps(struct pci_dev *dev)
}
pci_info(dev, "Max Payload Size set to %d (was %d, max %d)\n",
p_mps, mps, 128 << dev->pcie_mpss);
p_mps, mps, mpss);
}
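
The decode used above, 128 << dev->pcie_mpss, turns the 3-bit Max_Payload_Size_Supported code from the Device Capabilities register into bytes. A stand-alone illustration (codes 0 through 5 are the values the PCIe spec defines):

#include <stdio.h>

int main(void)
{
	/* DEVCAP MPS code 0 -> 128B ... 5 -> 4096B, i.e. 128 << code */
	for (unsigned int code = 0; code <= 5; code++)
		printf("MPSS code %u -> %4u bytes\n", code, 128u << code);
	return 0;
}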
static struct hpp_type0 pci_default_type0 = {
@ -2042,6 +2070,29 @@ static void pci_configure_ltr(struct pci_dev *dev)
#endif
}
static void pci_configure_eetlp_prefix(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_PASID
struct pci_dev *bridge;
u32 cap;
if (!pci_is_pcie(dev))
return;
pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX))
return;
if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
dev->eetlp_prefix_path = 1;
else {
bridge = pci_upstream_bridge(dev);
if (bridge && bridge->eetlp_prefix_path)
dev->eetlp_prefix_path = 1;
}
#endif
}
static void pci_configure_device(struct pci_dev *dev)
{
struct hotplug_params hpp;
@ -2051,6 +2102,7 @@ static void pci_configure_device(struct pci_dev *dev)
pci_configure_extended_tags(dev, NULL);
pci_configure_relaxed_ordering(dev);
pci_configure_ltr(dev);
pci_configure_eetlp_prefix(dev);
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
@ -2064,6 +2116,7 @@ static void pci_configure_device(struct pci_dev *dev)
static void pci_release_capabilities(struct pci_dev *dev)
{
pci_aer_exit(dev);
pci_vpd_release(dev);
pci_iov_release(dev);
pci_free_cap_save_buffers(dev);
@ -2156,8 +2209,8 @@ static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
return true;
}
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
int timeout)
bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
int timeout)
{
if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
return false;
@ -2172,6 +2225,24 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
return true;
}
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
int timeout)
{
#ifdef CONFIG_PCI_QUIRKS
struct pci_dev *bridge = bus->self;
/*
* Certain IDT switches have an issue where they improperly trigger
* ACS Source Validation errors on completions for config reads.
*/
if (bridge && bridge->vendor == PCI_VENDOR_ID_IDT &&
bridge->device == 0x80b5)
return pci_idt_bus_quirk(bus, devfn, l, timeout);
#endif
return pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout);
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
/*
@ -2205,6 +2276,25 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
return dev;
}
static void pcie_report_downtraining(struct pci_dev *dev)
{
if (!pci_is_pcie(dev))
return;
/* Look from the device up to avoid downstream ports with no devices */
if ((pci_pcie_type(dev) != PCI_EXP_TYPE_ENDPOINT) &&
(pci_pcie_type(dev) != PCI_EXP_TYPE_LEG_END) &&
(pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM))
return;
/* Multi-function PCIe devices share the same link/status */
if (PCI_FUNC(dev->devfn) != 0 || dev->is_virtfn)
return;
/* Print link status only if the device is constrained by the fabric */
__pcie_print_link_status(dev, false);
}
static void pci_init_capabilities(struct pci_dev *dev)
{
/* Enhanced Allocation */
@ -2240,6 +2330,8 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Advanced Error Reporting */
pci_aer_init(dev);
pcie_report_downtraining(dev);
if (pci_probe_reset_function(dev) == 0)
dev->reset_fn = 1;
}


@ -25,8 +25,10 @@
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/mm.h>
#include <linux/nvme.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_runtime.h>
#include <linux/switchtec.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
@ -460,6 +462,7 @@ static void quirk_nfp6000(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, quirk_nfp6000);
/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
@ -2105,6 +2108,7 @@ static void quirk_netmos(struct pci_dev *dev)
if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
return;
/* else: fall through */
case PCI_DEVICE_ID_NETMOS_9735:
case PCI_DEVICE_ID_NETMOS_9745:
case PCI_DEVICE_ID_NETMOS_9845:
@ -2352,6 +2356,9 @@ static void quirk_paxc_bridge(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
#endif
/*
@ -3664,6 +3671,108 @@ static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
#define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156
#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166
/*
* The Samsung SM961/PM961 controller can sometimes enter a fatal state after
* FLR where config space reads from the device return -1. We seem to be
* able to avoid this condition if we disable the NVMe controller prior to
* FLR. This quirk is generic for any NVMe class device requiring similar
* assistance to quiesce the device prior to FLR.
*
* NVMe specification: https://nvmexpress.org/resources/specifications/
* Revision 1.0e:
* Chapter 2: Required and optional PCI config registers
* Chapter 3: NVMe control registers
* Chapter 7.3: Reset behavior
*/
static int nvme_disable_and_flr(struct pci_dev *dev, int probe)
{
void __iomem *bar;
u16 cmd;
u32 cfg;
if (dev->class != PCI_CLASS_STORAGE_EXPRESS ||
!pcie_has_flr(dev) || !pci_resource_start(dev, 0))
return -ENOTTY;
if (probe)
return 0;
bar = pci_iomap(dev, 0, NVME_REG_CC + sizeof(cfg));
if (!bar)
return -ENOTTY;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd | PCI_COMMAND_MEMORY);
cfg = readl(bar + NVME_REG_CC);
/* Disable controller if enabled */
if (cfg & NVME_CC_ENABLE) {
u32 cap = readl(bar + NVME_REG_CAP);
unsigned long timeout;
/*
* Per nvme_disable_ctrl() skip shutdown notification as it
* could complete commands to the admin queue. We only intend
* to quiesce the device before reset.
*/
cfg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);
writel(cfg, bar + NVME_REG_CC);
/*
* Some controllers require an additional delay here, see
* NVME_QUIRK_DELAY_BEFORE_CHK_RDY. None of those are yet
* supported by this quirk.
*/
/* Cap register provides max timeout in 500ms increments */
timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
for (;;) {
u32 status = readl(bar + NVME_REG_CSTS);
/* Ready status becomes zero on disable complete */
if (!(status & NVME_CSTS_RDY))
break;
msleep(100);
if (time_after(jiffies, timeout)) {
pci_warn(dev, "Timeout waiting for NVMe ready status to clear after disable\n");
break;
}
}
}
pci_iounmap(dev, bar);
pcie_flr(dev);
return 0;
}
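
The disable timeout above is derived from the NVMe CAP register: CAP.TO (bits 31:24) reports the worst-case ready-transition time in 500 ms units, which the quirk converts to jiffies. A sketch of the same conversion (cap_timeout_ms is a hypothetical helper):

#include <stdint.h>
#include <stdio.h>

/* CAP.TO is bits 31:24 of the 64-bit CAP register, in 500 ms units;
 * this mirrors NVME_CAP_TIMEOUT(cap) and the '(to + 1) * HZ / 2' math. */
static unsigned int cap_timeout_ms(uint64_t cap)
{
	unsigned int to = (cap >> 24) & 0xff;

	return (to + 1) * 500;
}

int main(void)
{
	uint64_t cap = (uint64_t)20 << 24;	/* example: TO = 20 */

	printf("ready status must track enable within %u ms\n",
	       cap_timeout_ms(cap));
	return 0;
}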
/*
* Intel DC P3700 NVMe controller will time out waiting for ready status
* to change after NVMe enable if the driver starts interacting with the
* device too soon after FLR. A 250ms delay after FLR has heuristically
* proven to produce reliably working results for device assignment cases.
*/
static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
{
if (!pcie_has_flr(dev))
return -ENOTTY;
if (probe)
return 0;
pcie_flr(dev);
msleep(250);
return 0;
}
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
reset_intel_82599_sfp_virtfn },
@ -3671,6 +3780,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
reset_ivb_igd },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
reset_ivb_igd },
{ PCI_VENDOR_ID_SAMSUNG, 0xa804, nvme_disable_and_flr },
{ PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
reset_chelsio_generic_dev },
{ 0 }
@ -3740,6 +3851,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c78 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c134 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9183,
quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
quirk_dma_func1_alias);
@ -4553,27 +4667,79 @@ static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
return 0;
}
static const struct pci_dev_enable_acs {
static int pci_quirk_disable_intel_spt_pch_acs_redir(struct pci_dev *dev)
{
int pos;
u32 cap, ctrl;
if (!pci_quirk_intel_spt_pch_acs_match(dev))
return -ENOTTY;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
if (!pos)
return -ENOTTY;
pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
pci_info(dev, "Intel SPT PCH root port workaround: disabled ACS redirect\n");
return 0;
}
static const struct pci_dev_acs_ops {
u16 vendor;
u16 device;
int (*enable_acs)(struct pci_dev *dev);
} pci_dev_enable_acs[] = {
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_spt_pch_acs },
{ 0 }
int (*disable_acs_redir)(struct pci_dev *dev);
} pci_dev_acs_ops[] = {
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
.enable_acs = pci_quirk_enable_intel_pch_acs,
},
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
.enable_acs = pci_quirk_enable_intel_spt_pch_acs,
.disable_acs_redir = pci_quirk_disable_intel_spt_pch_acs_redir,
},
};
int pci_dev_specific_enable_acs(struct pci_dev *dev)
{
const struct pci_dev_enable_acs *i;
int ret;
const struct pci_dev_acs_ops *p;
int i, ret;
for (i = pci_dev_enable_acs; i->enable_acs; i++) {
if ((i->vendor == dev->vendor ||
i->vendor == (u16)PCI_ANY_ID) &&
(i->device == dev->device ||
i->device == (u16)PCI_ANY_ID)) {
ret = i->enable_acs(dev);
for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
p = &pci_dev_acs_ops[i];
if ((p->vendor == dev->vendor ||
p->vendor == (u16)PCI_ANY_ID) &&
(p->device == dev->device ||
p->device == (u16)PCI_ANY_ID) &&
p->enable_acs) {
ret = p->enable_acs(dev);
if (ret >= 0)
return ret;
}
}
return -ENOTTY;
}
int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
{
const struct pci_dev_acs_ops *p;
int i, ret;
for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
p = &pci_dev_acs_ops[i];
if ((p->vendor == dev->vendor ||
p->vendor == (u16)PCI_ANY_ID) &&
(p->device == dev->device ||
p->device == (u16)PCI_ANY_ID) &&
p->disable_acs_redir) {
ret = p->disable_acs_redir(dev);
if (ret >= 0)
return ret;
}
@ -4753,3 +4919,197 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMD, PCI_ANY_ID,
PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
/*
* Some IDT switches incorrectly flag an ACS Source Validation error on
* completions for config read requests even though PCIe r4.0, sec
* 6.12.1.1, says that completions are never affected by ACS Source
* Validation. Here's the text of IDT 89H32H8G3-YC, erratum #36:
*
* Item #36 - Downstream port applies ACS Source Validation to Completions
* Section 6.12.1.1 of the PCI Express Base Specification 3.1 states that
* completions are never affected by ACS Source Validation. However,
* completions received by a downstream port of the PCIe switch from a
* device that has not yet captured a PCIe bus number are incorrectly
* dropped by ACS Source Validation by the switch downstream port.
*
* The workaround suggested by IDT is to issue a config write to the
* downstream device before issuing the first config read. This allows the
* downstream device to capture its bus and device numbers (see PCIe r4.0,
* sec 2.2.9), thus avoiding the ACS error on the completion.
*
* However, we don't know when the device is ready to accept the config
* write, so we do config reads until we receive a non-Config Request Retry
* Status, then do the config write.
*
* To avoid hitting the erratum when doing the config reads, we disable ACS
* SV around this process.
*/
int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *l, int timeout)
{
int pos;
u16 ctrl = 0;
bool found;
struct pci_dev *bridge = bus->self;
pos = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ACS);
/* Disable ACS SV before initial config reads */
if (pos) {
pci_read_config_word(bridge, pos + PCI_ACS_CTRL, &ctrl);
if (ctrl & PCI_ACS_SV)
pci_write_config_word(bridge, pos + PCI_ACS_CTRL,
ctrl & ~PCI_ACS_SV);
}
found = pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout);
/* Write Vendor ID (read-only) so the endpoint latches its bus/dev */
if (found)
pci_bus_write_config_word(bus, devfn, PCI_VENDOR_ID, 0);
/* Re-enable ACS_SV if it was previously enabled */
if (ctrl & PCI_ACS_SV)
pci_write_config_word(bridge, pos + PCI_ACS_CTRL, ctrl);
return found;
}
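
The config-read retry loop this quirk leans on works because of Configuration Request Retry Status: with CRS Software Visibility enabled on the bridge, a Vendor ID read from a device that is still initializing completes with the reserved ID 0x0001 (dword 0xffff0001). A sketch of that test (mirroring the kernel's pci_bus_crs_vendor_id(); the sample values are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool id_is_crs(uint32_t l)
{
	return (l & 0xffff) == 0x0001;	/* reserved Vendor ID: retry */
}

int main(void)
{
	printf("%d\n", id_is_crs(0xffff0001));	/* 1: device still coming up */
	printf("%d\n", id_is_crs(0x80b5111d));	/* 0: real IDT 111d:80b5 */
	return 0;
}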
/*
* Microsemi Switchtec NTB uses devfn proxy IDs to move TLPs between
* NT endpoints via the internal switch fabric. These IDs replace the
* originating requestor ID TLPs which access host memory on peer NTB
* ports. Therefore, all proxy IDs must be aliased to the NTB device
* to permit access when the IOMMU is turned on.
*/
static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
{
void __iomem *mmio;
struct ntb_info_regs __iomem *mmio_ntb;
struct ntb_ctrl_regs __iomem *mmio_ctrl;
struct sys_info_regs __iomem *mmio_sys_info;
u64 partition_map;
u8 partition;
int pp;
if (pci_enable_device(pdev)) {
pci_err(pdev, "Cannot enable Switchtec device\n");
return;
}
mmio = pci_iomap(pdev, 0, 0);
if (mmio == NULL) {
pci_disable_device(pdev);
pci_err(pdev, "Cannot iomap Switchtec device\n");
return;
}
pci_info(pdev, "Setting Switchtec proxy ID aliases\n");
mmio_ntb = mmio + SWITCHTEC_GAS_NTB_OFFSET;
mmio_ctrl = (void __iomem *) mmio_ntb + SWITCHTEC_NTB_REG_CTRL_OFFSET;
mmio_sys_info = mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
partition = ioread8(&mmio_ntb->partition_id);
partition_map = ioread32(&mmio_ntb->ep_map);
partition_map |= ((u64) ioread32(&mmio_ntb->ep_map + 4)) << 32;
partition_map &= ~(1ULL << partition);
for (pp = 0; pp < (sizeof(partition_map) * 8); pp++) {
struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
u32 table_sz = 0;
int te;
if (!(partition_map & (1ULL << pp)))
continue;
pci_dbg(pdev, "Processing partition %d\n", pp);
mmio_peer_ctrl = &mmio_ctrl[pp];
table_sz = ioread16(&mmio_peer_ctrl->req_id_table_size);
if (!table_sz) {
pci_warn(pdev, "Partition %d table_sz 0\n", pp);
continue;
}
if (table_sz > 512) {
pci_warn(pdev,
"Invalid Switchtec partition %d table_sz %d\n",
pp, table_sz);
continue;
}
for (te = 0; te < table_sz; te++) {
u32 rid_entry;
u8 devfn;
rid_entry = ioread32(&mmio_peer_ctrl->req_id_table[te]);
devfn = (rid_entry >> 1) & 0xFF;
pci_dbg(pdev,
"Aliasing Partition %d Proxy ID %02x.%d\n",
pp, PCI_SLOT(devfn), PCI_FUNC(devfn));
pci_add_dma_alias(pdev, devfn);
}
}
pci_iounmap(pdev, mmio);
pci_disable_device(pdev);
}
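
Each requester-ID table entry read in the loop above packs the proxy devfn into bits 8:1; PCI_SLOT() and PCI_FUNC() then split it as dev = devfn >> 3, fn = devfn & 7. A quick decode example (the entry value is illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rid_entry = 0x00000052;	/* illustrative table entry */
	uint8_t devfn = (rid_entry >> 1) & 0xff;

	printf("proxy ID %02x.%d\n", devfn >> 3, devfn & 7);	/* 05.1 */
	return 0;
}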
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8531,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8532,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8533,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8534,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8535,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8536,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8543,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8544,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8545,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8546,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8551,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8552,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8553,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8554,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8555,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8556,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8561,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8562,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8563,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8564,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8565,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8566,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8571,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8572,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8573,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8574,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8575,
quirk_switchtec_ntb_dma_alias);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8576,
quirk_switchtec_ntb_dma_alias);


@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/pci-aspm.h>
#include "pci.h"
static void pci_free_resources(struct pci_dev *dev)


@ -80,7 +80,8 @@ EXPORT_SYMBOL_GPL(pci_disable_rom);
* The PCI window size could be much larger than the
* actual image size.
*/
size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
static size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom,
size_t size)
{
void __iomem *image;
int last_image;
@ -106,8 +107,14 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
length = readw(pds + 16);
image += length * 512;
/* Avoid iterating through memory outside the resource window */
if (image > rom + size)
if (image >= rom + size)
break;
if (!last_image) {
if (readw(image) != 0xAA55) {
pci_info(pdev, "No more images in the PCI ROM\n");
break;
}
}
} while (length && !last_image);
/* never return a size larger than the PCI resource window */


@ -641,7 +641,7 @@ static int ioctl_event_summary(struct switchtec_dev *stdev,
for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
if (reg != MICROSEMI_VENDOR_ID)
if (reg != PCI_VENDOR_ID_MICROSEMI)
break;
reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
@ -1203,7 +1203,7 @@ static void init_pff(struct switchtec_dev *stdev)
for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
if (reg != MICROSEMI_VENDOR_ID)
if (reg != PCI_VENDOR_ID_MICROSEMI)
break;
}
@ -1267,7 +1267,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
struct switchtec_dev *stdev;
int rc;
if (pdev->class == MICROSEMI_NTB_CLASSCODE)
if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
request_module_nowait("ntb_hw_switchtec");
stdev = stdev_create(pdev);
@ -1321,19 +1321,19 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
#define SWITCHTEC_PCI_DEVICE(device_id) \
{ \
.vendor = MICROSEMI_VENDOR_ID, \
.vendor = PCI_VENDOR_ID_MICROSEMI, \
.device = device_id, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
.class = MICROSEMI_MGMT_CLASSCODE, \
.class = (PCI_CLASS_MEMORY_OTHER << 8), \
.class_mask = 0xFFFFFFFF, \
}, \
{ \
.vendor = MICROSEMI_VENDOR_ID, \
.vendor = PCI_VENDOR_ID_MICROSEMI, \
.device = device_id, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
.class = MICROSEMI_NTB_CLASSCODE, \
.class = (PCI_CLASS_BRIDGE_OTHER << 8), \
.class_mask = 0xFFFFFFFF, \
}
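
The open-coded class values that replace MICROSEMI_NTB_CLASSCODE and MICROSEMI_MGMT_CLASSCODE follow from the layout of pdev->class, which packs (base class << 16) | (subclass << 8) | prog-if; the 16-bit PCI_CLASS_* constants already hold (base << 8) | subclass, so one more shift by 8 yields the full 24-bit value with a prog-if of 0:

#include <stdio.h>

#define PCI_CLASS_MEMORY_OTHER	0x0580	/* base 0x05, subclass 0x80 */
#define PCI_CLASS_BRIDGE_OTHER	0x0680	/* base 0x06, subclass 0x80 */

int main(void)
{
	printf("MGMT class: %06x\n", PCI_CLASS_MEMORY_OTHER << 8); /* 058000 */
	printf("NTB  class: %06x\n", PCI_CLASS_BRIDGE_OTHER << 8); /* 068000 */
	return 0;
}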


@ -146,7 +146,7 @@ static int pci_vpd_wait(struct pci_dev *dev)
if (!vpd->busy)
return 0;
while (time_before(jiffies, timeout)) {
do {
ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
&status);
if (ret < 0)
@ -160,10 +160,13 @@ static int pci_vpd_wait(struct pci_dev *dev)
if (fatal_signal_pending(current))
return -EINTR;
if (time_after(jiffies, timeout))
break;
usleep_range(10, max_sleep);
if (max_sleep < 1024)
max_sleep *= 2;
}
} while (true);
pci_warn(dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
return -ETIMEDOUT;
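
The reworked wait loop checks for completion before checking the deadline, so a transfer that finishes just as time runs out is still reported as success, and it backs the sleep interval off from 10 us toward roughly 1 ms between polls. A user-space sketch of the same check-then-timeout shape (transfer_done() and poll_until_done() are hypothetical):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for "VPD transfer finished": flips true on the fifth poll. */
static bool transfer_done(void)
{
	static int polls;

	return ++polls >= 5;
}

static bool poll_until_done(unsigned int budget_us)
{
	unsigned int waited = 0, sleep_us = 10;

	for (;;) {
		if (transfer_done())
			return true;	/* completion checked first */
		if (waited >= budget_us)
			return false;	/* only then give up */
		usleep(sleep_us);
		waited += sleep_us;
		if (sleep_us < 1024)	/* exponential backoff, capped */
			sleep_us *= 2;
	}
}

int main(void)
{
	printf("done: %d\n", poll_until_done(100000));
	return 0;
}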
