
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next

Pull networking updates from David Miller:

 1) More iov_iter conversion work from Al Viro.

    [ The "crypto: switch af_alg_make_sg() to iov_iter" commit was
      wrong, and this pull actually adds an extra commit on top of the
      branch I'm pulling to fix that up, so that the pre-merge state is
      ok.   - Linus ]

 2) Various optimizations to the ipv4 forwarding information base trie
    lookup implementation.  From Alexander Duyck.

 3) Remove sock_iocb altogether, from Christoph Hellwig.

 4) Allow congestion control algorithm selection via routing metrics.
    From Daniel Borkmann.

 5) Make ipv4 uncached route list per-cpu, from Eric Dumazet.

 6) Handle rfs hash collisions more gracefully, also from Eric Dumazet.

 7) Add xmit_more support to r8169, e1000, and e1000e drivers.  From
    Florian Westphal.

 8) Transparent Ethernet Bridging support for GRO, from Jesse Gross.

 9) Add BPF packet actions to packet scheduler, from Jiri Pirko.

10) Add support for unique flow IDs to openvswitch, from Joe Stringer.

11) New NetCP ethernet driver, from Muralidharan Karicheri and Wingman
    Kwok.

12) More sanely handle out-of-window dupacks, which can result in
    serious ACK storms.  From Neal Cardwell.

13) Various rhashtable bug fixes and enhancements, from Herbert Xu,
    Patrick McHardy, and Thomas Graf.

14) Support xmit_more in be2net, from Sathya Perla.

15) Group Policy extensions for vxlan, from Thomas Graf.

16) Remote Checksum Offload support for vxlan, from Tom Herbert.

17) Like ipv4, support lockless transmit over ipv6 UDP sockets.  From
    Vlad Yasevich.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1494+1 commits)
  crypto: fix af_alg_make_sg() conversion to iov_iter
  ipv4: Namespecify TCP PMTU mechanism
  i40e: Fix for stats init function call in Rx setup
  tcp: don't include Fast Open option in SYN-ACK on pure SYN-data
  openvswitch: Only set TUNNEL_VXLAN_OPT if VXLAN-GBP metadata is set
  ipv6: Make __ipv6_select_ident static
  ipv6: Fix fragment id assignment on LE arches.
  bridge: Fix inability to add non-vlan fdb entry
  net: Mellanox: Delete unnecessary checks before the function call "vunmap"
  cxgb4: Add support in cxgb4 to get expansion rom version via ethtool
  ethtool: rename reserved1 memeber in ethtool_drvinfo for expansion ROM version
  net: dsa: Remove redundant phy_attach()
  IB/mlx4: Reset flow support for IB kernel ULPs
  IB/mlx4: Always use the correct port for mirrored multicast attachments
  net/bonding: Fix potential bad memory access during bonding events
  tipc: remove tipc_snprintf
  tipc: nl compat add noop and remove legacy nl framework
  tipc: convert legacy nl stats show to nl compat
  tipc: convert legacy nl net id get to nl compat
  tipc: convert legacy nl net id set to nl compat
  ...
Linus Torvalds 2015-02-10 20:01:30 -08:00
commit c5ce28df0e
1236 changed files with 72057 additions and 29385 deletions


@ -113,7 +113,6 @@
!Finclude/net/cfg80211.h cfg80211_beacon_data
!Finclude/net/cfg80211.h cfg80211_ap_settings
!Finclude/net/cfg80211.h station_parameters
!Finclude/net/cfg80211.h station_info_flags
!Finclude/net/cfg80211.h rate_info_flags
!Finclude/net/cfg80211.h rate_info
!Finclude/net/cfg80211.h station_info
@ -435,7 +434,6 @@
<section id="ps-client">
<title>support for powersaving clients</title>
!Pinclude/net/mac80211.h AP support for powersaving clients
</section>
!Finclude/net/mac80211.h ieee80211_get_buffered_bc
!Finclude/net/mac80211.h ieee80211_beacon_get
!Finclude/net/mac80211.h ieee80211_sta_eosp
@ -444,6 +442,7 @@
!Finclude/net/mac80211.h ieee80211_sta_ps_transition_ni
!Finclude/net/mac80211.h ieee80211_sta_set_buffered
!Finclude/net/mac80211.h ieee80211_sta_block_awake
</section>
</chapter>
<chapter id="multi-iface">
@ -488,8 +487,8 @@
<title>RX A-MPDU aggregation</title>
!Pnet/mac80211/agg-rx.c RX A-MPDU aggregation
!Cnet/mac80211/agg-rx.c
</sect1>
!Finclude/net/mac80211.h ieee80211_ampdu_mlme_action
</sect1>
</chapter>
<chapter id="smps">


@ -7,17 +7,38 @@ Required properties:
- SerDes Rx/Tx registers
- SerDes integration registers (1/2)
- SerDes integration registers (2/2)
- interrupt-parent: Should be the phandle for the interrupt controller
that services interrupts for this device
- interrupts: Should contain the amd-xgbe-phy interrupt.
Optional properties:
- amd,speed-set: Speed capabilities of the device
0 - 1GbE and 10GbE (default)
1 - 2.5GbE and 10GbE
The following optional properties are represented by an array with each
value corresponding to a particular speed. The first array value represents
the setting for the 1GbE speed, the second value for the 2.5GbE speed and
the third value for the 10GbE speed. All three values are required if the
property is used.
- amd,serdes-blwc: Baseline wandering correction enablement
0 - Off
1 - On
- amd,serdes-cdr-rate: CDR rate speed selection
- amd,serdes-pq-skew: PQ (data sampling) skew
- amd,serdes-tx-amp: TX amplitude boost
Example:
xgbe_phy@e1240800 {
compatible = "amd,xgbe-phy-seattle-v1a", "ethernet-phy-ieee802.3-c45";
reg = <0 0xe1240800 0 0x00400>,
<0 0xe1250000 0 0x00060>,
<0 0xe1250080 0 0x00004>;
interrupt-parent = <&gic>;
interrupts = <0 323 4>;
amd,speed-set = <0>;
amd,serdes-blwc = <1>, <1>, <0>;
amd,serdes-cdr-rate = <2>, <2>, <7>;
amd,serdes-pq-skew = <10>, <10>, <30>;
amd,serdes-tx-amp = <15>, <15>, <10>;
};


@ -11,6 +11,8 @@ Required properties:
Optional properties:
- davicom,no-eeprom : Configuration EEPROM is not available
- davicom,ext-phy : Use external PHY
- reset-gpios : phandle of gpio that will be used to reset chip during probe
- vcc-supply : phandle of regulator that will be used to enable power to chip
Example:
@ -21,4 +23,6 @@ Example:
interrupts = <7 4>;
local-mac-address = [00 00 de ad be ef];
davicom,no-eeprom;
reset-gpios = <&gpf 12 GPIO_ACTIVE_LOW>;
vcc-supply = <&eth0_power>;
};


@ -22,6 +22,8 @@ Optional properties:
- fsl,num-rx-queues : The property is valid for enet-avb IP, which supports
hw multi queues. Should specify the rx queue number, otherwise set rx queue
number to 1.
- fsl,magic-packet : If present, indicates that the hardware supports waking
up via magic packet.
Optional subnodes:
- mdio : specifies the mdio bus in the FEC, used as a container for phy nodes


@ -8,7 +8,16 @@ of how to define a PHY.
Required properties:
- reg : Offset and length of the register set for the device
- compatible : Should define the compatible device type for the
mdio. Currently, this is most likely to be "fsl,gianfar-mdio"
mdio. Currently supported strings/devices are:
- "fsl,gianfar-tbi"
- "fsl,gianfar-mdio"
- "fsl,etsec2-tbi"
- "fsl,etsec2-mdio"
- "fsl,ucc-mdio"
- "fsl,fman-mdio"
When device_type is "mdio", the following strings are also considered:
- "gianfar"
- "ucc_geth_phy"
Example:


@ -0,0 +1,88 @@
Hisilicon hip04 Ethernet Controller
* Ethernet controller node
Required properties:
- compatible: should be "hisilicon,hip04-mac".
- reg: address and length of the register set for the device.
- interrupts: interrupt for the device.
- port-handle: <phandle port channel>
phandle, specifies a reference to the syscon ppe node
port, port number connected to the controller
channel, recv channel start from channel * number (RX_DESC_NUM)
- phy-mode: see ethernet.txt [1].
Optional properties:
- phy-handle: see ethernet.txt [1].
[1] Documentation/devicetree/bindings/net/ethernet.txt
* Ethernet ppe node:
Control rx & tx fifos of all ethernet controllers.
Have 2048 recv channels shared by all ethernet controllers, only if no overlap.
Each controller's recv channels start from channel * number (RX_DESC_NUM).
Required properties:
- compatible: "hisilicon,hip04-ppe", "syscon".
- reg: address and length of the register set for the device.
* MDIO bus node:
Required properties:
- compatible: should be "hisilicon,hip04-mdio".
- Inherits from MDIO bus node binding [2]
[2] Documentation/devicetree/bindings/net/phy.txt
Example:
mdio {
compatible = "hisilicon,hip04-mdio";
reg = <0x28f1000 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
phy0: ethernet-phy@0 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <0>;
marvell,reg-init = <18 0x14 0 0x8001>;
};
phy1: ethernet-phy@1 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <1>;
marvell,reg-init = <18 0x14 0 0x8001>;
};
};
ppe: ppe@28c0000 {
compatible = "hisilicon,hip04-ppe", "syscon";
reg = <0x28c0000 0x10000>;
};
fe: ethernet@28b0000 {
compatible = "hisilicon,hip04-mac";
reg = <0x28b0000 0x10000>;
interrupts = <0 413 4>;
phy-mode = "mii";
port-handle = <&ppe 31 0>;
};
ge0: ethernet@2800000 {
compatible = "hisilicon,hip04-mac";
reg = <0x2800000 0x10000>;
interrupts = <0 402 4>;
phy-mode = "sgmii";
port-handle = <&ppe 0 1>;
phy-handle = <&phy0>;
};
ge8: ethernet@2880000 {
compatible = "hisilicon,hip04-mac";
reg = <0x2880000 0x10000>;
interrupts = <0 410 4>;
phy-mode = "sgmii";
port-handle = <&ppe 8 2>;
phy-handle = <&phy1>;
};


@ -0,0 +1,197 @@
This document describes the device tree bindings associated with the
keystone network coprocessor(NetCP) driver support.
The network coprocessor (NetCP) is a hardware accelerator that processes
Ethernet packets. NetCP has a gigabit Ethernet (GbE) subsystem with an Ethernet
switch sub-module to send and receive packets. NetCP also includes a packet
accelerator (PA) module to perform packet classification operations such as
header matching, and packet modification operations such as checksum
generation. NetCP can also optionally include a Security Accelerator (SA)
capable of performing IPSec operations on ingress/egress packets.
Keystone II SoCs also have a 10 Gigabit Ethernet Subsystem (XGbE) which
includes a 3-port Ethernet switch sub-module capable of 10Gb/s and 1Gb/s rates
per Ethernet port.
The Keystone NetCP driver has a plug-in module architecture where each of the NetCP
sub-modules exists as a loadable kernel module that plugs in to the NetCP core.
These sub-modules are represented as "netcp-devices" in the dts bindings. It is
mandatory to have the ethernet switch sub-module for the ethernet interface to
be operational. Any other sub-module like the PA is optional.
NetCP Ethernet SubSystem Layout:
-----------------------------
NetCP subsystem(10G or 1G)
-----------------------------
|
|-> NetCP Devices -> |
| |-> GBE/XGBE Switch
| |
| |-> Packet Accelerator
| |
| |-> Security Accelerator
|
|
|
|-> NetCP Interfaces -> |
|-> Ethernet Port 0
|
|-> Ethernet Port 1
|
|-> Ethernet Port 2
|
|-> Ethernet Port 3
NetCP subsystem properties:
Required properties:
- compatible: Should be "ti,netcp-1.0"
- clocks: phandle to the reference clocks for the subsystem.
- dma-id: Navigator packet dma instance id.
Optional properties:
- reg: register location and the size for the following register
regions in the specified order.
- Efuse MAC address register
- dma-coherent: Present if dma operations are coherent
- big-endian: Keystone devices can be operated in a mode where the DSP is in
the big endian mode. In such cases enable this option. This
option should also be enabled if the ARM is operated in
big endian mode with the DSP in little endian.
NetCP device properties: Device specification for NetCP sub-modules.
1Gb/10Gb (gbe/xgbe) ethernet switch sub-module specifications.
Required properties:
- label: Must be "netcp-gbe" for 1Gb & "netcp-xgbe" for 10Gb.
- reg: register location and the size for the following register
regions in the specified order.
- subsystem registers
- serdes registers
- tx-channel: the navigator packet dma channel name for tx.
- tx-queue: the navigator queue number associated with the tx dma channel.
- interfaces: specification for each of the switch port to be registered as a
network interface in the stack.
-- slave-port: Switch port number, 0 based numbering.
-- link-interface: type of link interface, supported options are
- mac<->mac auto negotiate mode: 0
- mac<->phy mode: 1
- mac<->mac forced mode: 2
- mac<->fiber mode: 3
- mac<->phy mode with no mdio: 4
- 10Gb mac<->phy mode : 10
- 10Gb mac<->mac forced mode : 11
-- phy-handle: phandle to PHY device
Optional properties:
- enable-ale: NetCP driver keeps the address learning feature in the ethernet
switch module disabled. This attribute is to enable the address
learning.
- secondary-slave-ports: specification for each switch port that is not to be
registered as a network interface. The NetCP driver
will only initialize these ports and attach a PHY
driver to them if needed.
NetCP interface properties: Interface specification for NetCP sub-modules.
Required properties:
- rx-channel: the navigator packet dma channel name for rx.
- rx-queue: the navigator queue number associated with rx dma channel.
- rx-pool: specifies the number of descriptors to be used & the region-id
for creating the rx descriptor pool.
- tx-pool: specifies the number of descriptors to be used & the region-id
for creating the tx descriptor pool.
- rx-queue-depth: number of descriptors in each of the free descriptor
queue (FDQ) for the pktdma Rx flow. There can be at
present a maximum of 4 queues per Rx flow.
- rx-buffer-size: the buffer size for each of the Rx flow FDQ.
- tx-completion-queue: the navigator queue number where the descriptors are
recycled after Tx DMA completion.
Optional properties:
- efuse-mac: If this is 1, then the MAC address for the interface is
obtained from the device efuse mac address register
- local-mac-address: the driver is designed to use the of_get_mac_address api
only if efuse-mac is 0. When efuse-mac is 0, the MAC
address is obtained from local-mac-address. If this
attribute is not present, then the driver will use a
random MAC address.
- "netcp-device label": phandle to the device specification for each of NetCP
sub-module attached to this interface.
Example binding:
netcp: netcp@2090000 {
reg = <0x2620110 0x8>;
reg-names = "efuse";
compatible = "ti,netcp-1.0";
#address-cells = <1>;
#size-cells = <1>;
ranges;
clocks = <&papllclk>, <&clkcpgmac>, <&chipclk12>;
dma-coherent;
/* big-endian; */
dma-id = <0>;
netcp-devices {
#address-cells = <1>;
#size-cells = <1>;
ranges;
gbe@0x2090000 {
label = "netcp-gbe";
reg = <0x2090000 0xf00>;
/* enable-ale; */
tx-queue = <648>;
tx-channel = <8>;
interfaces {
gbe0: interface-0 {
slave-port = <0>;
link-interface = <4>;
};
gbe1: interface-1 {
slave-port = <1>;
link-interface = <4>;
};
};
secondary-slave-ports {
port-2 {
slave-port = <2>;
link-interface = <2>;
};
port-3 {
slave-port = <3>;
link-interface = <2>;
};
};
};
};
netcp-interfaces {
interface-0 {
rx-channel = <22>;
rx-pool = <1024 12>;
tx-pool = <1024 12>;
rx-queue-depth = <128 128 0 0>;
rx-buffer-size = <1518 4096 0 0>;
rx-queue = <8704>;
tx-completion-queue = <8706>;
efuse-mac = <1>;
netcp-gbe = <&gbe0>;
};
interface-1 {
rx-channel = <23>;
rx-pool = <1024 12>;
tx-pool = <1024 12>;
rx-queue-depth = <128 128 0 0>;
rx-buffer-size = <1518 4096 0 0>;
rx-queue = <8705>;
tx-completion-queue = <8707>;
efuse-mac = <0>;
local-mac-address = [02 18 31 7e 3e 6f];
netcp-gbe = <&gbe1>;
};
};
};


@ -1,7 +1,7 @@
* STMicroelectronics SAS. ST21NFCA NFC Controller
Required properties:
- compatible: Should be "st,st21nfca_i2c".
- compatible: Should be "st,st21nfca-i2c".
- clock-frequency: I²C work frequency.
- reg: address on the bus
- interrupt-parent: phandle for the interrupt gpio controller
@ -11,6 +11,10 @@ Required properties:
Optional SoC Specific Properties:
- pinctrl-names: Contains only one value - "default".
- pinctrl-0: Specifies the pin control groups used for this controller.
- ese-present: Specifies that an ese is physically connected to the nfc
controller.
- uicc-present: Specifies that the uicc swp signal can be physically
connected to the nfc controller.
Example (for ARM-based BeagleBoard xM with ST21NFCA on I2C2):
@ -20,7 +24,7 @@ Example (for ARM-based BeagleBoard xM with ST21NFCA on I2C2):
st21nfca: st21nfca@1 {
compatible = "st,st21nfca_i2c";
compatible = "st,st21nfca-i2c";
reg = <0x01>;
clock-frequency = <400000>;
@ -29,5 +33,8 @@ Example (for ARM-based BeagleBoard xM with ST21NFCA on I2C2):
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
enable-gpios = <&gpio5 29 GPIO_ACTIVE_HIGH>;
ese-present;
uicc-present;
};
};


@ -1,7 +1,7 @@
* STMicroelectronics SAS. ST21NFCB NFC Controller
Required properties:
- compatible: Should be "st,st21nfcb_i2c".
- compatible: Should be "st,st21nfcb-i2c".
- clock-frequency: I²C work frequency.
- reg: address on the bus
- interrupt-parent: phandle for the interrupt gpio controller
@ -20,7 +20,7 @@ Example (for ARM-based BeagleBoard xM with ST21NFCB on I2C2):
st21nfcb: st21nfcb@8 {
compatible = "st,st21nfcb_i2c";
compatible = "st,st21nfcb-i2c";
reg = <0x08>;
clock-frequency = <400000>;


@ -0,0 +1,68 @@
Rockchip SoC RK3288 10/100/1000 Ethernet driver (GMAC)
The device node has following properties.
Required properties:
- compatible: Can be "rockchip,rk3288-gmac".
- reg: addresses and length of the register sets for the device.
- interrupts: Should contain the GMAC interrupts.
- interrupt-names: Should contain the interrupt names "macirq".
- rockchip,grf: phandle to the syscon grf used to control speed and mode.
- clocks: <&cru SCLK_MAC>: clock selector for main clock, from PLL or PHY.
<&cru SCLK_MAC_PLL>: PLL clock for SCLK_MAC
<&cru SCLK_MAC_RX>: clock gate for RX
<&cru SCLK_MAC_TX>: clock gate for TX
<&cru SCLK_MACREF>: clock gate for RMII reference clock
<&cru SCLK_MACREF_OUT> clock gate for RMII reference clock output
<&cru ACLK_GMAC>: AXI clock gate for GMAC
<&cru PCLK_GMAC>: APB clock gate for GMAC
- clock-names: One name for each entry in the clocks property.
- phy-mode: See ethernet.txt file in the same directory.
- pinctrl-names: Names corresponding to the numbered pinctrl states.
- pinctrl-0: pin-control mode. can be <&rgmii_pins> or <&rmii_pins>.
- clock_in_out: For RGMII, it must be "input", meaning the main clock (125MHz)
is not sourced from the SoC's PLL but is input from the PHY; for RMII, "input" means
the PHY provides the reference clock (50MHz), and "output" means the GMAC provides the
reference clock.
- snps,reset-gpio: gpio number for phy reset.
- snps,reset-active-low: boolean flag to indicate if phy reset is active low.
- assigned-clocks: main clock, should be <&cru SCLK_MAC>;
- assigned-clock-parents = parent of main clock.
can be <&ext_gmac> or <&cru SCLK_MAC_PLL>.
Optional properties:
- tx_delay: Delay value for TXD timing. Range value is 0~0x7F, 0x30 as default.
- rx_delay: Delay value for RXD timing. Range value is 0~0x7F, 0x10 as default.
- phy-supply: phandle to a regulator if the PHY needs one
Example:
gmac: ethernet@ff290000 {
compatible = "rockchip,rk3288-gmac";
reg = <0xff290000 0x10000>;
interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq";
rockchip,grf = <&grf>;
clocks = <&cru SCLK_MAC>,
<&cru SCLK_MAC_RX>, <&cru SCLK_MAC_TX>,
<&cru SCLK_MACREF>, <&cru SCLK_MACREF_OUT>,
<&cru ACLK_GMAC>, <&cru PCLK_GMAC>;
clock-names = "stmmaceth",
"mac_clk_rx", "mac_clk_tx",
"clk_mac_ref", "clk_mac_refout",
"aclk_mac", "pclk_mac";
phy-mode = "rgmii";
pinctrl-names = "default";
pinctrl-0 = <&rgmii_pins /*&rmii_pins*/>;
clock_in_out = "input";
snps,reset-gpio = <&gpio4 7 0>;
snps,reset-active-low;
assigned-clocks = <&cru SCLK_MAC>;
assigned-clock-parents = <&ext_gmac>;
tx_delay = <0x30>;
rx_delay = <0x10>;
status = "ok";
};


@ -9,14 +9,10 @@ The device node has following properties.
Required properties:
- compatible : Can be "st,stih415-dwmac", "st,stih416-dwmac",
"st,stih407-dwmac", "st,stid127-dwmac".
- reg : Offset of the glue configuration register map in system
configuration regmap pointed by st,syscon property and size.
- st,syscon : Should be phandle to system configuration node which
encompases this glue registers.
- st,syscon : Should be phandle/offset pair. The phandle to the syscon node which
encompases the glue register, and the offset of the control register.
- st,gmac_en: this is to enable the gmac into a dedicated sysctl control
register available on STiH407 SoC.
- sti-ethconf: this is the gmac glue logic register to enable the GMAC,
select among the different modes and program the clk retiming.
- pinctrl-0: pin-control for all the MII mode supported.
Optional properties:
@ -40,10 +36,10 @@ ethernet0: dwmac@9630000 {
device_type = "network";
status = "disabled";
compatible = "st,stih407-dwmac", "snps,dwmac", "snps,dwmac-3.710";
reg = <0x9630000 0x8000>, <0x80 0x4>;
reg-names = "stmmaceth", "sti-ethconf";
reg = <0x9630000 0x8000>;
reg-names = "stmmaceth";
st,syscon = <&syscfg_sbc_reg>;
st,syscon = <&syscfg_sbc_reg 0x80>;
st,gmac_en;
resets = <&softreset STIH407_ETH1_SOFTRESET>;
reset-names = "stmmaceth";


@ -43,6 +43,7 @@ Optional properties:
available this clock is used for programming the Timestamp Addend Register.
If not passed then the system clock will be used and this is fine on some
platforms.
- snps,burst_len: The AXI burst length value of the AXI BUS MODE register.
Examples:


@ -0,0 +1,30 @@
* Qualcomm Atheros ath10k wireless devices
For ath10k devices the calibration data can be provided through Device
Tree. The node is a child node of the PCI controller.
Required properties:
- compatible : Should be "qcom,ath10k"
Optional properties:
- qcom,ath10k-calibration-data : calibration data as an array, the
length can vary between hw versions
Example:
pci {
pcie@0 {
reg = <0 0 0 0 0>;
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
device_type = "pci";
ath10k@0,0 {
reg = <0 0 0 0 0>;
device_type = "pci";
qcom,ath10k-calibration-data = [ 01 02 03 ... ];
};
};
};


@ -6,8 +6,10 @@ for SATA and PCIe.
Required properties (controller (parent) node):
- compatible : Should be "st,miphy365x-phy"
- st,syscfg : Should be a phandle of the system configuration register group
which contain the SATA, PCIe mode setting bits
- st,syscfg : Phandle / integer array property. Phandle of sysconfig group
containing the miphy registers and integer array should contain
an entry for each port sub-node, specifying the control
register offset inside the sysconfig group.
Required nodes : A sub-node is required for each channel the controller
provides. Address range information including the usual
@ -26,7 +28,6 @@ Required properties (port (child) node):
registers filled in "reg":
- sata: For SATA devices
- pcie: For PCIe devices
- syscfg: To specify the syscfg based config register
Optional properties (port (child) node):
- st,sata-gen : Generation of locally attached SATA IP. Expected values
@ -39,20 +40,20 @@ Example:
miphy365x_phy: miphy365x@fe382000 {
compatible = "st,miphy365x-phy";
st,syscfg = <&syscfg_rear>;
st,syscfg = <&syscfg_rear 0x824 0x828>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
phy_port0: port@fe382000 {
reg = <0xfe382000 0x100>, <0xfe394000 0x100>, <0x824 0x4>;
reg-names = "sata", "pcie", "syscfg";
reg = <0xfe382000 0x100>, <0xfe394000 0x100>;
reg-names = "sata", "pcie";
#phy-cells = <1>;
st,sata-gen = <3>;
};
phy_port1: port@fe38a000 {
reg = <0xfe38a000 0x100>, <0xfe804000 0x100>, <0x828 0x4>;;
reg = <0xfe38a000 0x100>, <0xfe804000 0x100>;;
reg-names = "sata", "pcie", "syscfg";
#phy-cells = <1>;
st,pcie-tx-pol-inv;


@ -5,10 +5,7 @@ host controllers (when controlling usb2/1.1 devices) available on STiH407 SoC family.
Required properties:
- compatible : should be "st,stih407-usb2-phy"
- reg : contain the offset and length of the system configuration registers
used as glue logic to control & parameter phy
- reg-names : the names of the system configuration registers in "reg", should be "param" and "reg"
- st,syscfg : sysconfig register to manage phy parameter at driver level
- st,syscfg : phandle of sysconfig bank plus integer array containing phyparam and phyctrl register offsets
- resets : list of phandle and reset specifier pairs. There should be two entries, one
for the whole phy and one for the port
- reset-names : list of reset signal names. Should be "global" and "port"
@ -19,11 +16,8 @@ Example:
usb2_picophy0: usbpicophy@f8 {
compatible = "st,stih407-usb2-phy";
reg = <0xf8 0x04>, /* syscfg 5062 */
<0xf4 0x04>; /* syscfg 5061 */
reg-names = "param", "ctrl";
#phy-cells = <0>;
st,syscfg = <&syscfg_core>;
st,syscfg = <&syscfg_core 0x100 0xf4>;
resets = <&softreset STIH407_PICOPHY_SOFTRESET>,
<&picophyreset STIH407_PICOPHY0_RESET>;
reset-names = "global", "port";


@ -3210,6 +3210,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
retain_initrd [RAM] Keep initrd memory after extraction
rfkill.default_state=
0 "airplane mode". All wifi, bluetooth, wimax, gps, fm,
etc. communication is blocked by default.
1 Unblocked.
rfkill.master_switch_mode=
0 The "airplane mode" button does nothing.
1 The "airplane mode" button toggles between everything
blocked and the previous configuration.
2 The "airplane mode" button toggles between everything
blocked and everything unblocked.
rhash_entries= [KNL,NET]
Set number of hash buckets for route cache


@ -279,8 +279,8 @@ Possible BPF extensions are shown in the following table:
hatype skb->dev->type
rxhash skb->hash
cpu raw_smp_processor_id()
vlan_tci vlan_tx_tag_get(skb)
vlan_pr vlan_tx_tag_present(skb)
vlan_tci skb_vlan_tag_get(skb)
vlan_pr skb_vlan_tag_present(skb)
rand prandom_u32()
These extensions can also be prefixed with '#'.
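For context, a minimal userspace sketch of how these extensions are reached from a classic BPF program (the filter itself, the VLAN id 100, and the use of a packet socket are illustrative assumptions): the vlan_tci mnemonic assembles to an ancillary load from SKF_AD_OFF + SKF_AD_VLAN_TAG, which the kernel now services via skb_vlan_tag_get():

    #include <arpa/inet.h>
    #include <linux/filter.h>
    #include <linux/if_ether.h>
    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
        struct sock_filter code[] = {
            /* A = vlan_tci (ancillary load) */
            BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                     SKF_AD_OFF + SKF_AD_VLAN_TAG),
            /* keep only VLAN 100 (assumes priority bits are 0) */
            BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 100, 0, 1),
            BPF_STMT(BPF_RET | BPF_K, 0xffff),  /* accept */
            BPF_STMT(BPF_RET | BPF_K, 0),       /* drop */
        };
        struct sock_fprog prog = {
            .len = sizeof(code) / sizeof(code[0]),
            .filter = code,
        };
        int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

        if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
                                 &prog, sizeof(prog)))
            perror("vlan filter");
        return 0;
    }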


@ -290,6 +290,28 @@ tcp_frto - INTEGER
By default it's enabled with a non-zero value. 0 disables F-RTO.
tcp_invalid_ratelimit - INTEGER
Limit the maximal rate for sending duplicate acknowledgments
in response to incoming TCP packets that are for an existing
connection but that are invalid due to any of these reasons:
(a) out-of-window sequence number,
(b) out-of-window acknowledgment number, or
(c) PAWS (Protection Against Wrapped Sequence numbers) check failure
This can help mitigate simple "ack loop" DoS attacks, wherein
a buggy or malicious middlebox or man-in-the-middle can
rewrite TCP header fields in a manner that causes each endpoint
to think that the other is sending invalid TCP segments, thus
causing each side to send an unending stream of duplicate
acknowledgments for invalid segments.
Using 0 disables rate-limiting of dupacks in response to
invalid segments; otherwise this value specifies the minimal
space between sending such dupacks, in milliseconds.
Default: 500 (milliseconds).
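As a worked example (a minimal sketch; only the sysctl path follows from the documentation above, and the chosen value is arbitrary), the limit can be changed at runtime by writing a new interval to the proc file:

    #include <stdio.h>

    int main(void)
    {
        /* allow at most one dupack per second for invalid segments
         * on a given connection (the default is one per 500 ms) */
        FILE *f = fopen("/proc/sys/net/ipv4/tcp_invalid_ratelimit", "w");

        if (!f) {
            perror("tcp_invalid_ratelimit");
            return 1;
        }
        fprintf(f, "1000\n");
        return fclose(f) ? 1 : 0;
    }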
tcp_keepalive_time - INTEGER
How often TCP sends out keepalive messages when keepalive is enabled.
Default: 2 hours.
@ -1287,6 +1309,13 @@ accept_ra_rtr_pref - BOOLEAN
Functional default: enabled if accept_ra is enabled.
disabled if accept_ra is disabled.
accept_ra_mtu - BOOLEAN
Apply the MTU value specified in RA option 5 (RFC4861). If
disabled, the MTU specified in the RA will be ignored.
Functional default: enabled if accept_ra is enabled.
disabled if accept_ra is disabled.
accept_redirects - BOOLEAN
Accept Redirects.


@ -11,7 +11,8 @@ nf_conntrack_buckets - INTEGER (read-only)
Size of hash table. If not specified as parameter during module
loading, the default size is calculated by dividing total memory
by 16384 to determine the number of buckets but the hash table will
never have fewer than 32 or more than 16384 buckets.
never have fewer than 32 and limited to 16384 buckets. For systems
with more than 4GB of memory it will be 65536 buckets.
nf_conntrack_checksum - BOOLEAN
0 - disabled


@ -131,6 +131,19 @@ performs best-effort detection of overlapping wildcarded flows and may reject
some but not all of them. However, this behavior may change in future versions.
Unique flow identifiers
-----------------------
An alternative to using the original match portion of a key as the handle for
flow identification is a unique flow identifier, or "UFID". UFIDs are optional
for both the kernel and user space program.
User space programs that support UFID are expected to provide it during flow
setup in addition to the flow, then refer to the flow using the UFID for all
future operations. The kernel is not required to index flows by the original
flow key if a UFID is specified.
Basic rule for evolving flow keys
---------------------------------


@ -162,6 +162,27 @@ SOF_TIMESTAMPING_OPT_CMSG:
option IP_PKTINFO simultaneously.
SOF_TIMESTAMPING_OPT_TSONLY:
Applies to transmit timestamps only. Makes the kernel return the
timestamp as a cmsg alongside an empty packet, as opposed to
alongside the original packet. This reduces the amount of memory
charged to the socket's receive budget (SO_RCVBUF) and delivers
the timestamp even if sysctl net.core.tstamp_allow_data is 0.
This option disables SOF_TIMESTAMPING_OPT_CMSG.
New applications are encouraged to pass SOF_TIMESTAMPING_OPT_ID to
disambiguate timestamps and SOF_TIMESTAMPING_OPT_TSONLY to operate
regardless of the setting of sysctl net.core.tstamp_allow_data.
An exception is when a process needs additional cmsg data, for
instance SOL_IP/IP_PKTINFO to detect the egress network interface.
Then pass option SOF_TIMESTAMPING_OPT_CMSG. This option depends on
having access to the contents of the original packet, so cannot be
combined with SOF_TIMESTAMPING_OPT_TSONLY.
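Putting those recommendations together, a minimal sketch of the setup step (the helper name is hypothetical; the flags are the ones documented above):

    #include <linux/net_tstamp.h>
    #include <sys/socket.h>

    /* request software TX timestamps that are looped back without
     * payload, so delivery works even when net.core.tstamp_allow_data
     * is 0; OPT_ID disambiguates which send each timestamp belongs to */
    static int enable_tx_timestamps(int fd)
    {
        unsigned int val = SOF_TIMESTAMPING_TX_SOFTWARE |
                           SOF_TIMESTAMPING_SOFTWARE |
                           SOF_TIMESTAMPING_OPT_ID |
                           SOF_TIMESTAMPING_OPT_TSONLY;

        return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                          &val, sizeof(val));
    }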
1.4 Bytestream Timestamps
The SO_TIMESTAMPING interface supports timestamping of bytes in a


@ -30,6 +30,8 @@
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <asm/types.h>
#include <error.h>
@ -59,14 +61,6 @@
#include <time.h>
#include <unistd.h>
/* ugly hack to work around netinet/in.h and linux/ipv6.h conflicts */
#ifndef in6_pktinfo
struct in6_pktinfo {
struct in6_addr ipi6_addr;
int ipi6_ifindex;
};
#endif
/* command line parameters */
static int cfg_proto = SOCK_STREAM;
static int cfg_ipproto = IPPROTO_TCP;
@ -76,6 +70,7 @@ static int do_ipv6 = 1;
static int cfg_payload_len = 10;
static bool cfg_show_payload;
static bool cfg_do_pktinfo;
static bool cfg_loop_nodata;
static uint16_t dest_port = 9000;
static struct sockaddr_in daddr;
@ -147,6 +142,9 @@ static void print_payload(char *data, int len)
{
int i;
if (!len)
return;
if (len > 70)
len = 70;
@ -183,6 +181,7 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len)
struct sock_extended_err *serr = NULL;
struct scm_timestamping *tss = NULL;
struct cmsghdr *cm;
int batch = 0;
for (cm = CMSG_FIRSTHDR(msg);
cm && cm->cmsg_len;
@ -215,10 +214,18 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len)
} else
fprintf(stderr, "unknown cmsg %d,%d\n",
cm->cmsg_level, cm->cmsg_type);
if (serr && tss) {
print_timestamp(tss, serr->ee_info, serr->ee_data,
payload_len);
serr = NULL;
tss = NULL;
batch++;
}
}
if (serr && tss)
print_timestamp(tss, serr->ee_info, serr->ee_data, payload_len);
if (batch > 1)
fprintf(stderr, "batched %d timestamps\n", batch);
}
static int recv_errmsg(int fd)
@ -250,7 +257,7 @@ static int recv_errmsg(int fd)
if (ret == -1 && errno != EAGAIN)
error(1, errno, "recvmsg");
if (ret > 0) {
if (ret >= 0) {
__recv_errmsg_cmsg(&msg, ret);
if (cfg_show_payload)
print_payload(data, cfg_payload_len);
@ -315,6 +322,9 @@ static void do_test(int family, unsigned int opt)
opt |= SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_OPT_CMSG |
SOF_TIMESTAMPING_OPT_ID;
if (cfg_loop_nodata)
opt |= SOF_TIMESTAMPING_OPT_TSONLY;
if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
(char *) &opt, sizeof(opt)))
error(1, 0, "setsockopt timestamping");
@ -384,6 +394,7 @@ static void __attribute__((noreturn)) usage(const char *filepath)
" -h: show this message\n"
" -I: request PKTINFO\n"
" -l N: send N bytes at a time\n"
" -n: set no-payload option\n"
" -r: use raw\n"
" -R: use raw (IP_HDRINCL)\n"
" -p N: connect to port N\n"
@ -398,7 +409,7 @@ static void parse_opt(int argc, char **argv)
int proto_count = 0;
char c;
while ((c = getopt(argc, argv, "46hIl:p:rRux")) != -1) {
while ((c = getopt(argc, argv, "46hIl:np:rRux")) != -1) {
switch (c) {
case '4':
do_ipv6 = 0;
@ -409,6 +420,9 @@ static void parse_opt(int argc, char **argv)
case 'I':
cfg_do_pktinfo = true;
break;
case 'n':
cfg_loop_nodata = true;
break;
case 'r':
proto_count++;
cfg_proto = SOCK_RAW;


@ -25,6 +25,9 @@ whether they can be changed or not:
- soft block: writable radio block (need not be readable) that is set by
the system software.
The rfkill subsystem has two parameters, rfkill.default_state and
rfkill.master_switch_mode, which are documented in kernel-parameters.txt.
2. Implementation details


@ -97,6 +97,14 @@ rmem_max
The maximum receive socket buffer size in bytes.
tstamp_allow_data
-----------------
Allow processes to receive tx timestamps looped together with the original
packet contents. If disabled, transmit timestamp requests from unprivileged
processes are dropped unless socket option SOF_TIMESTAMPING_OPT_TSONLY is set.
Default: 1 (on)
wmem_default
------------


@ -1661,7 +1661,6 @@ M: Jiri Slaby <jirislaby@gmail.com>
M: Nick Kossifidis <mickflemm@gmail.com>
M: "Luis R. Rodriguez" <mcgrof@do-not-panic.com>
L: linux-wireless@vger.kernel.org
L: ath5k-devel@lists.ath5k.org
W: http://wireless.kernel.org/en/users/Drivers/ath5k
S: Maintained
F: drivers/net/wireless/ath/ath5k/
@ -6652,6 +6651,7 @@ F: include/linux/netdevice.h
F: include/uapi/linux/in.h
F: include/uapi/linux/net.h
F: include/uapi/linux/netdevice.h
F: include/uapi/linux/net_namespace.h
F: tools/net/
F: tools/testing/selftests/net/
F: lib/random32.c
@ -7090,11 +7090,12 @@ F: arch/openrisc/
OPENVSWITCH
M: Pravin Shelar <pshelar@nicira.com>
L: netdev@vger.kernel.org
L: dev@openvswitch.org
W: http://openvswitch.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pshelar/openvswitch.git
S: Maintained
F: net/openvswitch/
F: include/uapi/linux/openvswitch.h
OPL4 DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
@ -8097,6 +8098,13 @@ S: Maintained
F: Documentation/rfkill.txt
F: net/rfkill/
RHASHTABLE
M: Thomas Graf <tgraf@suug.ch>
L: netdev@vger.kernel.org
S: Maintained
F: lib/rhashtable.c
F: include/linux/rhashtable.h
RICOH SMARTMEDIA/XD DRIVER
M: Maxim Levitsky <maximlevitsky@gmail.com>
S: Maintained
@ -9655,6 +9663,13 @@ F: drivers/power/lp8788-charger.c
F: drivers/regulator/lp8788-*.c
F: include/linux/mfd/lp8788*.h
TI NETCP ETHERNET DRIVER
M: Wingman Kwok <w-kwok2@ti.com>
M: Murali Karicheri <m-karicheri2@ti.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/ti/netcp*
TI TWL4030 SERIES SOC CODEC DRIVER
M: Peter Ujfalusi <peter.ujfalusi@ti.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)


@ -31,6 +31,7 @@
status = "disabled";
reg = <0x5c000000 0x30000>;
interrupts = <67 68 69 70>;
syscon = <&omap3_scm_general>;
ti,davinci-ctrl-reg-offset = <0x10000>;
ti,davinci-ctrl-mod-reg-offset = <0>;
ti,davinci-ctrl-ram-offset = <0x20000>;


@ -15,6 +15,13 @@
/ {
compatible = "rockchip,rk3288-evb-rk808", "rockchip,rk3288";
ext_gmac: external-gmac-clock {
compatible = "fixed-clock";
clock-frequency = <125000000>;
clock-output-names = "ext_gmac";
#clock-cells = <0>;
};
};
&cpu0 {
@ -152,3 +159,19 @@
};
};
};
&gmac {
phy-supply = <&vcc_phy>;
phy-mode = "rgmii";
clock_in_out = "input";
snps,reset-gpio = <&gpio4 7 0>;
snps,reset-active-low;
snps,reset-delays-us = <0 10000 1000000>;
assigned-clocks = <&cru SCLK_MAC>;
assigned-clock-parents = <&ext_gmac>;
pinctrl-names = "default";
pinctrl-0 = <&rgmii_pins>;
tx_delay = <0x30>;
rx_delay = <0x10>;
status = "ok";
};


@ -90,6 +90,19 @@
regulator-always-on;
regulator-boot-on;
};
vcc_phy: vcc-phy-regulator {
compatible = "regulator-fixed";
enable-active-high;
gpio = <&gpio0 6 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&eth_phy_pwr>;
regulator-name = "vcc_phy";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
regulator-boot-on;
};
};
&emmc {
@ -208,6 +221,12 @@
rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
eth_phy {
eth_phy_pwr: eth-phy-pwr {
rockchip,pins = <0 6 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
&usb_host0_ehci {


@ -380,6 +380,22 @@
status = "disabled";
};
gmac: ethernet@ff290000 {
compatible = "rockchip,rk3288-gmac";
reg = <0xff290000 0x10000>;
interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq";
rockchip,grf = <&grf>;
clocks = <&cru SCLK_MAC>,
<&cru SCLK_MAC_RX>, <&cru SCLK_MAC_TX>,
<&cru SCLK_MACREF>, <&cru SCLK_MACREF_OUT>,
<&cru ACLK_GMAC>, <&cru PCLK_GMAC>;
clock-names = "stmmaceth",
"mac_clk_rx", "mac_clk_tx",
"clk_mac_ref", "clk_mac_refout",
"aclk_mac", "pclk_mac";
};
usb_host0_ehci: usb@ff500000 {
compatible = "generic-ehci";
reg = <0xff500000 0x100>;
@ -725,6 +741,11 @@
bias-disable;
};
pcfg_pull_none_12ma: pcfg-pull-none-12ma {
bias-disable;
drive-strength = <12>;
};
i2c0 {
i2c0_xfer: i2c0-xfer {
rockchip,pins = <0 15 RK_FUNC_1 &pcfg_pull_none>,
@ -1068,5 +1089,38 @@
rockchip,pins = <7 23 3 &pcfg_pull_none>;
};
};
gmac {
rgmii_pins: rgmii-pins {
rockchip,pins = <3 30 3 &pcfg_pull_none>,
<3 31 3 &pcfg_pull_none>,
<3 26 3 &pcfg_pull_none>,
<3 27 3 &pcfg_pull_none>,
<3 28 3 &pcfg_pull_none_12ma>,
<3 29 3 &pcfg_pull_none_12ma>,
<3 24 3 &pcfg_pull_none_12ma>,
<3 25 3 &pcfg_pull_none_12ma>,
<4 0 3 &pcfg_pull_none>,
<4 5 3 &pcfg_pull_none>,
<4 6 3 &pcfg_pull_none>,
<4 9 3 &pcfg_pull_none_12ma>,
<4 4 3 &pcfg_pull_none_12ma>,
<4 1 3 &pcfg_pull_none>,
<4 3 3 &pcfg_pull_none>;
};
rmii_pins: rmii-pins {
rockchip,pins = <3 30 3 &pcfg_pull_none>,
<3 31 3 &pcfg_pull_none>,
<3 28 3 &pcfg_pull_none>,
<3 29 3 &pcfg_pull_none>,
<4 0 3 &pcfg_pull_none>,
<4 5 3 &pcfg_pull_none>,
<4 4 3 &pcfg_pull_none>,
<4 1 3 &pcfg_pull_none>,
<4 2 3 &pcfg_pull_none>,
<4 3 3 &pcfg_pull_none>;
};
};
};
};


@ -274,5 +274,14 @@
status = "disabled";
};
usb2_picophy0: phy1 {
compatible = "st,stih407-usb2-phy";
#phy-cells = <0>;
st,syscfg = <&syscfg_core 0x100 0xf4>;
resets = <&softreset STIH407_PICOPHY_SOFTRESET>,
<&picophyreset STIH407_PICOPHY0_RESET>;
reset-names = "global", "port";
};
};
};


@ -10,5 +10,75 @@
#include "stih407-family.dtsi"
#include "stih410-pinctrl.dtsi"
/ {
soc {
usb2_picophy1: phy2 {
compatible = "st,stih407-usb2-phy";
#phy-cells = <0>;
st,syscfg = <&syscfg_core 0xf8 0xf4>;
resets = <&softreset STIH407_PICOPHY_SOFTRESET>,
<&picophyreset STIH407_PICOPHY0_RESET>;
reset-names = "global", "port";
};
usb2_picophy2: phy3 {
compatible = "st,stih407-usb2-phy";
#phy-cells = <0>;
st,syscfg = <&syscfg_core 0xfc 0xf4>;
resets = <&softreset STIH407_PICOPHY_SOFTRESET>,
<&picophyreset STIH407_PICOPHY1_RESET>;
reset-names = "global", "port";
};
ohci0: usb@9a03c00 {
compatible = "st,st-ohci-300x";
reg = <0x9a03c00 0x100>;
interrupts = <GIC_SPI 180 IRQ_TYPE_NONE>;
clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
<&softreset STIH407_USB2_PORT0_SOFTRESET>;
reset-names = "power", "softreset";
phys = <&usb2_picophy1>;
phy-names = "usb";
};
ehci0: usb@9a03e00 {
compatible = "st,st-ehci-300x";
reg = <0x9a03e00 0x100>;
interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0>;
clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
<&softreset STIH407_USB2_PORT0_SOFTRESET>;
reset-names = "power", "softreset";
phys = <&usb2_picophy1>;
phy-names = "usb";
};
ohci1: usb@9a83c00 {
compatible = "st,st-ohci-300x";
reg = <0x9a83c00 0x100>;
interrupts = <GIC_SPI 181 IRQ_TYPE_NONE>;
clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
<&softreset STIH407_USB2_PORT1_SOFTRESET>;
reset-names = "power", "softreset";
phys = <&usb2_picophy2>;
phy-names = "usb";
};
ehci1: usb@9a83e00 {
compatible = "st,st-ehci-300x";
reg = <0x9a83e00 0x100>;
interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb1>;
clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
<&softreset STIH407_USB2_PORT1_SOFTRESET>;
reset-names = "power", "softreset";
phys = <&usb2_picophy2>;
phy-names = "usb";
};
};
};


@ -153,8 +153,8 @@
compatible = "st,stih415-dwmac", "snps,dwmac", "snps,dwmac-3.610";
status = "disabled";
reg = <0xfe810000 0x8000>, <0x148 0x4>;
reg-names = "stmmaceth", "sti-ethconf";
reg = <0xfe810000 0x8000>;
reg-names = "stmmaceth";
interrupts = <0 147 0>, <0 148 0>, <0 149 0>;
interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
@ -165,7 +165,7 @@
snps,mixed-burst;
snps,force_sf_dma_mode;
st,syscon = <&syscfg_rear>;
st,syscon = <&syscfg_rear 0x148>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mii0>;
@ -177,8 +177,8 @@
device_type = "network";
compatible = "st,stih415-dwmac", "snps,dwmac", "snps,dwmac-3.610";
status = "disabled";
reg = <0xfef08000 0x8000>, <0x74 0x4>;
reg-names = "stmmaceth", "sti-ethconf";
reg = <0xfef08000 0x8000>;
reg-names = "stmmaceth";
interrupts = <0 150 0>, <0 151 0>, <0 152 0>;
interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
@ -186,7 +186,7 @@
snps,mixed-burst;
snps,force_sf_dma_mode;
st,syscon = <&syscfg_sbc>;
st,syscon = <&syscfg_sbc 0x74>;
resets = <&softreset STIH415_ETH1_SOFTRESET>;
reset-names = "stmmaceth";


@ -163,8 +163,8 @@
device_type = "network";
compatible = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
status = "disabled";
reg = <0xfe810000 0x8000>, <0x8bc 0x4>;
reg-names = "stmmaceth", "sti-ethconf";
reg = <0xfe810000 0x8000>;
reg-names = "stmmaceth";
interrupts = <0 133 0>, <0 134 0>, <0 135 0>;
interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
@ -172,7 +172,7 @@
snps,pbl = <32>;
snps,mixed-burst;
st,syscon = <&syscfg_rear>;
st,syscon = <&syscfg_rear 0x8bc>;
resets = <&softreset STIH416_ETH0_SOFTRESET>;
reset-names = "stmmaceth";
pinctrl-names = "default";
@ -185,15 +185,15 @@
device_type = "network";
compatible = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
status = "disabled";
reg = <0xfef08000 0x8000>, <0x7f0 0x4>;
reg-names = "stmmaceth", "sti-ethconf";
reg = <0xfef08000 0x8000>;
reg-names = "stmmaceth";
interrupts = <0 136 0>, <0 137 0>, <0 138 0>;
interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
snps,pbl = <32>;
snps,mixed-burst;
st,syscon = <&syscfg_sbc>;
st,syscon = <&syscfg_sbc 0x7f0>;
resets = <&softreset STIH416_ETH1_SOFTRESET>;
reset-names = "stmmaceth";
@ -283,21 +283,21 @@
miphy365x_phy: phy@fe382000 {
compatible = "st,miphy365x-phy";
st,syscfg = <&syscfg_rear>;
st,syscfg = <&syscfg_rear 0x824 0x828>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
phy_port0: port@fe382000 {
#phy-cells = <1>;
reg = <0xfe382000 0x100>, <0xfe394000 0x100>, <0x824 0x4>;
reg-names = "sata", "pcie", "syscfg";
reg = <0xfe382000 0x100>, <0xfe394000 0x100>;
reg-names = "sata", "pcie";
};
phy_port1: port@fe38a000 {
#phy-cells = <1>;
reg = <0xfe38a000 0x100>, <0xfe804000 0x100>, <0x828 0x4>;
reg-names = "sata", "pcie", "syscfg";
reg = <0xfe38a000 0x100>, <0xfe804000 0x100>;
reg-names = "sata", "pcie";
};
};


@ -456,6 +456,7 @@ CONFIG_OMAP_USB2=y
CONFIG_TI_PIPE3=y
CONFIG_PHY_MIPHY365X=y
CONFIG_PHY_STIH41X_USB=y
CONFIG_PHY_STIH407_USB=y
CONFIG_PHY_SUN4I_USB=y
CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y


@ -37,7 +37,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/irda.h>
#include <linux/platform_data/irda-sa11x0.h>
#include <asm/mach/map.h>
#include <mach/assabet.h>
#include <linux/platform_data/mfd-mcp-sa11x0.h>


@ -43,7 +43,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <asm/mach/irda.h>
#include <linux/platform_data/irda-sa11x0.h>
#include <asm/hardware/scoop.h>
#include <asm/mach/sharpsl_param.h>


@ -18,7 +18,7 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/irda.h>
#include <linux/platform_data/irda-sa11x0.h>
#include <mach/h3xxx.h>
#include <mach/irqs.h>


@ -18,7 +18,7 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/irda.h>
#include <linux/platform_data/irda-sa11x0.h>
#include <mach/h3xxx.h>
#include <mach/irqs.h>


@ -17,6 +17,7 @@
#include <linux/clockchips.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/timecounter.h>
#include <asm/cpuinfo.h>
static void __iomem *timer_baseaddr;


@ -338,49 +338,31 @@ static const struct net_proto_family alg_family = {
.owner = THIS_MODULE,
};
int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
int write)
int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
{
unsigned long from = (unsigned long)addr;
unsigned long npages;
unsigned off;
int err;
int i;
size_t off;
ssize_t n;
int npages, i;
err = -EFAULT;
if (!access_ok(write ? VERIFY_READ : VERIFY_WRITE, addr, len))
goto out;
n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off);
if (n < 0)
return n;
off = from & ~PAGE_MASK;
npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (npages > ALG_MAX_PAGES)
npages = ALG_MAX_PAGES;
err = get_user_pages_fast(from, npages, write, sgl->pages);
if (err < 0)
goto out;
npages = err;
err = -EINVAL;
npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (WARN_ON(npages == 0))
goto out;
err = 0;
return -EINVAL;
sg_init_table(sgl->sg, npages);
for (i = 0; i < npages; i++) {
for (i = 0, len = n; i < npages; i++) {
int plen = min_t(int, len, PAGE_SIZE - off);
sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);
off = 0;
len -= plen;
err += plen;
}
out:
return err;
return n;
}
EXPORT_SYMBOL_GPL(af_alg_make_sg);


@ -41,8 +41,6 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
unsigned long iovlen;
const struct iovec *iov;
long copied = 0;
int err;
@ -58,37 +56,28 @@ static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
ctx->more = 0;
for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0;
iovlen--, iov++) {
unsigned long seglen = iov->iov_len;
char __user *from = iov->iov_base;
while (iov_iter_count(&msg->msg_iter)) {
int len = iov_iter_count(&msg->msg_iter);
while (seglen) {
int len = min_t(unsigned long, seglen, limit);
int newlen;
if (len > limit)
len = limit;
newlen = af_alg_make_sg(&ctx->sgl, from, len, 0);
if (newlen < 0) {
err = copied ? 0 : newlen;
goto unlock;
}
ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL,
newlen);
err = af_alg_wait_for_completion(
crypto_ahash_update(&ctx->req),
&ctx->completion);
af_alg_free_sg(&ctx->sgl);
if (err)
goto unlock;
seglen -= newlen;
from += newlen;
copied += newlen;
len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len);
if (len < 0) {
err = copied ? 0 : len;
goto unlock;
}
ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req),
&ctx->completion);
af_alg_free_sg(&ctx->sgl);
if (err)
goto unlock;
copied += len;
iov_iter_advance(&msg->msg_iter, len);
}
err = 0;
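For reference, the userspace path that exercises this code (a minimal sketch, not part of the patch; error handling elided): hashing through AF_ALG sends data into hash_sendmsg() above, which now walks msg->msg_iter with af_alg_make_sg() instead of iterating the iovec array by hand:

    #include <linux/if_alg.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static void sha1_af_alg(const void *buf, size_t len,
                            unsigned char digest[20])
    {
        struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "hash",
            .salg_name   = "sha1",
        };
        int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        int op;

        bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
        op = accept(tfm, NULL, 0);
        send(op, buf, len, 0);   /* arrives in hash_sendmsg() */
        read(op, digest, 20);
        close(op);
        close(tfm);
    }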


@ -426,67 +426,58 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
&ctx->req));
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
unsigned long iovlen;
const struct iovec *iov;
int err = -EAGAIN;
int used;
long copied = 0;
lock_sock(sk);
for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs; iovlen > 0;
iovlen--, iov++) {
unsigned long seglen = iov->iov_len;
char __user *from = iov->iov_base;
while (iov_iter_count(&msg->msg_iter)) {
sgl = list_first_entry(&ctx->tsgl,
struct skcipher_sg_list, list);
sg = sgl->sg;
while (seglen) {
sgl = list_first_entry(&ctx->tsgl,
struct skcipher_sg_list, list);
sg = sgl->sg;
while (!sg->length)
sg++;
while (!sg->length)
sg++;
if (!ctx->used) {
err = skcipher_wait_for_data(sk, flags);
if (err)
goto unlock;
}
used = min_t(unsigned long, ctx->used, seglen);
used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
err = used;
if (err < 0)
if (!ctx->used) {
err = skcipher_wait_for_data(sk, flags);
if (err)
goto unlock;
}
if (ctx->more || used < ctx->used)
used -= used % bs;
used = min_t(unsigned long, ctx->used, iov_iter_count(&msg->msg_iter));
err = -EINVAL;
if (!used)
goto free;
used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
err = used;
if (err < 0)
goto unlock;
ablkcipher_request_set_crypt(&ctx->req, sg,
ctx->rsgl.sg, used,
ctx->iv);
if (ctx->more || used < ctx->used)
used -= used % bs;
err = af_alg_wait_for_completion(
err = -EINVAL;
if (!used)
goto free;
ablkcipher_request_set_crypt(&ctx->req, sg,
ctx->rsgl.sg, used,
ctx->iv);
err = af_alg_wait_for_completion(
ctx->enc ?
crypto_ablkcipher_encrypt(&ctx->req) :
crypto_ablkcipher_decrypt(&ctx->req),
&ctx->completion);
free:
af_alg_free_sg(&ctx->rsgl);
af_alg_free_sg(&ctx->rsgl);
if (err)
goto unlock;
if (err)
goto unlock;
copied += used;
from += used;
seglen -= used;
skcipher_pull_sgl(sk, used);
}
copied += used;
skcipher_pull_sgl(sk, used);
iov_iter_advance(&msg->msg_iter, used);
}
err = 0;


@ -100,7 +100,6 @@ int acpi_bus_generate_netlink_event(const char *device_class,
struct acpi_genl_event *event;
void *msg_header;
int size;
int result;
/* allocate memory */
size = nla_total_size(sizeof(struct acpi_genl_event)) +
@ -137,11 +136,7 @@ int acpi_bus_generate_netlink_event(const char *device_class,
event->data = data;
/* send multicast genetlink message */
result = genlmsg_end(skb, msg_header);
if (result < 0) {
nlmsg_free(skb);
return result;
}
genlmsg_end(skb, msg_header);
genlmsg_multicast(&acpi_event_genl_family, skb, 0, 0, GFP_ATOMIC);
return 0;
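The resulting pattern, as a generic sketch (the family pointer and MY_CMD_EVENT command id are hypothetical): since genlmsg_end() no longer returns an error, the unwind path after finalizing the message disappears:

    #include <net/genetlink.h>

    #define MY_CMD_EVENT 1  /* hypothetical command id */

    static int notify_event(struct genl_family *family, struct sk_buff *skb)
    {
        void *hdr = genlmsg_put(skb, 0, 0, family, 0, MY_CMD_EVENT);

        if (!hdr) {
            nlmsg_free(skb);
            return -ENOMEM;
        }
        /* ... nla_put() attributes here ... */
        genlmsg_end(skb, hdr);   /* cannot fail anymore */
        return genlmsg_multicast(family, skb, 0, 0, GFP_ATOMIC);
    }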


@ -354,9 +354,9 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
eni_vcc = ENI_VCC(vcc);
paddr = 0; /* GCC, shut up */
if (skb) {
paddr = pci_map_single(eni_dev->pci_dev,skb->data,skb->len,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(eni_dev->pci_dev, paddr))
paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&eni_dev->pci_dev->dev, paddr))
goto dma_map_error;
ENI_PRV_PADDR(skb) = paddr;
if (paddr & 3)
@ -481,8 +481,8 @@ rx_enqueued++;
trouble:
if (paddr)
pci_unmap_single(eni_dev->pci_dev,paddr,skb->len,
PCI_DMA_FROMDEVICE);
dma_unmap_single(&eni_dev->pci_dev->dev,paddr,skb->len,
DMA_FROM_DEVICE);
dma_map_error:
if (skb) dev_kfree_skb_irq(skb);
return -1;
@ -758,8 +758,8 @@ rx_dequeued++;
}
eni_vcc->rxing--;
eni_vcc->rx_pos = ENI_PRV_POS(skb) & (eni_vcc->words-1);
pci_unmap_single(eni_dev->pci_dev,ENI_PRV_PADDR(skb),skb->len,
PCI_DMA_TODEVICE);
dma_unmap_single(&eni_dev->pci_dev->dev,ENI_PRV_PADDR(skb),skb->len,
DMA_TO_DEVICE);
if (!skb->len) dev_kfree_skb_irq(skb);
else {
EVENT("pushing (len=%ld)\n",skb->len,0);
@ -1112,8 +1112,8 @@ DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags);
vcc->dev->number);
return enq_jam;
}
paddr = pci_map_single(eni_dev->pci_dev,skb->data,skb->len,
PCI_DMA_TODEVICE);
paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len,
DMA_TO_DEVICE);
ENI_PRV_PADDR(skb) = paddr;
/* prepare DMA queue entries */
j = 0;
@ -1226,8 +1226,8 @@ static void dequeue_tx(struct atm_dev *dev)
break;
}
ENI_VCC(vcc)->txing -= ENI_PRV_SIZE(skb);
pci_unmap_single(eni_dev->pci_dev,ENI_PRV_PADDR(skb),skb->len,
PCI_DMA_TODEVICE);
dma_unmap_single(&eni_dev->pci_dev->dev,ENI_PRV_PADDR(skb),skb->len,
DMA_TO_DEVICE);
if (vcc->pop) vcc->pop(vcc,skb);
else dev_kfree_skb_irq(skb);
atomic_inc(&vcc->stats->tx);
@ -2240,13 +2240,18 @@ static int eni_init_one(struct pci_dev *pci_dev,
if (rc < 0)
goto out;
rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
if (rc < 0)
goto out;
rc = -ENOMEM;
eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL);
if (!eni_dev)
goto err_disable;
zero = &eni_dev->zero;
zero->addr = pci_alloc_consistent(pci_dev, ENI_ZEROES_SIZE, &zero->dma);
zero->addr = dma_alloc_coherent(&pci_dev->dev,
ENI_ZEROES_SIZE, &zero->dma, GFP_KERNEL);
if (!zero->addr)
goto err_kfree;
@ -2277,7 +2282,7 @@ err_eni_release:
err_unregister:
atm_dev_deregister(dev);
err_free_consistent:
pci_free_consistent(pci_dev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
dma_free_coherent(&pci_dev->dev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
err_kfree:
kfree(eni_dev);
err_disable:
@ -2302,7 +2307,7 @@ static void eni_remove_one(struct pci_dev *pdev)
eni_do_release(dev);
atm_dev_deregister(dev);
pci_free_consistent(pdev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
dma_free_coherent(&pdev->dev, ENI_ZEROES_SIZE, zero->addr, zero->dma);
kfree(ed);
pci_disable_device(pdev);
}
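The conversion pattern used across these ATM drivers, as a standalone sketch (xmit_one is a hypothetical helper, not driver code): map through the generic struct device, check the result with dma_mapping_error(), and unmap with a matching direction:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/pci.h>

    static int xmit_one(struct pci_dev *pdev, void *buf, size_t len)
    {
        /* streaming mapping for a device-bound buffer */
        dma_addr_t handle = dma_map_single(&pdev->dev, buf, len,
                                           DMA_TO_DEVICE);

        if (dma_mapping_error(&pdev->dev, handle))
            return -ENOMEM;

        /* ... hand 'handle' to the hardware, wait for completion ... */

        dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
        return 0;
    }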


@ -425,7 +425,7 @@ static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
static u32
fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
{
u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
u32 dma_addr = dma_map_single(&((struct pci_dev *) fore200e->bus_dev)->dev, virt_addr, size, direction);
DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
virt_addr, size, direction, dma_addr);
@ -440,7 +440,7 @@ fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int di
DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
dma_addr, size, direction);
pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
dma_unmap_single(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
}
@ -449,7 +449,7 @@ fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size,
{
DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
dma_sync_single_for_cpu(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
}
static void
@ -457,7 +457,7 @@ fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int si
{
DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
dma_sync_single_for_device(&((struct pci_dev *) fore200e->bus_dev)->dev, dma_addr, size, direction);
}
@ -470,9 +470,10 @@ fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
{
/* returned chunks are page-aligned */
chunk->alloc_size = size * nbr;
chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
chunk->alloc_size,
&chunk->dma_addr);
chunk->alloc_addr = dma_alloc_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev,
chunk->alloc_size,
&chunk->dma_addr,
GFP_KERNEL);
if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
return -ENOMEM;
@ -488,7 +489,7 @@ fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
static void
fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
dma_free_coherent(&((struct pci_dev *) fore200e->bus_dev)->dev,
chunk->alloc_size,
chunk->alloc_addr,
chunk->dma_addr);
@ -2707,6 +2708,11 @@ static int fore200e_pca_detect(struct pci_dev *pci_dev,
err = -EINVAL;
goto out;
}
if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
err = -EINVAL;
goto out;
}
fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
if (fore200e == NULL) {


@ -359,7 +359,7 @@ static int he_init_one(struct pci_dev *pci_dev,
if (pci_enable_device(pci_dev))
return -EIO;
if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)) != 0) {
if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)) != 0) {
printk(KERN_WARNING "he: no suitable dma available\n");
err = -EIO;
goto init_one_failure;
@ -533,9 +533,9 @@ static void he_init_tx_lbfp(struct he_dev *he_dev)
static int he_init_tpdrq(struct he_dev *he_dev)
{
he_dev->tpdrq_base = pci_zalloc_consistent(he_dev->pci_dev,
CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
&he_dev->tpdrq_phys);
he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
&he_dev->tpdrq_phys, GFP_KERNEL);
if (he_dev->tpdrq_base == NULL) {
hprintk("failed to alloc tpdrq\n");
return -ENOMEM;
@ -796,16 +796,16 @@ static int he_init_group(struct he_dev *he_dev, int group)
}
/* large buffer pool */
he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
he_dev->rbpl_pool = dma_pool_create("rbpl", &he_dev->pci_dev->dev,
CONFIG_RBPL_BUFSIZE, 64, 0);
if (he_dev->rbpl_pool == NULL) {
hprintk("unable to create rbpl pool\n");
goto out_free_rbpl_virt;
}
he_dev->rbpl_base = pci_zalloc_consistent(he_dev->pci_dev,
CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
&he_dev->rbpl_phys);
he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
&he_dev->rbpl_phys, GFP_KERNEL);
if (he_dev->rbpl_base == NULL) {
hprintk("failed to alloc rbpl_base\n");
goto out_destroy_rbpl_pool;
@ -815,7 +815,7 @@ static int he_init_group(struct he_dev *he_dev, int group)
for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL|GFP_DMA, &mapping);
heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
if (!heb)
goto out_free_rbpl;
heb->mapping = mapping;
@ -842,9 +842,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
/* rx buffer ready queue */
he_dev->rbrq_base = pci_zalloc_consistent(he_dev->pci_dev,
CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
&he_dev->rbrq_phys);
he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
&he_dev->rbrq_phys, GFP_KERNEL);
if (he_dev->rbrq_base == NULL) {
hprintk("failed to allocate rbrq\n");
goto out_free_rbpl;
@ -866,9 +866,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
/* tx buffer ready queue */
he_dev->tbrq_base = pci_zalloc_consistent(he_dev->pci_dev,
CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
&he_dev->tbrq_phys);
he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
&he_dev->tbrq_phys, GFP_KERNEL);
if (he_dev->tbrq_base == NULL) {
hprintk("failed to allocate tbrq\n");
goto out_free_rbpq_base;
@ -884,18 +884,18 @@ static int he_init_group(struct he_dev *he_dev, int group)
return 0;
out_free_rbpq_base:
pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE *
sizeof(struct he_rbrq), he_dev->rbrq_base,
he_dev->rbrq_phys);
dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE *
sizeof(struct he_rbrq), he_dev->rbrq_base,
he_dev->rbrq_phys);
out_free_rbpl:
list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE *
sizeof(struct he_rbp), he_dev->rbpl_base,
he_dev->rbpl_phys);
dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE *
sizeof(struct he_rbp), he_dev->rbpl_base,
he_dev->rbpl_phys);
out_destroy_rbpl_pool:
pci_pool_destroy(he_dev->rbpl_pool);
dma_pool_destroy(he_dev->rbpl_pool);
out_free_rbpl_virt:
kfree(he_dev->rbpl_virt);
out_free_rbpl_table:
@ -911,8 +911,11 @@ static int he_init_irq(struct he_dev *he_dev)
/* 2.9.3.5 tail offset for each interrupt queue is located after the
end of the interrupt queue */
he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
(CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
(CONFIG_IRQ_SIZE + 1)
* sizeof(struct he_irq),
&he_dev->irq_phys,
GFP_KERNEL);
if (he_dev->irq_base == NULL) {
hprintk("failed to allocate irq\n");
return -ENOMEM;
@ -1419,10 +1422,10 @@ static int he_start(struct atm_dev *dev)
he_init_tpdrq(he_dev);
he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
he_dev->tpd_pool = dma_pool_create("tpd", &he_dev->pci_dev->dev,
sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
if (he_dev->tpd_pool == NULL) {
hprintk("unable to create tpd pci_pool\n");
hprintk("unable to create tpd dma_pool\n");
return -ENOMEM;
}
@ -1459,9 +1462,9 @@ static int he_start(struct atm_dev *dev)
/* host status page */
he_dev->hsp = pci_zalloc_consistent(he_dev->pci_dev,
sizeof(struct he_hsp),
&he_dev->hsp_phys);
he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
sizeof(struct he_hsp),
&he_dev->hsp_phys, GFP_KERNEL);
if (he_dev->hsp == NULL) {
hprintk("failed to allocate host status page\n");
return -ENOMEM;
@ -1558,41 +1561,41 @@ he_stop(struct he_dev *he_dev)
free_irq(he_dev->irq, he_dev);
if (he_dev->irq_base)
pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
dma_free_coherent(&he_dev->pci_dev->dev, (CONFIG_IRQ_SIZE + 1)
* sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
if (he_dev->hsp)
pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
he_dev->hsp, he_dev->hsp_phys);
dma_free_coherent(&he_dev->pci_dev->dev, sizeof(struct he_hsp),
he_dev->hsp, he_dev->hsp_phys);
if (he_dev->rbpl_base) {
list_for_each_entry_safe(heb, next, &he_dev->rbpl_outstanding, entry)
pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBPL_SIZE
* sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
}
kfree(he_dev->rbpl_virt);
kfree(he_dev->rbpl_table);
if (he_dev->rbpl_pool)
pci_pool_destroy(he_dev->rbpl_pool);
dma_pool_destroy(he_dev->rbpl_pool);
if (he_dev->rbrq_base)
pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
he_dev->rbrq_base, he_dev->rbrq_phys);
dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
he_dev->rbrq_base, he_dev->rbrq_phys);
if (he_dev->tbrq_base)
pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
he_dev->tbrq_base, he_dev->tbrq_phys);
dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
he_dev->tbrq_base, he_dev->tbrq_phys);
if (he_dev->tpdrq_base)
pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
he_dev->tpdrq_base, he_dev->tpdrq_phys);
dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
he_dev->tpdrq_base, he_dev->tpdrq_phys);
if (he_dev->tpd_pool)
pci_pool_destroy(he_dev->tpd_pool);
dma_pool_destroy(he_dev->tpd_pool);
if (he_dev->pci_dev) {
pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
@ -1610,7 +1613,7 @@ __alloc_tpd(struct he_dev *he_dev)
struct he_tpd *tpd;
dma_addr_t mapping;
tpd = pci_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC|GFP_DMA, &mapping);
tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
if (tpd == NULL)
return NULL;
@ -1681,7 +1684,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
if (!RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
clear_bit(i, he_dev->rbpl_table);
list_del(&heb->entry);
pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
}
goto next_rbrq_entry;
@ -1774,7 +1777,7 @@ return_host_buffers:
++pdus_assembled;
list_for_each_entry_safe(heb, next, &he_vcc->buffers, entry)
pci_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
INIT_LIST_HEAD(&he_vcc->buffers);
he_vcc->pdu_len = 0;
@ -1843,10 +1846,10 @@ he_service_tbrq(struct he_dev *he_dev, int group)
for (slot = 0; slot < TPD_MAXIOV; ++slot) {
if (tpd->iovec[slot].addr)
pci_unmap_single(he_dev->pci_dev,
dma_unmap_single(&he_dev->pci_dev->dev,
tpd->iovec[slot].addr,
tpd->iovec[slot].len & TPD_LEN_MASK,
PCI_DMA_TODEVICE);
DMA_TO_DEVICE);
if (tpd->iovec[slot].len & TPD_LST)
break;
@ -1861,7 +1864,7 @@ he_service_tbrq(struct he_dev *he_dev, int group)
next_tbrq_entry:
if (tpd)
pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
he_dev->tbrq_head = (struct he_tbrq *)
((unsigned long) he_dev->tbrq_base |
TBRQ_MASK(he_dev->tbrq_head + 1));
@ -1905,7 +1908,7 @@ he_service_rbpl(struct he_dev *he_dev, int group)
}
he_dev->rbpl_hint = i + 1;
heb = pci_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC|GFP_DMA, &mapping);
heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
if (!heb)
break;
heb->mapping = mapping;
@ -2084,10 +2087,10 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
*/
for (slot = 0; slot < TPD_MAXIOV; ++slot) {
if (tpd->iovec[slot].addr)
pci_unmap_single(he_dev->pci_dev,
dma_unmap_single(&he_dev->pci_dev->dev,
tpd->iovec[slot].addr,
tpd->iovec[slot].len & TPD_LEN_MASK,
PCI_DMA_TODEVICE);
DMA_TO_DEVICE);
}
if (tpd->skb) {
if (tpd->vcc->pop)
@ -2096,7 +2099,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
dev_kfree_skb_any(tpd->skb);
atomic_inc(&tpd->vcc->stats->tx_err);
}
pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
return;
}
}
@ -2550,8 +2553,8 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
}
#ifdef USE_SCATTERGATHER
tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
skb_headlen(skb), PCI_DMA_TODEVICE);
tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
tpd->iovec[slot].len = skb_headlen(skb);
++slot;
@ -2579,9 +2582,9 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
slot = 0;
}
tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev,
(void *) page_address(frag->page) + frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
frag->size, DMA_TO_DEVICE);
tpd->iovec[slot].len = frag->size;
++slot;
@ -2589,7 +2592,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
tpd->iovec[slot - 1].len |= TPD_LST;
#else
tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
tpd->address0 = dma_map_single(&he_dev->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
tpd->length0 = skb->len | TPD_LST;
#endif
tpd->status |= TPD_INT;
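he.c also moves off the PCI pool wrappers; pci_pool_* maps one-for-one onto dma_pool_*, taking the struct device instead of the pci_dev and dropping GFP_DMA from allocations. A sketch of the new-style pool usage, under assumed names (my_pool_setup, MY_OBJ_SIZE):
#include <linux/pci.h>
#include <linux/dmapool.h>
#define MY_OBJ_SIZE 64	/* illustrative object size and alignment */
static void *my_pool_setup(struct pci_dev *pdev, struct dma_pool **pool,
			   dma_addr_t *dma)
{
	/* old: pci_pool_create("my", pdev, MY_OBJ_SIZE, MY_OBJ_SIZE, 0) */
	*pool = dma_pool_create("my", &pdev->dev, MY_OBJ_SIZE, MY_OBJ_SIZE, 0);
	if (!*pool)
		return NULL;
	/* old: pci_pool_alloc(*pool, GFP_KERNEL | GFP_DMA, dma) */
	return dma_pool_alloc(*pool, GFP_KERNEL, dma);
}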

View File

@ -281,7 +281,7 @@ struct he_dev {
int irq_peak;
struct tasklet_struct tasklet;
struct pci_pool *tpd_pool;
struct dma_pool *tpd_pool;
struct list_head outstanding_tpds;
dma_addr_t tpdrq_phys;
@ -296,7 +296,7 @@ struct he_dev {
struct he_buff **rbpl_virt;
unsigned long *rbpl_table;
unsigned long rbpl_hint;
struct pci_pool *rbpl_pool;
struct dma_pool *rbpl_pool;
dma_addr_t rbpl_phys;
struct he_rbp *rbpl_base, *rbpl_tail;
struct list_head rbpl_outstanding;

View File

@ -458,12 +458,6 @@ static inline void update_tx_channel_config (hrz_dev * dev, short chan, u8 mode,
return;
}
static inline u16 query_tx_channel_config (hrz_dev * dev, short chan, u8 mode) {
wr_regw (dev, TX_CHANNEL_CONFIG_COMMAND_OFF,
chan * TX_CHANNEL_CONFIG_MULT | mode);
return rd_regw (dev, TX_CHANNEL_CONFIG_DATA_OFF);
}
/********** dump functions **********/
static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
@ -513,16 +507,6 @@ static inline void dump_framer (hrz_dev * dev) {
/* RX channels are 10 bit integers, these fns are quite paranoid */
static inline int channel_to_vpivci (const u16 channel, short * vpi, int * vci) {
unsigned short vci_bits = 10 - vpi_bits;
if ((channel & RX_CHANNEL_MASK) == channel) {
*vci = channel & ((~0)<<vci_bits);
*vpi = channel >> vci_bits;
return channel ? 0 : -EINVAL;
}
return -EINVAL;
}
static inline int vpivci_to_channel (u16 * channel, const short vpi, const int vci) {
unsigned short vci_bits = 10 - vpi_bits;
if (0 <= vpi && vpi < 1<<vpi_bits && 0 <= vci && vci < 1<<vci_bits) {
@ -1260,14 +1244,6 @@ static u32 rx_queue_entry_next (hrz_dev * dev) {
return rx_queue_entry;
}
/********** handle RX disabled by device **********/
static inline void rx_disabled_handler (hrz_dev * dev) {
wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE);
// count me please
PRINTK (KERN_WARNING, "RX was disabled!");
}
/********** handle RX data received by device **********/
// called from IRQ handler

View File

@ -641,7 +641,8 @@ alloc_scq(struct idt77252_dev *card, int class)
scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);
if (!scq)
return NULL;
scq->base = pci_zalloc_consistent(card->pcidev, SCQ_SIZE, &scq->paddr);
scq->base = dma_zalloc_coherent(&card->pcidev->dev, SCQ_SIZE,
&scq->paddr, GFP_KERNEL);
if (scq->base == NULL) {
kfree(scq);
return NULL;
@ -669,12 +670,12 @@ free_scq(struct idt77252_dev *card, struct scq_info *scq)
struct sk_buff *skb;
struct atm_vcc *vcc;
pci_free_consistent(card->pcidev, SCQ_SIZE,
scq->base, scq->paddr);
dma_free_coherent(&card->pcidev->dev, SCQ_SIZE,
scq->base, scq->paddr);
while ((skb = skb_dequeue(&scq->transmit))) {
pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
skb->len, PCI_DMA_TODEVICE);
dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb->len, DMA_TO_DEVICE);
vcc = ATM_SKB(skb)->vcc;
if (vcc->pop)
@ -684,8 +685,8 @@ free_scq(struct idt77252_dev *card, struct scq_info *scq)
}
while ((skb = skb_dequeue(&scq->pending))) {
pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
skb->len, PCI_DMA_TODEVICE);
dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb->len, DMA_TO_DEVICE);
vcc = ATM_SKB(skb)->vcc;
if (vcc->pop)
@ -800,8 +801,8 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
if (skb) {
TXPRINTK("%s: freeing skb at %p.\n", card->name, skb);
pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
skb->len, PCI_DMA_TODEVICE);
dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb->len, DMA_TO_DEVICE);
vcc = ATM_SKB(skb)->vcc;
@ -846,8 +847,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc,
tbd = &IDT77252_PRV_TBD(skb);
vcc = ATM_SKB(skb)->vcc;
IDT77252_PRV_PADDR(skb) = pci_map_single(card->pcidev, skb->data,
skb->len, PCI_DMA_TODEVICE);
IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
error = -EINVAL;
@ -924,8 +925,8 @@ done:
return 0;
errout:
pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
skb->len, PCI_DMA_TODEVICE);
dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb->len, DMA_TO_DEVICE);
return error;
}
@ -970,8 +971,8 @@ init_rsq(struct idt77252_dev *card)
{
struct rsq_entry *rsqe;
card->rsq.base = pci_zalloc_consistent(card->pcidev, RSQSIZE,
&card->rsq.paddr);
card->rsq.base = dma_zalloc_coherent(&card->pcidev->dev, RSQSIZE,
&card->rsq.paddr, GFP_KERNEL);
if (card->rsq.base == NULL) {
printk("%s: can't allocate RSQ.\n", card->name);
return -1;
@ -1001,8 +1002,8 @@ init_rsq(struct idt77252_dev *card)
static void
deinit_rsq(struct idt77252_dev *card)
{
pci_free_consistent(card->pcidev, RSQSIZE,
card->rsq.base, card->rsq.paddr);
dma_free_coherent(&card->pcidev->dev, RSQSIZE,
card->rsq.base, card->rsq.paddr);
}
static void
@ -1057,9 +1058,9 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
vcc = vc->rx_vcc;
pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(skb),
skb_end_pointer(skb) - skb->data,
PCI_DMA_FROMDEVICE);
dma_sync_single_for_cpu(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb_end_pointer(skb) - skb->data,
DMA_FROM_DEVICE);
if ((vcc->qos.aal == ATM_AAL0) ||
(vcc->qos.aal == ATM_AAL34)) {
@ -1180,9 +1181,9 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
return;
}
pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb_end_pointer(skb) - skb->data,
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
sb_pool_remove(card, skb);
skb_trim(skb, len);
@ -1254,9 +1255,9 @@ idt77252_rx_raw(struct idt77252_dev *card)
head = IDT77252_PRV_PADDR(queue) + (queue->data - queue->head - 16);
tail = readl(SAR_REG_RAWCT);
pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
skb_end_offset(queue) - 16,
PCI_DMA_FROMDEVICE);
dma_sync_single_for_cpu(&card->pcidev->dev, IDT77252_PRV_PADDR(queue),
skb_end_offset(queue) - 16,
DMA_FROM_DEVICE);
while (head != tail) {
unsigned int vpi, vci;
@ -1348,11 +1349,11 @@ drop:
if (next) {
card->raw_cell_head = next;
queue = card->raw_cell_head;
pci_dma_sync_single_for_cpu(card->pcidev,
IDT77252_PRV_PADDR(queue),
(skb_end_pointer(queue) -
queue->data),
PCI_DMA_FROMDEVICE);
dma_sync_single_for_cpu(&card->pcidev->dev,
IDT77252_PRV_PADDR(queue),
(skb_end_pointer(queue) -
queue->data),
DMA_FROM_DEVICE);
} else {
card->raw_cell_head = NULL;
printk("%s: raw cell queue overrun\n",
@ -1375,8 +1376,8 @@ init_tsq(struct idt77252_dev *card)
{
struct tsq_entry *tsqe;
card->tsq.base = pci_alloc_consistent(card->pcidev, RSQSIZE,
&card->tsq.paddr);
card->tsq.base = dma_alloc_coherent(&card->pcidev->dev, RSQSIZE,
&card->tsq.paddr, GFP_KERNEL);
if (card->tsq.base == NULL) {
printk("%s: can't allocate TSQ.\n", card->name);
return -1;
@ -1398,8 +1399,8 @@ init_tsq(struct idt77252_dev *card)
static void
deinit_tsq(struct idt77252_dev *card)
{
pci_free_consistent(card->pcidev, TSQSIZE,
card->tsq.base, card->tsq.paddr);
dma_free_coherent(&card->pcidev->dev, TSQSIZE,
card->tsq.base, card->tsq.paddr);
}
static void
@ -1861,9 +1862,9 @@ add_rx_skb(struct idt77252_dev *card, int queue,
goto outfree;
}
paddr = pci_map_single(card->pcidev, skb->data,
paddr = dma_map_single(&card->pcidev->dev, skb->data,
skb_end_pointer(skb) - skb->data,
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
IDT77252_PRV_PADDR(skb) = paddr;
if (push_rx_skb(card, skb, queue)) {
@ -1875,8 +1876,8 @@ add_rx_skb(struct idt77252_dev *card, int queue,
return;
outunmap:
pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
skb_end_pointer(skb) - skb->data, PCI_DMA_FROMDEVICE);
dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE);
handle = IDT77252_PRV_POOL(skb);
card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;
@ -1892,15 +1893,15 @@ recycle_rx_skb(struct idt77252_dev *card, struct sk_buff *skb)
u32 handle = IDT77252_PRV_POOL(skb);
int err;
pci_dma_sync_single_for_device(card->pcidev, IDT77252_PRV_PADDR(skb),
skb_end_pointer(skb) - skb->data,
PCI_DMA_FROMDEVICE);
dma_sync_single_for_device(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb_end_pointer(skb) - skb->data,
DMA_FROM_DEVICE);
err = push_rx_skb(card, skb, POOL_QUEUE(handle));
if (err) {
pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb_end_pointer(skb) - skb->data,
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
sb_pool_remove(card, skb);
dev_kfree_skb(skb);
}
@ -3058,11 +3059,11 @@ deinit_card(struct idt77252_dev *card)
for (j = 0; j < FBQ_SIZE; j++) {
skb = card->sbpool[i].skb[j];
if (skb) {
pci_unmap_single(card->pcidev,
dma_unmap_single(&card->pcidev->dev,
IDT77252_PRV_PADDR(skb),
(skb_end_pointer(skb) -
skb->data),
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
card->sbpool[i].skb[j] = NULL;
dev_kfree_skb(skb);
}
@ -3076,8 +3077,8 @@ deinit_card(struct idt77252_dev *card)
vfree(card->vcs);
if (card->raw_cell_hnd) {
pci_free_consistent(card->pcidev, 2 * sizeof(u32),
card->raw_cell_hnd, card->raw_cell_paddr);
dma_free_coherent(&card->pcidev->dev, 2 * sizeof(u32),
card->raw_cell_hnd, card->raw_cell_paddr);
}
if (card->rsq.base) {
@ -3397,9 +3398,10 @@ static int init_card(struct atm_dev *dev)
writel(0, SAR_REG_GP);
/* Initialize RAW Cell Handle Register */
card->raw_cell_hnd = pci_zalloc_consistent(card->pcidev,
2 * sizeof(u32),
&card->raw_cell_paddr);
card->raw_cell_hnd = dma_zalloc_coherent(&card->pcidev->dev,
2 * sizeof(u32),
&card->raw_cell_paddr,
GFP_KERNEL);
if (!card->raw_cell_hnd) {
printk("%s: memory allocation failure.\n", card->name);
deinit_card(card);
@ -3611,6 +3613,11 @@ static int idt77252_init_one(struct pci_dev *pcidev,
return err;
}
if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) {
printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev));
return err;
}
card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);
if (!card) {
printk("idt77252-%d: can't allocate private data\n", index);

View File

@ -1185,8 +1185,8 @@ static int rx_pkt(struct atm_dev *dev)
/* Build the DLE structure */
wr_ptr = iadev->rx_dle_q.write;
wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
len, PCI_DMA_FROMDEVICE);
wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
len, DMA_FROM_DEVICE);
wr_ptr->local_pkt_addr = buf_addr;
wr_ptr->bytes = len; /* We don't know this do we ?? */
wr_ptr->mode = DMA_INT_ENABLE;
@ -1306,8 +1306,8 @@ static void rx_dle_intr(struct atm_dev *dev)
u_short length;
struct ia_vcc *ia_vcc;
pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
len, PCI_DMA_FROMDEVICE);
dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
len, DMA_FROM_DEVICE);
/* no VCC related housekeeping done as yet. lets see */
vcc = ATM_SKB(skb)->vcc;
if (!vcc) {
@ -1430,8 +1430,8 @@ static int rx_init(struct atm_dev *dev)
// spin_lock_init(&iadev->rx_lock);
/* Allocate 4k bytes - more aligned than needed (4k boundary) */
dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
&iadev->rx_dle_dma);
dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
&iadev->rx_dle_dma, GFP_KERNEL);
if (!dle_addr) {
printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
goto err_out;
@ -1631,8 +1631,8 @@ static int rx_init(struct atm_dev *dev)
return 0;
err_free_dle:
pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
iadev->rx_dle_dma);
dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
iadev->rx_dle_dma);
err_out:
return -ENOMEM;
}
@ -1702,8 +1702,8 @@ static void tx_dle_intr(struct atm_dev *dev)
/* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
PCI_DMA_TODEVICE);
dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
DMA_TO_DEVICE);
}
vcc = ATM_SKB(skb)->vcc;
if (!vcc) {
@ -1917,8 +1917,8 @@ static int tx_init(struct atm_dev *dev)
readw(iadev->seg_reg+SEG_MASK_REG));)
/* Allocate 4k (boundary aligned) bytes */
dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
&iadev->tx_dle_dma);
dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
&iadev->tx_dle_dma, GFP_KERNEL);
if (!dle_addr) {
printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
goto err_out;
@ -1989,8 +1989,10 @@ static int tx_init(struct atm_dev *dev)
goto err_free_tx_bufs;
}
iadev->tx_buf[i].cpcs = cpcs;
iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
cpcs,
sizeof(*cpcs),
DMA_TO_DEVICE);
}
iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
sizeof(struct desc_tbl_t), GFP_KERNEL);
@ -2198,14 +2200,14 @@ err_free_tx_bufs:
while (--i >= 0) {
struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
pci_unmap_single(iadev->pci, desc->dma_addr,
sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
sizeof(*desc->cpcs), DMA_TO_DEVICE);
kfree(desc->cpcs);
}
kfree(iadev->tx_buf);
err_free_dle:
pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
iadev->tx_dle_dma);
dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
iadev->tx_dle_dma);
err_out:
return -ENOMEM;
}
@ -2476,20 +2478,20 @@ static void ia_free_tx(IADEV *iadev)
for (i = 0; i < iadev->num_tx_desc; i++) {
struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
pci_unmap_single(iadev->pci, desc->dma_addr,
sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
sizeof(*desc->cpcs), DMA_TO_DEVICE);
kfree(desc->cpcs);
}
kfree(iadev->tx_buf);
pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
iadev->tx_dle_dma);
dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
iadev->tx_dle_dma);
}
static void ia_free_rx(IADEV *iadev)
{
kfree(iadev->rx_open);
pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
iadev->rx_dle_dma);
dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
iadev->rx_dle_dma);
}
static int ia_start(struct atm_dev *dev)
@ -3009,8 +3011,8 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
/* Build the DLE structure */
wr_ptr = iadev->tx_dle_q.write;
memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
skb->len, PCI_DMA_TODEVICE);
wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
skb->len, DMA_TO_DEVICE);
wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
buf_desc_ptr->buf_start_lo;
/* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */

View File

@ -346,7 +346,8 @@ static void lanai_buf_allocate(struct lanai_buffer *buf,
* everything, but the way the lanai uses DMA memory would
* make that a terrific pain. This is much simpler.
*/
buf->start = pci_alloc_consistent(pci, size, &buf->dmaaddr);
buf->start = dma_alloc_coherent(&pci->dev,
size, &buf->dmaaddr, GFP_KERNEL);
if (buf->start != NULL) { /* Success */
/* Lanai requires 256-byte alignment of DMA bufs */
APRINTK((buf->dmaaddr & ~0xFFFFFF00) == 0,
@ -372,8 +373,8 @@ static void lanai_buf_deallocate(struct lanai_buffer *buf,
struct pci_dev *pci)
{
if (buf->start != NULL) {
pci_free_consistent(pci, lanai_buf_size(buf),
buf->start, buf->dmaaddr);
dma_free_coherent(&pci->dev, lanai_buf_size(buf),
buf->start, buf->dmaaddr);
buf->start = buf->end = buf->ptr = NULL;
}
}
@ -681,15 +682,6 @@ static inline int aal5_size(int size)
return cells * 48;
}
/* How many bytes can we send if we have "space" space, assuming we have
* to send full cells
*/
static inline int aal5_spacefor(int space)
{
int cells = space / 48;
return cells * 48;
}
/* -------------------- FREE AN ATM SKB: */
static inline void lanai_free_skb(struct atm_vcc *atmvcc, struct sk_buff *skb)
@ -1953,12 +1945,7 @@ static int lanai_pci_start(struct lanai_dev *lanai)
return -ENXIO;
}
pci_set_master(pci);
if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) != 0) {
printk(KERN_WARNING DEV_LABEL
"(itf %d): No suitable DMA available.\n", lanai->number);
return -EBUSY;
}
if (pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)) != 0) {
if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32)) != 0) {
printk(KERN_WARNING DEV_LABEL
"(itf %d): No suitable DMA available.\n", lanai->number);
return -EBUSY;

View File

@ -252,10 +252,10 @@ static void nicstar_remove_one(struct pci_dev *pcidev)
free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
}
idr_destroy(&card->idr);
pci_free_consistent(card->pcidev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
card->rsq.org, card->rsq.dma);
pci_free_consistent(card->pcidev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
card->tsq.org, card->tsq.dma);
dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
card->rsq.org, card->rsq.dma);
dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
card->tsq.org, card->tsq.dma);
free_irq(card->pcidev->irq, card);
iounmap(card->membase);
kfree(card);
@ -370,8 +370,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
ns_init_card_error(card, error);
return error;
}
if ((pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0) ||
(pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)) != 0)) {
if (dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)) != 0) {
printk(KERN_WARNING
"nicstar%d: No suitable DMA available.\n", i);
error = 2;
@ -535,9 +534,9 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
writel(0x00000000, card->membase + VPM);
/* Initialize TSQ */
card->tsq.org = pci_alloc_consistent(card->pcidev,
NS_TSQSIZE + NS_TSQ_ALIGNMENT,
&card->tsq.dma);
card->tsq.org = dma_alloc_coherent(&card->pcidev->dev,
NS_TSQSIZE + NS_TSQ_ALIGNMENT,
&card->tsq.dma, GFP_KERNEL);
if (card->tsq.org == NULL) {
printk("nicstar%d: can't allocate TSQ.\n", i);
error = 10;
@ -554,9 +553,9 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base);
/* Initialize RSQ */
card->rsq.org = pci_alloc_consistent(card->pcidev,
NS_RSQSIZE + NS_RSQ_ALIGNMENT,
&card->rsq.dma);
card->rsq.org = dma_alloc_coherent(&card->pcidev->dev,
NS_RSQSIZE + NS_RSQ_ALIGNMENT,
&card->rsq.dma, GFP_KERNEL);
if (card->rsq.org == NULL) {
printk("nicstar%d: can't allocate RSQ.\n", i);
error = 11;
@ -874,7 +873,8 @@ static scq_info *get_scq(ns_dev *card, int size, u32 scd)
scq = kmalloc(sizeof(scq_info), GFP_KERNEL);
if (!scq)
return NULL;
scq->org = pci_alloc_consistent(card->pcidev, 2 * size, &scq->dma);
scq->org = dma_alloc_coherent(&card->pcidev->dev,
2 * size, &scq->dma, GFP_KERNEL);
if (!scq->org) {
kfree(scq);
return NULL;
@ -936,10 +936,10 @@ static void free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)
}
}
kfree(scq->skb);
pci_free_consistent(card->pcidev,
2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ?
VBR_SCQSIZE : CBR_SCQSIZE),
scq->org, scq->dma);
dma_free_coherent(&card->pcidev->dev,
2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ?
VBR_SCQSIZE : CBR_SCQSIZE),
scq->org, scq->dma);
kfree(scq);
}
@ -957,11 +957,11 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
handle2 = NULL;
addr2 = 0;
handle1 = skb;
addr1 = pci_map_single(card->pcidev,
addr1 = dma_map_single(&card->pcidev->dev,
skb->data,
(NS_PRV_BUFTYPE(skb) == BUF_SM
? NS_SMSKBSIZE : NS_LGSKBSIZE),
PCI_DMA_TODEVICE);
DMA_TO_DEVICE);
NS_PRV_DMA(skb) = addr1; /* save so we can unmap later */
#ifdef GENERAL_DEBUG
@ -1670,8 +1670,8 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
ATM_SKB(skb)->vcc = vcc;
NS_PRV_DMA(skb) = pci_map_single(card->pcidev, skb->data,
skb->len, PCI_DMA_TODEVICE);
NS_PRV_DMA(skb) = dma_map_single(&card->pcidev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
if (vcc->qos.aal == ATM_AAL5) {
buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */
@ -1930,10 +1930,10 @@ static void drain_scq(ns_dev * card, scq_info * scq, int pos)
XPRINTK("nicstar%d: freeing skb at 0x%p (index %d).\n",
card->index, skb, i);
if (skb != NULL) {
pci_unmap_single(card->pcidev,
dma_unmap_single(&card->pcidev->dev,
NS_PRV_DMA(skb),
skb->len,
PCI_DMA_TODEVICE);
DMA_TO_DEVICE);
vcc = ATM_SKB(skb)->vcc;
if (vcc && vcc->pop != NULL) {
vcc->pop(vcc, skb);
@ -1992,16 +1992,16 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
return;
}
idr_remove(&card->idr, id);
pci_dma_sync_single_for_cpu(card->pcidev,
NS_PRV_DMA(skb),
(NS_PRV_BUFTYPE(skb) == BUF_SM
? NS_SMSKBSIZE : NS_LGSKBSIZE),
PCI_DMA_FROMDEVICE);
pci_unmap_single(card->pcidev,
dma_sync_single_for_cpu(&card->pcidev->dev,
NS_PRV_DMA(skb),
(NS_PRV_BUFTYPE(skb) == BUF_SM
? NS_SMSKBSIZE : NS_LGSKBSIZE),
DMA_FROM_DEVICE);
dma_unmap_single(&card->pcidev->dev,
NS_PRV_DMA(skb),
(NS_PRV_BUFTYPE(skb) == BUF_SM
? NS_SMSKBSIZE : NS_LGSKBSIZE),
PCI_DMA_FROMDEVICE);
DMA_FROM_DEVICE);
vpi = ns_rsqe_vpi(rsqe);
vci = ns_rsqe_vci(rsqe);
if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) {

View File

@ -785,8 +785,8 @@ static void solos_bh(unsigned long card_arg)
skb = card->rx_skb[port];
card->rx_skb[port] = NULL;
pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr,
RX_DMA_SIZE, PCI_DMA_FROMDEVICE);
dma_unmap_single(&card->dev->dev, SKB_CB(skb)->dma_addr,
RX_DMA_SIZE, DMA_FROM_DEVICE);
header = (void *)skb->data;
size = le16_to_cpu(header->size);
@ -872,8 +872,8 @@ static void solos_bh(unsigned long card_arg)
struct sk_buff *skb = alloc_skb(RX_DMA_SIZE, GFP_ATOMIC);
if (skb) {
SKB_CB(skb)->dma_addr =
pci_map_single(card->dev, skb->data,
RX_DMA_SIZE, PCI_DMA_FROMDEVICE);
dma_map_single(&card->dev->dev, skb->data,
RX_DMA_SIZE, DMA_FROM_DEVICE);
iowrite32(SKB_CB(skb)->dma_addr,
card->config_regs + RX_DMA_ADDR(port));
card->rx_skb[port] = skb;
@ -1069,8 +1069,8 @@ static uint32_t fpga_tx(struct solos_card *card)
if (tx_pending & 1) {
struct sk_buff *oldskb = card->tx_skb[port];
if (oldskb) {
pci_unmap_single(card->dev, SKB_CB(oldskb)->dma_addr,
oldskb->len, PCI_DMA_TODEVICE);
dma_unmap_single(&card->dev->dev, SKB_CB(oldskb)->dma_addr,
oldskb->len, DMA_TO_DEVICE);
card->tx_skb[port] = NULL;
}
spin_lock(&card->tx_queue_lock);
@ -1089,8 +1089,8 @@ static uint32_t fpga_tx(struct solos_card *card)
data = card->dma_bounce + (BUF_SIZE * port);
memcpy(data, skb->data, skb->len);
}
SKB_CB(skb)->dma_addr = pci_map_single(card->dev, data,
skb->len, PCI_DMA_TODEVICE);
SKB_CB(skb)->dma_addr = dma_map_single(&card->dev->dev, data,
skb->len, DMA_TO_DEVICE);
card->tx_skb[port] = skb;
iowrite32(SKB_CB(skb)->dma_addr,
card->config_regs + TX_DMA_ADDR(port));
@ -1210,7 +1210,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto out;
}
err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
if (err) {
dev_warn(&dev->dev, "Failed to set 32-bit DMA mask\n");
goto out;
@ -1411,14 +1411,14 @@ static void atm_remove(struct solos_card *card)
skb = card->rx_skb[i];
if (skb) {
pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr,
RX_DMA_SIZE, PCI_DMA_FROMDEVICE);
dma_unmap_single(&card->dev->dev, SKB_CB(skb)->dma_addr,
RX_DMA_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
}
skb = card->tx_skb[i];
if (skb) {
pci_unmap_single(card->dev, SKB_CB(skb)->dma_addr,
skb->len, PCI_DMA_TODEVICE);
dma_unmap_single(&card->dev->dev, SKB_CB(skb)->dma_addr,
skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
}
while ((skb = skb_dequeue(&card->tx_queue[i])))

View File

@ -1306,19 +1306,20 @@ static int zatm_start(struct atm_dev *dev)
if (!mbx_entries[i])
continue;
mbx = pci_alloc_consistent(pdev, 2*MBX_SIZE(i), &mbx_dma);
mbx = dma_alloc_coherent(&pdev->dev,
2 * MBX_SIZE(i), &mbx_dma, GFP_KERNEL);
if (!mbx) {
error = -ENOMEM;
goto out;
}
/*
* Alignment provided by pci_alloc_consistent() isn't enough
* Alignment provided by dma_alloc_coherent() isn't enough
* for this device.
*/
if (((unsigned long)mbx ^ mbx_dma) & 0xffff) {
printk(KERN_ERR DEV_LABEL "(itf %d): system "
"bus incompatible with driver\n", dev->number);
pci_free_consistent(pdev, 2*MBX_SIZE(i), mbx, mbx_dma);
dma_free_coherent(&pdev->dev, 2*MBX_SIZE(i), mbx, mbx_dma);
error = -ENODEV;
goto out;
}
@ -1354,9 +1355,9 @@ out_tx:
kfree(zatm_dev->tx_map);
out:
while (i-- > 0) {
pci_free_consistent(pdev, 2*MBX_SIZE(i),
(void *)zatm_dev->mbx_start[i],
zatm_dev->mbx_dma[i]);
dma_free_coherent(&pdev->dev, 2 * MBX_SIZE(i),
(void *)zatm_dev->mbx_start[i],
zatm_dev->mbx_dma[i]);
}
free_irq(zatm_dev->irq, dev);
goto done;
@ -1608,6 +1609,10 @@ static int zatm_init_one(struct pci_dev *pci_dev,
if (ret < 0)
goto out_disable;
ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
if (ret < 0)
goto out_disable;
zatm_dev->pci_dev = pci_dev;
dev->dev_data = zatm_dev;
zatm_dev->copper = (int)ent->driver_data;
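zatm picks up the same probe-time change as he, lanai, nicstar, solos and idt77252 above: dma_set_mask_and_coherent() sets the streaming and coherent masks in one call, replacing the pci_set_dma_mask()/pci_set_consistent_dma_mask() pair. A sketch of the idiom (my_probe and its error handling are illustrative):
#include <linux/pci.h>
#include <linux/dma-mapping.h>
static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;
	ret = pci_enable_device(pdev);
	if (ret < 0)
		return ret;
	/* one call now covers both the streaming and the coherent mask */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret < 0) {
		pci_disable_device(pdev);
		return ret;
	}
	return 0;
}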

View File

@ -25,22 +25,18 @@ struct bcma_bus;
bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
int timeout);
void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core);
void bcma_init_bus(struct bcma_bus *bus);
int bcma_bus_register(struct bcma_bus *bus);
void bcma_bus_unregister(struct bcma_bus *bus);
int __init bcma_bus_early_register(struct bcma_bus *bus,
struct bcma_device *core_cc,
struct bcma_device *core_mips);
int __init bcma_bus_early_register(struct bcma_bus *bus);
#ifdef CONFIG_PM
int bcma_bus_suspend(struct bcma_bus *bus);
int bcma_bus_resume(struct bcma_bus *bus);
#endif
/* scan.c */
void bcma_detect_chip(struct bcma_bus *bus);
int bcma_bus_scan(struct bcma_bus *bus);
int __init bcma_bus_scan_early(struct bcma_bus *bus,
struct bcma_device_id *match,
struct bcma_device *core);
void bcma_init_bus(struct bcma_bus *bus);
/* sprom.c */
int bcma_sprom_get(struct bcma_bus *bus);
@ -111,6 +107,14 @@ extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc);
#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc);
void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
#else
static inline bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
{
return false;
}
static inline void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
{
}
#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
#ifdef CONFIG_BCMA_DRIVER_GPIO

View File

@ -79,7 +79,9 @@ static int bcma_chipco_watchdog_ticks_per_ms(struct bcma_drv_cc *cc)
if (cc->capabilities & BCMA_CC_CAP_PMU) {
if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
/* 4706 CC and PMU watchdogs are clocked at 1/4 of ALP clock */
/* 4706 CC and PMU watchdogs are clocked at 1/4 of ALP
* clock
*/
return bcma_chipco_get_alp_clock(cc) / 4000;
else
/* based on 32KHz ILP clock */
@ -97,7 +99,8 @@ int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
wdt.driver_data = cc;
wdt.timer_set = bcma_chipco_watchdog_timer_set_wdt;
wdt.timer_set_ms = bcma_chipco_watchdog_timer_set_ms_wdt;
wdt.max_timer_ms = bcma_chipco_watchdog_get_max_timer(cc) / cc->ticks_per_ms;
wdt.max_timer_ms =
bcma_chipco_watchdog_get_max_timer(cc) / cc->ticks_per_ms;
pdev = platform_device_register_data(NULL, "bcm47xx-wdt",
cc->core->bus->num, &wdt,
@ -175,7 +178,6 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
{
u32 maxt;
enum bcma_clkmode clkmode;
maxt = bcma_chipco_watchdog_get_max_timer(cc);
if (cc->capabilities & BCMA_CC_CAP_PMU) {
@ -185,8 +187,13 @@ u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
ticks = maxt;
bcma_cc_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks);
} else {
clkmode = ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC;
bcma_core_set_clockmode(cc->core, clkmode);
struct bcma_bus *bus = cc->core->bus;
if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4707 &&
bus->chipinfo.id != BCMA_CHIP_ID_BCM53018)
bcma_core_set_clockmode(cc->core,
ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC);
if (ticks > maxt)
ticks = maxt;
/* instant NMI */
@ -335,7 +342,8 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
| BCMA_CC_CORECTL_UARTCLKEN);
}
} else {
bcma_err(cc->core->bus, "serial not supported on this device ccrev: 0x%x\n", ccrev);
bcma_err(cc->core->bus, "serial not supported on this device ccrev: 0x%x\n",
ccrev);
return;
}
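The cached cc->ticks_per_ms value is what keeps the millisecond-based watchdog interface above simple: on a BCM4706 the PMU watchdog ticks at a quarter of the ALP clock, otherwise at the 32 kHz ILP clock, and the ms setter only multiplies. A sketch of the conversion (field and function names come from the patch, the wrapper itself is illustrative):
static u32 my_wdt_set_ms(struct bcma_drv_cc *cc, u32 ms)
{
	/* ticks_per_ms was derived once from the ALP or ILP clock rate */
	u32 ticks = bcma_chipco_watchdog_timer_set(cc, ms * cc->ticks_per_ms);
	return ticks / cc->ticks_per_ms;	/* milliseconds actually set */
}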

View File

@ -144,6 +144,47 @@ static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device,
return bcma_pcie_mdio_read(pc, device, address);
}
/**************************************************
* Early init.
**************************************************/
static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
{
struct bcma_device *core = pc->core;
u16 val16, core_index;
uint regoff;
regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
core_index = (u16)core->core_index;
val16 = pcicore_read16(pc, regoff);
if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT)
!= core_index) {
val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
(val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
pcicore_write16(pc, regoff, val16);
}
}
/*
* Apply some early fixes required before accessing SPROM.
* See also si_pci_fixcfg.
*/
void bcma_core_pci_early_init(struct bcma_drv_pci *pc)
{
if (pc->early_setup_done)
return;
pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
if (pc->hostmode)
goto out;
bcma_core_pci_fixcfg(pc);
out:
pc->early_setup_done = true;
}
/**************************************************
* Workarounds.
**************************************************/
@ -175,24 +216,6 @@ static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
}
static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
{
struct bcma_device *core = pc->core;
u16 val16, core_index;
uint regoff;
regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
core_index = (u16)core->core_index;
val16 = pcicore_read16(pc, regoff);
if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT)
!= core_index) {
val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
(val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
pcicore_write16(pc, regoff, val16);
}
}
/* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
@ -216,7 +239,6 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
bcma_core_pci_fixcfg(pc);
bcma_pcicore_serdes_workaround(pc);
bcma_core_pci_config_fixup(pc);
}
@ -226,13 +248,11 @@ void bcma_core_pci_init(struct bcma_drv_pci *pc)
if (pc->setup_done)
return;
#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
bcma_core_pci_early_init(pc);
if (pc->hostmode)
bcma_core_pci_hostmode_init(pc);
#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
if (!pc->hostmode)
else
bcma_core_pci_clientmode_init(pc);
}

View File

@ -13,10 +13,12 @@
static void bcma_host_pci_switch_core(struct bcma_device *core)
{
int win2 = core->bus->host_is_pcie2 ?
BCMA_PCIE2_BAR0_WIN2 : BCMA_PCI_BAR0_WIN2;
pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN,
core->addr);
pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2,
core->wrap);
pci_write_config_dword(core->bus->host_pci, win2, core->wrap);
core->bus->mapped_core = core;
bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id);
}

View File

@ -193,7 +193,7 @@ int __init bcma_host_soc_init(struct bcma_soc *soc)
int err;
/* Scan bus and initialize it */
err = bcma_bus_early_register(bus, &soc->core_cc, &soc->core_mips);
err = bcma_bus_early_register(bus);
if (err)
iounmap(bus->mmio);

View File

@ -268,6 +268,18 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
}
}
void bcma_init_bus(struct bcma_bus *bus)
{
mutex_lock(&bcma_buses_mutex);
bus->num = bcma_bus_next_num++;
mutex_unlock(&bcma_buses_mutex);
INIT_LIST_HEAD(&bus->cores);
bus->nr_cores = 0;
bcma_detect_chip(bus);
}
static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
int err;
@ -356,12 +368,19 @@ static void bcma_unregister_cores(struct bcma_bus *bus)
struct bcma_device *core, *tmp;
list_for_each_entry_safe(core, tmp, &bus->cores, list) {
if (!core->dev_registered)
continue;
list_del(&core->list);
if (core->dev_registered)
device_unregister(&core->dev);
device_unregister(&core->dev);
}
if (bus->hosttype == BCMA_HOSTTYPE_SOC)
platform_device_unregister(bus->drv_cc.watchdog);
/* Now noone uses internally-handled cores, we can free them */
list_for_each_entry_safe(core, tmp, &bus->cores, list) {
list_del(&core->list);
kfree(core);
}
}
int bcma_bus_register(struct bcma_bus *bus)
@ -369,10 +388,6 @@ int bcma_bus_register(struct bcma_bus *bus)
int err;
struct bcma_device *core;
mutex_lock(&bcma_buses_mutex);
bus->num = bcma_bus_next_num++;
mutex_unlock(&bcma_buses_mutex);
/* Scan for devices (cores) */
err = bcma_bus_scan(bus);
if (err) {
@ -387,6 +402,13 @@ int bcma_bus_register(struct bcma_bus *bus)
bcma_core_chipcommon_early_init(&bus->drv_cc);
}
/* Early init PCIE core */
core = bcma_find_core(bus, BCMA_CORE_PCIE);
if (core) {
bus->drv_pci[0].core = core;
bcma_core_pci_early_init(&bus->drv_pci[0]);
}
/* Cores providing flash access go before SPROM init */
list_for_each_entry(core, &bus->cores, list) {
if (bcma_is_core_needed_early(core->id.id))
@ -459,7 +481,6 @@ int bcma_bus_register(struct bcma_bus *bus)
void bcma_bus_unregister(struct bcma_bus *bus)
{
struct bcma_device *cores[3];
int err;
err = bcma_gpio_unregister(&bus->drv_cc);
@ -470,46 +491,23 @@ void bcma_bus_unregister(struct bcma_bus *bus)
bcma_core_chipcommon_b_free(&bus->drv_cc_b);
cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
cores[2] = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
bcma_unregister_cores(bus);
kfree(cores[2]);
kfree(cores[1]);
kfree(cores[0]);
}
int __init bcma_bus_early_register(struct bcma_bus *bus,
struct bcma_device *core_cc,
struct bcma_device *core_mips)
/*
* This is a special version of bus registration function designed for SoCs.
* It scans bus and performs basic initialization of main cores only.
* Please note it requires memory allocation, however it won't try to sleep.
*/
int __init bcma_bus_early_register(struct bcma_bus *bus)
{
int err;
struct bcma_device *core;
struct bcma_device_id match;
match.manuf = BCMA_MANUF_BCM;
match.id = bcma_cc_core_id(bus);
match.class = BCMA_CL_SIM;
match.rev = BCMA_ANY_REV;
/* Scan for chip common core */
err = bcma_bus_scan_early(bus, &match, core_cc);
/* Scan for devices (cores) */
err = bcma_bus_scan(bus);
if (err) {
bcma_err(bus, "Failed to scan for common core: %d\n", err);
return -1;
}
match.manuf = BCMA_MANUF_MIPS;
match.id = BCMA_CORE_MIPS_74K;
match.class = BCMA_CL_SIM;
match.rev = BCMA_ANY_REV;
/* Scan for mips core */
err = bcma_bus_scan_early(bus, &match, core_mips);
if (err) {
bcma_err(bus, "Failed to scan for mips core: %d\n", err);
bcma_err(bus, "Failed to scan bus: %d\n", err);
return -1;
}
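Together with the scan.c changes below, this collapses SoC bring-up to a single scan: bcma_init_bus() assigns the bus number and detects the chip, bcma_bus_early_register() runs the one full bcma_bus_scan(), and the later bcma_bus_register() skips re-scanning because bus->nr_cores is already non-zero. A sketch of the resulting call order (error handling abbreviated, the caller is illustrative):
static int __init my_soc_bringup(struct bcma_bus *bus)
{
	int err;
	bcma_init_bus(bus);			/* bus->num + bcma_detect_chip() */
	err = bcma_bus_early_register(bus);	/* the only full scan */
	if (err)
		return err;
	return bcma_bus_register(bus);		/* scan skipped: nr_cores != 0 */
}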

View File

@ -435,15 +435,12 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
return 0;
}
void bcma_init_bus(struct bcma_bus *bus)
void bcma_detect_chip(struct bcma_bus *bus)
{
s32 tmp;
struct bcma_chipinfo *chipinfo = &(bus->chipinfo);
char chip_id[8];
INIT_LIST_HEAD(&bus->cores);
bus->nr_cores = 0;
bcma_scan_switch_core(bus, BCMA_ADDR_BASE);
tmp = bcma_scan_read32(bus, 0, BCMA_CC_ID);
@ -464,6 +461,10 @@ int bcma_bus_scan(struct bcma_bus *bus)
int err, core_num = 0;
/* Skip if bus was already scanned (e.g. during early register) */
if (bus->nr_cores)
return 0;
erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM);
if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE);
@ -522,61 +523,3 @@ out:
return err;
}
int __init bcma_bus_scan_early(struct bcma_bus *bus,
struct bcma_device_id *match,
struct bcma_device *core)
{
u32 erombase;
u32 __iomem *eromptr, *eromend;
int err = -ENODEV;
int core_num = 0;
erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM);
if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE);
if (!eromptr)
return -ENOMEM;
} else {
eromptr = bus->mmio;
}
eromend = eromptr + BCMA_CORE_SIZE / sizeof(u32);
bcma_scan_switch_core(bus, erombase);
while (eromptr < eromend) {
memset(core, 0, sizeof(*core));
INIT_LIST_HEAD(&core->list);
core->bus = bus;
err = bcma_get_next_core(bus, &eromptr, match, core_num, core);
if (err == -ENODEV) {
core_num++;
continue;
} else if (err == -ENXIO)
continue;
else if (err == -ESPIPE)
break;
else if (err < 0)
goto out;
core->core_index = core_num++;
bus->nr_cores++;
bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
core->core_index, bcma_device_name(&core->id),
core->id.manuf, core->id.id, core->id.rev,
core->id.class);
list_add_tail(&core->list, &bus->cores);
err = 0;
break;
}
out:
if (bus->hosttype == BCMA_HOSTTYPE_SOC)
iounmap(eromptr);
return err;
}

View File

@ -579,7 +579,8 @@ int bcma_sprom_get(struct bcma_bus *bus)
u16 offset = BCMA_CC_SPROM;
u16 *sprom;
size_t sprom_sizes[] = { SSB_SPROMSIZE_WORDS_R4,
SSB_SPROMSIZE_WORDS_R10, };
SSB_SPROMSIZE_WORDS_R10,
SSB_SPROMSIZE_WORDS_R11, };
int i, err = 0;
if (!bus->drv_cc.core)

View File

@ -108,6 +108,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3393) },
{ USB_DEVICE(0x13d3, 0x3402) },
{ USB_DEVICE(0x13d3, 0x3408) },
{ USB_DEVICE(0x13d3, 0x3423) },
{ USB_DEVICE(0x13d3, 0x3432) },
/* Atheros AR5BBU12 with sflash firmware */
@ -162,6 +163,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
@ -174,6 +176,8 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
#define USB_REQ_DFU_DNLOAD 1
#define BULK_SIZE 4096
#define FW_HDR_SIZE 20
#define TIMEGAP_USEC_MIN 50
#define TIMEGAP_USEC_MAX 100
static int ath3k_load_firmware(struct usb_device *udev,
const struct firmware *firmware)
@ -205,6 +209,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
pipe = usb_sndbulkpipe(udev, 0x02);
while (count) {
/* workaround the compatibility issue with xHCI controller*/
usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
size = min_t(uint, count, BULK_SIZE);
memcpy(send_buf, firmware->data + sent, size);
@ -302,6 +309,9 @@ static int ath3k_load_fwfile(struct usb_device *udev,
pipe = usb_sndbulkpipe(udev, 0x02);
while (count) {
/* workaround the compatibility issue with xHCI controller*/
usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
size = min_t(uint, count, BULK_SIZE);
memcpy(send_buf, firmware->data + sent, size);

View File

@ -696,6 +696,8 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
hdev->flush = bfusb_flush;
hdev->send = bfusb_send_frame;
set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
hci_free_dev(hdev);

View File

@ -28,9 +28,9 @@
#define BTM_UPLD_SIZE 2312
/* Time to wait until Host Sleep state change in millisecond */
#define WAIT_UNTIL_HS_STATE_CHANGED 5000
#define WAIT_UNTIL_HS_STATE_CHANGED msecs_to_jiffies(5000)
/* Time to wait for command response in millisecond */
#define WAIT_UNTIL_CMD_RESP 5000
#define WAIT_UNTIL_CMD_RESP msecs_to_jiffies(5000)
enum rdwr_status {
RDWR_STATUS_SUCCESS = 0,
@ -104,6 +104,7 @@ struct btmrvl_private {
#ifdef CONFIG_DEBUG_FS
void *debugfs_data;
#endif
bool surprise_removed;
};
#define MRVL_VENDOR_PKT 0xFE
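Folding msecs_to_jiffies() into these macros fixes a unit mismatch waiting to happen: wait_event_interruptible_timeout() takes its timeout in jiffies, so call sites no longer need to remember the conversion. A sketch of the corrected usage (my_wq and my_done are illustrative):
#include <linux/jiffies.h>
#include <linux/wait.h>
#define MY_CMD_TIMEOUT	msecs_to_jiffies(5000)	/* illustrative */
static int my_wait_for_cmd(wait_queue_head_t *my_wq, bool *my_done)
{
	/* the macro already converted to jiffies; pass it straight in */
	long ret = wait_event_interruptible_timeout(*my_wq, *my_done,
						    MY_CMD_TIMEOUT);
	if (!ret)
		return -ETIMEDOUT;
	if (ret < 0)
		return ret;	/* interrupted by a signal */
	return 0;
}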

View File

@ -178,6 +178,11 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
struct sk_buff *skb;
struct hci_command_hdr *hdr;
if (priv->surprise_removed) {
BT_ERR("Card is removed");
return -EFAULT;
}
skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_ATOMIC);
if (skb == NULL) {
BT_ERR("No free skb");
@ -202,10 +207,14 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
wake_up_interruptible(&priv->main_thread.wait_q);
if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
priv->adapter->cmd_complete,
msecs_to_jiffies(WAIT_UNTIL_CMD_RESP)))
priv->adapter->cmd_complete ||
priv->surprise_removed,
WAIT_UNTIL_CMD_RESP))
return -ETIMEDOUT;
if (priv->surprise_removed)
return -EFAULT;
return 0;
}
@ -287,9 +296,10 @@ int btmrvl_enable_hs(struct btmrvl_private *priv)
}
ret = wait_event_interruptible_timeout(adapter->event_hs_wait_q,
adapter->hs_state,
msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED));
if (ret < 0) {
adapter->hs_state ||
priv->surprise_removed,
WAIT_UNTIL_HS_STATE_CHANGED);
if (ret < 0 || priv->surprise_removed) {
BT_ERR("event_hs_wait_q terminated (%d): %d,%d,%d",
ret, adapter->hs_state, adapter->ps_state,
adapter->wakeup_tries);
@ -538,8 +548,11 @@ static int btmrvl_check_device_tree(struct btmrvl_private *priv)
static int btmrvl_setup(struct hci_dev *hdev)
{
struct btmrvl_private *priv = hci_get_drvdata(hdev);
int ret;
btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
ret = btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
if (ret)
return ret;
priv->btmrvl_dev.gpio_gap = 0xffff;
@ -597,7 +610,7 @@ static int btmrvl_service_main_thread(void *data)
add_wait_queue(&thread->wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
if (kthread_should_stop()) {
if (kthread_should_stop() || priv->surprise_removed) {
BT_DBG("main_thread: break from main thread");
break;
}
@ -616,6 +629,11 @@ static int btmrvl_service_main_thread(void *data)
BT_DBG("main_thread woke up");
if (kthread_should_stop() || priv->surprise_removed) {
BT_DBG("main_thread: break from main thread");
break;
}
spin_lock_irqsave(&priv->driver_lock, flags);
if (adapter->int_count) {
adapter->int_count = 0;

View File

@ -573,7 +573,7 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card)
offset += txlen;
} while (true);
BT_DBG("FW download over, size %d bytes", offset);
BT_INFO("FW download over, size %d bytes", offset);
ret = 0;
@ -798,6 +798,9 @@ static void btmrvl_sdio_interrupt(struct sdio_func *func)
priv = card->priv;
if (priv->surprise_removed)
return;
if (card->reg->int_read_to_clear)
ret = btmrvl_sdio_read_to_clear(card, &ireg);
else
@ -1466,6 +1469,7 @@ static void btmrvl_sdio_remove(struct sdio_func *func)
btmrvl_sdio_disable_host_int(card);
}
BT_DBG("unregester dev");
card->priv->surprise_removed = true;
btmrvl_sdio_unregister_dev(card);
btmrvl_remove_card(card->priv);
}
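The ordering in btmrvl_sdio_remove() is the whole point of the new surprise_removed flag: it is raised before any teardown, so the interrupt handler, the main thread loop and the sleeping cmd/host-sleep waiters all bail out instead of touching a card that is gone. A sketch of the pattern with hypothetical my_* names:
static void my_remove(struct sdio_func *func)
{
	struct my_card *card = sdio_get_drvdata(func);
	/* flag first: checked in the IRQ handler, in the main thread
	 * loop and in every wait_event condition added above */
	card->priv->surprise_removed = true;
	my_unregister_dev(card);	/* releases the IRQ, detaches */
	my_remove_card(card->priv);	/* stops the thread, frees state */
}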

View File

@ -28,7 +28,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#define VERSION "0.6"
#define VERSION "0.7"
static bool disable_scofix;
static bool force_scofix;
@ -49,11 +49,17 @@ static struct usb_driver btusb_driver;
#define BTUSB_INTEL_BOOT 0x200
#define BTUSB_BCM_PATCHRAM 0x400
#define BTUSB_MARVELL 0x800
#define BTUSB_SWAVE 0x1000
#define BTUSB_INTEL_NEW 0x2000
#define BTUSB_AMP 0x4000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
/* Generic Bluetooth AMP device */
{ USB_DEVICE_INFO(0xe0, 0x01, 0x04), .driver_info = BTUSB_AMP },
/* Apple-specific (Broadcom) devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
@ -85,7 +91,7 @@ static const struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x05ac, 0x8281) },
/* AVM BlueFRITZ! USB v2.0 */
{ USB_DEVICE(0x057c, 0x3800) },
{ USB_DEVICE(0x057c, 0x3800), .driver_info = BTUSB_SWAVE },
/* Bluetooth Ultraport Module from IBM */
{ USB_DEVICE(0x04bf, 0x030a) },
@ -109,16 +115,24 @@ static const struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x13d3, 0x3404),
.driver_info = BTUSB_BCM_PATCHRAM },
/* Broadcom BCM20702B0 (Dynex/Insignia) */
{ USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
/* Foxconn - Hon Hai */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM },
/* Lite-On Technology - Broadcom based */
{ USB_VENDOR_AND_INTERFACE_INFO(0x04ca, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM },
/* Broadcom devices with vendor specific id */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM },
/* ASUSTek Computer - Broadcom based */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM },
/* Belkin F8065bf - Broadcom based */
{ USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
@ -188,6 +202,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
@ -237,6 +252,9 @@ static const struct usb_device_id blacklist_table[] = {
/* CONWISE Technology based adapters with buggy SCO support */
{ USB_DEVICE(0x0e5e, 0x6622), .driver_info = BTUSB_BROKEN_ISOC },
/* Roper Class 1 Bluetooth Dongle (Silicon Wave based) */
{ USB_DEVICE(0x1300, 0x0001), .driver_info = BTUSB_SWAVE },
/* Digianswer devices */
{ USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER },
{ USB_DEVICE(0x08fd, 0x0002), .driver_info = BTUSB_IGNORE },
@ -249,14 +267,19 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x16d3, 0x0002),
.driver_info = BTUSB_SNIFFER | BTUSB_BROKEN_ISOC },
/* Intel Bluetooth device */
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
/* Marvell device */
/* Marvell Bluetooth devices */
{ USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL },
{ USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
/* Intel Bluetooth devices */
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
/* Other Intel Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_IGNORE },
{ } /* Terminating entry */
};
@ -267,6 +290,11 @@ static const struct usb_device_id blacklist_table[] = {
#define BTUSB_ISOC_RUNNING 2
#define BTUSB_SUSPENDING 3
#define BTUSB_DID_ISO_RESUME 4
#define BTUSB_BOOTLOADER 5
#define BTUSB_DOWNLOADING 6
#define BTUSB_FIRMWARE_LOADED 7
#define BTUSB_FIRMWARE_FAILED 8
#define BTUSB_BOOTING 9
struct btusb_data {
struct hci_dev *hdev;
@ -300,14 +328,26 @@ struct btusb_data {
struct usb_endpoint_descriptor *isoc_rx_ep;
__u8 cmdreq_type;
__u8 cmdreq;
unsigned int sco_num;
int isoc_altsetting;
int suspend_count;
int (*recv_event)(struct hci_dev *hdev, struct sk_buff *skb);
int (*recv_bulk)(struct btusb_data *data, void *buffer, int count);
};
static int btusb_wait_on_bit_timeout(void *word, int bit, unsigned long timeout,
unsigned mode)
{
might_sleep();
if (!test_bit(bit, word))
return 0;
return out_of_line_wait_on_bit_timeout(word, bit, bit_wait_timeout,
mode, timeout);
}
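This helper is the waiter half of the new bootloader handshake: btusb_recv_event_intel() below clears BTUSB_DOWNLOADING (or BTUSB_BOOTING) with an smp_mb__after_atomic() barrier plus wake_up_bit() when the vendor event arrives, so the firmware loader can sleep on the same bit with a bound. A sketch of the waiter side; the wrapper and the 5-second bound are illustrative, the flag and helper names come from the patch:
static int my_wait_for_download(struct hci_dev *hdev)
{
	struct btusb_data *data = hci_get_drvdata(hdev);
	int err;
	err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING,
					msecs_to_jiffies(5000),
					TASK_INTERRUPTIBLE);
	if (err == -EINTR) {
		BT_ERR("%s: firmware loading interrupted", hdev->name);
		return err;
	}
	if (err) {
		BT_ERR("%s: firmware loading timeout", hdev->name);
		return -ETIMEDOUT;
	}
	return 0;
}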
static inline void btusb_free_frags(struct btusb_data *data)
{
unsigned long flags;
@ -370,7 +410,7 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
if (bt_cb(skb)->expect == 0) {
/* Complete frame */
hci_recv_frame(data->hdev, skb);
data->recv_event(data->hdev, skb);
skb = NULL;
}
}
@ -952,7 +992,7 @@ static struct urb *alloc_ctrl_urb(struct hci_dev *hdev, struct sk_buff *skb)
}
dr->bRequestType = data->cmdreq_type;
dr->bRequest = 0;
dr->bRequest = data->cmdreq;
dr->wIndex = 0;
dr->wValue = 0;
dr->wLength = __cpu_to_le16(skb->len);
@ -1290,6 +1330,26 @@ struct intel_version {
u8 fw_patch_num;
} __packed;
struct intel_boot_params {
__u8 status;
__u8 otp_format;
__u8 otp_content;
__u8 otp_patch;
__le16 dev_revid;
__u8 secure_boot;
__u8 key_from_hdr;
__u8 key_type;
__u8 otp_lock;
__u8 api_lock;
__u8 debug_lock;
bdaddr_t otp_bdaddr;
__u8 min_fw_build_nn;
__u8 min_fw_build_cw;
__u8 min_fw_build_yy;
__u8 limited_cce;
__u8 unlocked_state;
} __packed;
static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
struct intel_version *ver)
{
@ -1698,6 +1758,562 @@ exit_mfg_deactivate:
return 0;
}
static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
{
struct sk_buff *skb;
struct hci_event_hdr *hdr;
struct hci_ev_cmd_complete *evt;
skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
hdr->evt = HCI_EV_CMD_COMPLETE;
hdr->plen = sizeof(*evt) + 1;
evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
evt->ncmd = 0x01;
evt->opcode = cpu_to_le16(opcode);
*skb_put(skb, 1) = 0x00;
bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
return hci_recv_frame(hdev, skb);
}
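/* Editor's sketch, not part of this patch: the synthetic event built by
 * inject_cmd_complete() for opcode 0xfc01, byte by byte on the wire
 * (HCI_EV_CMD_COMPLETE is 0x0e, the opcode is little endian):
 */
static const u8 example_cmd_complete[] = {
	0x0e,		/* hdr->evt  = HCI_EV_CMD_COMPLETE */
	0x04,		/* hdr->plen = sizeof(*evt) + 1 */
	0x01,		/* evt->ncmd = 1: reopen the command window */
	0x01, 0xfc,	/* evt->opcode = cpu_to_le16(0xfc01) */
	0x00,		/* trailing status byte appended via skb_put() */
};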
static int btusb_recv_bulk_intel(struct btusb_data *data, void *buffer,
int count)
{
/* When the device is in bootloader mode, it can send
* events via the bulk endpoint. These events are treated the
* same way as the ones received from the interrupt endpoint.
*/
if (test_bit(BTUSB_BOOTLOADER, &data->flags))
return btusb_recv_intr(data, buffer, count);
return btusb_recv_bulk(data, buffer, count);
}
static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btusb_data *data = hci_get_drvdata(hdev);
if (test_bit(BTUSB_BOOTLOADER, &data->flags)) {
struct hci_event_hdr *hdr = (void *)skb->data;
/* When the firmware loading completes, the device sends
* out a vendor specific event indicating the result of
* the firmware loading.
*/
if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 &&
skb->data[2] == 0x06) {
if (skb->data[3] != 0x00)
set_bit(BTUSB_FIRMWARE_FAILED, &data->flags);
if (test_and_clear_bit(BTUSB_DOWNLOADING,
&data->flags) &&
test_bit(BTUSB_FIRMWARE_LOADED, &data->flags)) {
smp_mb__after_atomic();
wake_up_bit(&data->flags, BTUSB_DOWNLOADING);
}
}
/* When switching to the operational firmware, the device
* sends a vendor specific event indicating that the bootup
* completed.
*/
if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
skb->data[2] == 0x02) {
if (test_and_clear_bit(BTUSB_BOOTING, &data->flags)) {
smp_mb__after_atomic();
wake_up_bit(&data->flags, BTUSB_BOOTING);
}
}
}
return hci_recv_frame(hdev, skb);
}
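/* Editor's sketch, not part of this patch: the two vendor events matched
 * above, written out as predicates. Only the checked bytes come from the
 * code; the helper names are illustrative.
 */
static bool example_is_download_result(const struct sk_buff *skb)
{
	const struct hci_event_hdr *hdr = (const void *)skb->data;

	/* ff 05 06 <status> ...: skb->data[3] != 0x00 means failure */
	return skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 &&
	       skb->data[2] == 0x06;
}

static bool example_is_bootup(const struct sk_buff *skb)
{
	const struct hci_event_hdr *hdr = (const void *)skb->data;

	/* ff 07 02 ...: operational firmware finished booting */
	return skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
	       skb->data[2] == 0x02;
}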
static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btusb_data *data = hci_get_drvdata(hdev);
struct urb *urb;
BT_DBG("%s", hdev->name);
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
if (test_bit(BTUSB_BOOTLOADER, &data->flags)) {
struct hci_command_hdr *cmd = (void *)skb->data;
__u16 opcode = le16_to_cpu(cmd->opcode);
/* When in bootloader mode and the command 0xfc09
* is received, it needs to be sent down the
* bulk endpoint, so allocate a bulk URB instead.
*/
if (opcode == 0xfc09)
urb = alloc_bulk_urb(hdev, skb);
else
urb = alloc_ctrl_urb(hdev, skb);
/* When the 0xfc01 command is issued to boot into
* the operational firmware, it will not send a
* command complete event. To keep the flow
* control working, inject that event here.
*/
if (opcode == 0xfc01)
inject_cmd_complete(hdev, opcode);
} else {
urb = alloc_ctrl_urb(hdev, skb);
}
if (IS_ERR(urb))
return PTR_ERR(urb);
hdev->stat.cmd_tx++;
return submit_or_queue_tx_urb(hdev, urb);
case HCI_ACLDATA_PKT:
urb = alloc_bulk_urb(hdev, skb);
if (IS_ERR(urb))
return PTR_ERR(urb);
hdev->stat.acl_tx++;
return submit_or_queue_tx_urb(hdev, urb);
case HCI_SCODATA_PKT:
if (hci_conn_num(hdev, SCO_LINK) < 1)
return -ENODEV;
urb = alloc_isoc_urb(hdev, skb);
if (IS_ERR(urb))
return PTR_ERR(urb);
hdev->stat.sco_tx++;
return submit_tx_urb(hdev, urb);
}
return -EILSEQ;
}
static int btusb_intel_secure_send(struct hci_dev *hdev, u8 fragment_type,
u32 plen, const void *param)
{
while (plen > 0) {
struct sk_buff *skb;
u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;
cmd_param[0] = fragment_type;
memcpy(cmd_param + 1, param, fragment_len);
skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
cmd_param, HCI_INIT_TIMEOUT);
if (IS_ERR(skb))
return PTR_ERR(skb);
kfree_skb(skb);
plen -= fragment_len;
param += fragment_len;
}
return 0;
}
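/* Editor's sketch, not part of this patch: each 0xfc09 command carries at
 * most 253 parameter bytes (1 fragment-type byte + up to 252 payload
 * bytes), so the loop above turns e.g. a 600-byte fragment into three
 * commands of 252, 252 and 96 payload bytes.
 */
static u32 example_num_secure_send_cmds(u32 plen)
{
	return DIV_ROUND_UP(plen, 252);	/* 600 -> 3 */
}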
static void btusb_intel_version_info(struct hci_dev *hdev,
struct intel_version *ver)
{
const char *variant;
switch (ver->fw_variant) {
case 0x06:
variant = "Bootloader";
break;
case 0x23:
variant = "Firmware";
break;
default:
return;
}
BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name,
variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy);
}
static int btusb_setup_intel_new(struct hci_dev *hdev)
{
static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
0x00, 0x08, 0x04, 0x00 };
struct btusb_data *data = hci_get_drvdata(hdev);
struct sk_buff *skb;
struct intel_version *ver;
struct intel_boot_params *params;
const struct firmware *fw;
const u8 *fw_ptr;
char fwname[64];
ktime_t calltime, delta, rettime;
unsigned long long duration;
int err;
BT_DBG("%s", hdev->name);
calltime = ktime_get();
/* Read the Intel version information to determine if the device
* is in bootloader mode or if it already has operational firmware
* loaded.
*/
skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
BT_ERR("%s: Reading Intel version information failed (%ld)",
hdev->name, PTR_ERR(skb));
return PTR_ERR(skb);
}
if (skb->len != sizeof(*ver)) {
BT_ERR("%s: Intel version event size mismatch", hdev->name);
kfree_skb(skb);
return -EILSEQ;
}
ver = (struct intel_version *)skb->data;
if (ver->status) {
BT_ERR("%s: Intel version command failure (%02x)",
hdev->name, ver->status);
err = -bt_to_errno(ver->status);
kfree_skb(skb);
return err;
}
/* The hardware platform number has a fixed value of 0x37;
 * for now, only this single value is accepted.
*/
if (ver->hw_platform != 0x37) {
BT_ERR("%s: Unsupported Intel hardware platform (%u)",
hdev->name, ver->hw_platform);
kfree_skb(skb);
return -EINVAL;
}
/* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
* supported by this firmware loading method. This check has been
* put in place to ensure correct forward compatibility options
* when newer hardware variants come along.
*/
if (ver->hw_variant != 0x0b) {
BT_ERR("%s: Unsupported Intel hardware variant (%u)",
hdev->name, ver->hw_variant);
kfree_skb(skb);
return -EINVAL;
}
btusb_intel_version_info(hdev, ver);
/* The firmware variant determines if the device is in bootloader
* mode or is running operational firmware. The value 0x06 identifies
* the bootloader and the value 0x23 identifies the operational
* firmware.
*
* When the operational firmware is already present, only the
* check for a valid Bluetooth device address is needed. This
* determines whether the device will be added as a configured
* or unconfigured controller.
*
* It is not possible to use the Secure Boot Parameters in this
* case since that command is only available in bootloader mode.
*/
if (ver->fw_variant == 0x23) {
kfree_skb(skb);
clear_bit(BTUSB_BOOTLOADER, &data->flags);
btusb_check_bdaddr_intel(hdev);
return 0;
}
/* If the device is not in bootloader mode, then the only possible
* choice is to return an error and abort the device initialization.
*/
if (ver->fw_variant != 0x06) {
BT_ERR("%s: Unsupported Intel firmware variant (%u)",
hdev->name, ver->fw_variant);
kfree_skb(skb);
return -ENODEV;
}
kfree_skb(skb);
/* Read the secure boot parameters to identify the operating
* details of the bootloader.
*/
skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
BT_ERR("%s: Reading Intel boot parameters failed (%ld)",
hdev->name, PTR_ERR(skb));
return PTR_ERR(skb);
}
if (skb->len != sizeof(*params)) {
BT_ERR("%s: Intel boot parameters size mismatch", hdev->name);
kfree_skb(skb);
return -EILSEQ;
}
params = (struct intel_boot_params *)skb->data;
if (params->status) {
BT_ERR("%s: Intel boot parameters command failure (%02x)",
hdev->name, params->status);
err = -bt_to_errno(params->status);
kfree_skb(skb);
return err;
}
BT_INFO("%s: Device revision is %u", hdev->name,
le16_to_cpu(params->dev_revid));
BT_INFO("%s: Secure boot is %s", hdev->name,
params->secure_boot ? "enabled" : "disabled");
BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name,
params->min_fw_build_nn, params->min_fw_build_cw,
2000 + params->min_fw_build_yy);
/* It is required that every single firmware fragment is acknowledged
* with a command complete event. If the boot parameters indicate
* that this bootloader does not send them, then abort the setup.
*/
if (params->limited_cce != 0x00) {
BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
hdev->name, params->limited_cce);
kfree_skb(skb);
return -EINVAL;
}
/* If the OTP has no valid Bluetooth device address, then there will
* also be no valid address for the operational firmware.
*/
if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
BT_INFO("%s: No device address configured", hdev->name);
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
}
/* With this Intel bootloader only the hardware variant and device
* revision information are used to select the right firmware.
*
* Currently this bootloader support is limited to hardware variant
* iBT 3.0 (LnP/SfP) which is identified by the value 11 (0x0b).
*/
snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.sfi",
le16_to_cpu(params->dev_revid));
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
BT_ERR("%s: Failed to load Intel firmware file (%d)",
hdev->name, err);
kfree_skb(skb);
return err;
}
BT_INFO("%s: Found device firmware: %s", hdev->name, fwname);
kfree_skb(skb);
if (fw->size < 644) {
BT_ERR("%s: Invalid size of firmware file (%zu)",
hdev->name, fw->size);
err = -EBADF;
goto done;
}
set_bit(BTUSB_DOWNLOADING, &data->flags);
/* Start the firmware download transaction with the Init fragment
* represented by the 128 bytes of CSS header.
*/
err = btusb_intel_secure_send(hdev, 0x00, 128, fw->data);
if (err < 0) {
BT_ERR("%s: Failed to send firmware header (%d)",
hdev->name, err);
goto done;
}
/* Send the 256 bytes of public key information from the firmware
* as the PKey fragment.
*/
err = btusb_intel_secure_send(hdev, 0x03, 256, fw->data + 128);
if (err < 0) {
BT_ERR("%s: Failed to send firmware public key (%d)",
hdev->name, err);
goto done;
}
/* Send the 256 bytes of signature information from the firmware
* as the Sign fragment.
*/
err = btusb_intel_secure_send(hdev, 0x02, 256, fw->data + 388);
if (err < 0) {
BT_ERR("%s: Failed to send firmware signature (%d)",
hdev->name, err);
goto done;
}
fw_ptr = fw->data + 644;
while (fw_ptr - fw->data < fw->size) {
struct hci_command_hdr *cmd = (void *)fw_ptr;
u8 cmd_len;
cmd_len = sizeof(*cmd) + cmd->plen;
/* Send each command from the firmware data buffer as
* a single Data fragment.
*/
err = btusb_intel_secure_send(hdev, 0x01, cmd_len, fw_ptr);
if (err < 0) {
BT_ERR("%s: Failed to send firmware data (%d)",
hdev->name, err);
goto done;
}
fw_ptr += cmd_len;
}
set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
BT_INFO("%s: Waiting for firmware download to complete", hdev->name);
/* Before switching the device into operational mode and with that
* booting the loaded firmware, wait for the bootloader notification
* that all fragments have been successfully received.
*
* When the event processing receives the notification, then the
* BTUSB_DOWNLOADING flag will be cleared.
*
* The firmware loading should not take longer than 5 seconds;
* if it does, time out and fail the setup of this device.
*/
err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING,
msecs_to_jiffies(5000),
TASK_INTERRUPTIBLE);
if (err == 1) {
BT_ERR("%s: Firmware loading interrupted", hdev->name);
err = -EINTR;
goto done;
}
if (err) {
BT_ERR("%s: Firmware loading timeout", hdev->name);
err = -ETIMEDOUT;
goto done;
}
if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) {
BT_ERR("%s: Firmware loading failed", hdev->name);
err = -ENOEXEC;
goto done;
}
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
BT_INFO("%s: Firmware loaded in %llu usecs", hdev->name, duration);
done:
release_firmware(fw);
if (err < 0)
return err;
calltime = ktime_get();
set_bit(BTUSB_BOOTING, &data->flags);
skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb))
return PTR_ERR(skb);
kfree_skb(skb);
/* The bootloader will not indicate when the device is ready. That
 * is done by the operational firmware sending a bootup notification.
 *
 * Booting into operational firmware should not take longer than
 * 1 second. However, if it does, just fail the setup since
 * something went wrong.
*/
BT_INFO("%s: Waiting for device to boot", hdev->name);
err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_BOOTING,
msecs_to_jiffies(1000),
TASK_INTERRUPTIBLE);
if (err == 1) {
BT_ERR("%s: Device boot interrupted", hdev->name);
return -EINTR;
}
if (err) {
BT_ERR("%s: Device boot timeout", hdev->name);
return -ETIMEDOUT;
}
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
BT_INFO("%s: Device booted in %llu usecs", hdev->name, duration);
clear_bit(BTUSB_BOOTLOADER, &data->flags);
return 0;
}
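/* Editor's sketch, not part of this patch: the *.sfi image layout implied
 * by the offsets in btusb_setup_intel_new(); the struct itself is
 * illustrative, and the 4 bytes at offset 384 are simply never sent.
 */
struct example_sfi_layout {
	u8 css_header[128];	/* Init fragment, secure_send type 0x00 */
	u8 public_key[256];	/* PKey fragment, secure_send type 0x03 */
	u8 skipped[4];		/* offset 384: not sent by the loader */
	u8 signature[256];	/* Sign fragment, secure_send type 0x02 */
	u8 cmds[];		/* offset 644: hci_command_hdr + payload each,
				 * sent as Data fragments (type 0x01), which
				 * is why images below 644 bytes are rejected */
};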
static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code)
{
struct sk_buff *skb;
u8 type = 0x00;
BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
BT_ERR("%s: Reset after hardware error failed (%ld)",
hdev->name, PTR_ERR(skb));
return;
}
kfree_skb(skb);
skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
hdev->name, PTR_ERR(skb));
return;
}
if (skb->len != 13) {
BT_ERR("%s: Exception info size mismatch", hdev->name);
kfree_skb(skb);
return;
}
if (skb->data[0] != 0x00) {
BT_ERR("%s: Exception info command failure (%02x)",
hdev->name, skb->data[0]);
kfree_skb(skb);
return;
}
BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
kfree_skb(skb);
}
static int btusb_set_bdaddr_intel(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
struct sk_buff *skb;
@ -1943,6 +2559,31 @@ static int btusb_set_bdaddr_bcm(struct hci_dev *hdev, const bdaddr_t *bdaddr)
return 0;
}
static int btusb_set_bdaddr_ath3012(struct hci_dev *hdev,
const bdaddr_t *bdaddr)
{
struct sk_buff *skb;
u8 buf[10];
long ret;
buf[0] = 0x01;
buf[1] = 0x01;
buf[2] = 0x00;
buf[3] = sizeof(bdaddr_t);
memcpy(buf + 4, bdaddr, sizeof(bdaddr_t));
skb = __hci_cmd_sync(hdev, 0xfc0b, sizeof(buf), buf, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
BT_ERR("%s: Change address command failed (%ld)",
hdev->name, ret);
return ret;
}
kfree_skb(skb);
return 0;
}
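/* Editor's sketch, not part of this patch: the parameter block built above
 * for the 0xfc0b vendor command; the meaning of the leading bytes is
 * inferred only from this code (byte 3 carries the address length).
 */
static const u8 example_ath3012_param[10] = {
	0x01, 0x01, 0x00, 0x06,	/* header, length = sizeof(bdaddr_t) */
	/* bytes 4..9: new bdaddr, least significant byte first */
};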
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@ -2003,7 +2644,13 @@ static int btusb_probe(struct usb_interface *intf,
if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep)
return -ENODEV;
data->cmdreq_type = USB_TYPE_CLASS;
if (id->driver_info & BTUSB_AMP) {
data->cmdreq_type = USB_TYPE_CLASS | 0x01;
data->cmdreq = 0x2b;
} else {
data->cmdreq_type = USB_TYPE_CLASS;
data->cmdreq = 0x00;
}
data->udev = interface_to_usbdev(intf);
data->intf = intf;
@ -2019,7 +2666,14 @@ static int btusb_probe(struct usb_interface *intf,
init_usb_anchor(&data->isoc_anchor);
spin_lock_init(&data->rxlock);
data->recv_bulk = btusb_recv_bulk;
if (id->driver_info & BTUSB_INTEL_NEW) {
data->recv_event = btusb_recv_event_intel;
data->recv_bulk = btusb_recv_bulk_intel;
set_bit(BTUSB_BOOTLOADER, &data->flags);
} else {
data->recv_event = hci_recv_frame;
data->recv_bulk = btusb_recv_bulk;
}
hdev = hci_alloc_dev();
if (!hdev)
@ -2028,6 +2682,11 @@ static int btusb_probe(struct usb_interface *intf,
hdev->bus = HCI_USB;
hci_set_drvdata(hdev, data);
if (id->driver_info & BTUSB_AMP)
hdev->dev_type = HCI_AMP;
else
hdev->dev_type = HCI_BREDR;
data->hdev = hdev;
SET_HCIDEV_DEV(hdev, &intf->dev);
@ -2050,16 +2709,40 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_INTEL) {
hdev->setup = btusb_setup_intel;
hdev->set_bdaddr = btusb_set_bdaddr_intel;
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
}
if (id->driver_info & BTUSB_INTEL_NEW) {
hdev->send = btusb_send_frame_intel;
hdev->setup = btusb_setup_intel_new;
hdev->hw_error = btusb_hw_error_intel;
hdev->set_bdaddr = btusb_set_bdaddr_intel;
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
}
if (id->driver_info & BTUSB_MARVELL)
hdev->set_bdaddr = btusb_set_bdaddr_marvell;
if (id->driver_info & BTUSB_SWAVE) {
set_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
}
if (id->driver_info & BTUSB_INTEL_BOOT)
set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
/* Interface numbers are hardcoded in the specification */
data->isoc = usb_ifnum_to_if(data->udev, 1);
if (id->driver_info & BTUSB_ATH3012) {
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
}
if (id->driver_info & BTUSB_AMP) {
/* AMP controllers do not support SCO packets */
data->isoc = NULL;
} else {
/* Interface numbers are hardcoded in the specification */
data->isoc = usb_ifnum_to_if(data->udev, 1);
}
if (!reset)
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
@ -2153,7 +2836,6 @@ static void btusb_disconnect(struct usb_interface *intf)
else if (data->isoc)
usb_driver_release_interface(&btusb_driver, data->isoc);
btusb_free_frags(data);
hci_free_dev(hdev);
}


@ -190,7 +190,7 @@ PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
PNAME(mux_uart3_p) = { "uart3_src", "uart3_frac", "xin24m" };
PNAME(mux_uart4_p) = { "uart4_src", "uart4_frac", "xin24m" };
PNAME(mux_cif_out_p) = { "cif_src", "xin24m" };
PNAME(mux_macref_p) = { "mac_src", "ext_gmac" };
PNAME(mux_mac_p) = { "mac_pll_src", "ext_gmac" };
PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" };
PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" };
@ -575,18 +575,18 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, 0,
RK3288_CLKSEL_CON(3), 8, 2, MFLAGS),
COMPOSITE(0, "mac_src", mux_pll_src_npll_cpll_gpll_p, 0,
COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
RK3288_CLKSEL_CON(21), 0, 2, MFLAGS, 8, 5, DFLAGS,
RK3288_CLKGATE_CON(2), 5, GFLAGS),
MUX(0, "macref", mux_macref_p, 0,
MUX(SCLK_MAC, "mac_clk", mux_mac_p, 0,
RK3288_CLKSEL_CON(21), 4, 1, MFLAGS),
GATE(0, "sclk_macref_out", "macref", 0,
GATE(SCLK_MACREF_OUT, "sclk_macref_out", "mac_clk", 0,
RK3288_CLKGATE_CON(5), 3, GFLAGS),
GATE(SCLK_MACREF, "sclk_macref", "macref", 0,
GATE(SCLK_MACREF, "sclk_macref", "mac_clk", 0,
RK3288_CLKGATE_CON(5), 2, GFLAGS),
GATE(SCLK_MAC_RX, "sclk_mac_rx", "macref", 0,
GATE(SCLK_MAC_RX, "sclk_mac_rx", "mac_clk", 0,
RK3288_CLKGATE_CON(5), 0, GFLAGS),
GATE(SCLK_MAC_TX, "sclk_mac_tx", "macref", 0,
GATE(SCLK_MAC_TX, "sclk_mac_tx", "mac_clk", 0,
RK3288_CLKGATE_CON(5), 1, GFLAGS),
COMPOSITE(0, "hsadc_src", mux_pll_src_cpll_gpll_p, 0,

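/* Editor's sketch, not part of this patch: the renamed "mac_clk" mux above
 * is a single bit (bit 4) of RK3288_CLKSEL_CON(21): 0 selects the internal
 * "mac_pll_src" divider output, 1 the external "ext_gmac" pad clock. RK3288
 * CRU registers use a hiword-mask layout, so flipping it is one write with
 * the mask in the upper half-word; the register base name is assumed here.
 */
static void example_select_ext_gmac(void __iomem *cru_base)
{
	writel(BIT(4 + 16) | BIT(4), cru_base + RK3288_CLKSEL_CON(21));
}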

@ -15,6 +15,7 @@
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>


@ -235,19 +235,19 @@ static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
((AF_INET == ep->com.remote_addr.ss_family) ?
sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
sizeof(struct tcphdr);
ep->mss = ep->emss;
if (GET_TCPOPT_TSTAMP(opt))
if (TCPOPT_TSTAMP_G(opt))
ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
if (ep->emss < 128)
ep->emss = 128;
if (ep->emss & 7)
PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
GET_TCPOPT_MSS(opt), ep->mss, ep->emss);
PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
TCPOPT_MSS_G(opt), ep->mss, ep->emss);
PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
ep->mss, ep->emss);
}
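/* Editor's sketch, not part of this patch: set_emss() for a plain IPv4
 * path with a 1500-byte MTU table entry:
 *
 *   emss = 1500 - sizeof(struct iphdr) - sizeof(struct tcphdr)
 *        = 1500 - 20 - 20 = 1460
 *
 * and with TCP timestamps negotiated:
 *
 *   emss = 1460 - round_up(TCPOLEN_TIMESTAMP, 4) = 1460 - 12 = 1448
 *
 * with 128 enforced as the floor at the end.
 */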
@ -652,29 +652,29 @@ static int send_connect(struct c4iw_ep *ep)
if (win > RCV_BUFSIZ_M)
win = RCV_BUFSIZ_M;
opt0 = (nocong ? NO_CONG(1) : 0) |
opt0 = (nocong ? NO_CONG_F : 0) |
KEEP_ALIVE_F |
DELACK(1) |
DELACK_F |
WND_SCALE_V(wscale) |
MSS_IDX_V(mtu_idx) |
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
DSCP(ep->tos) |
DSCP_V(ep->tos) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win);
opt2 = RX_CHANNEL_V(0) |
CCTRL_ECN(enable_ecn) |
CCTRL_ECN_V(enable_ecn) |
RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
if (enable_tcp_timestamps)
opt2 |= TSTAMPS_EN(1);
opt2 |= TSTAMPS_EN_F;
if (enable_tcp_sack)
opt2 |= SACK_EN(1);
opt2 |= SACK_EN_F;
if (wscale && enable_tcp_window_scaling)
opt2 |= WND_SCALE_EN_F;
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
opt2 |= T5_OPT_2_VALID_F;
opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
}
t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
@ -1042,7 +1042,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_ep *ep;
struct cpl_act_establish *req = cplhdr(skb);
unsigned int tid = GET_TID(req);
unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
struct tid_info *t = dev->rdev.lldi.tids;
ep = lookup_atid(t, atid);
@ -1258,8 +1258,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
ep->hwtid));
req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
F_RX_DACK_CHANGE |
V_RX_DACK_MODE(dack_mode));
RX_DACK_CHANGE_F |
RX_DACK_MODE_V(dack_mode));
set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
c4iw_ofld_send(&ep->com.dev->rdev, skb);
return credits;
@ -1751,7 +1751,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
ep->com.dev->rdev.lldi.ports[0],
@ -1782,27 +1782,27 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
if (win > RCV_BUFSIZ_M)
win = RCV_BUFSIZ_M;
req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
(nocong ? NO_CONG(1) : 0) |
req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
(nocong ? NO_CONG_F : 0) |
KEEP_ALIVE_F |
DELACK(1) |
DELACK_F |
WND_SCALE_V(wscale) |
MSS_IDX_V(mtu_idx) |
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
DSCP(ep->tos) |
DSCP_V(ep->tos) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win));
req->tcb.opt2 = (__force __be32) (PACE(1) |
TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
req->tcb.opt2 = (__force __be32) (PACE_V(1) |
TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
RX_CHANNEL_V(0) |
CCTRL_ECN(enable_ecn) |
CCTRL_ECN_V(enable_ecn) |
RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
if (enable_tcp_timestamps)
req->tcb.opt2 |= (__force __be32)TSTAMPS_EN(1);
req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
if (enable_tcp_sack)
req->tcb.opt2 |= (__force __be32)SACK_EN(1);
req->tcb.opt2 |= (__force __be32)SACK_EN_F;
if (wscale && enable_tcp_window_scaling)
req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
@ -2023,10 +2023,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
struct cpl_act_open_rpl *rpl = cplhdr(skb);
unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
ntohl(rpl->atid_status)));
unsigned int atid = TID_TID_G(AOPEN_ATID_G(
ntohl(rpl->atid_status)));
struct tid_info *t = dev->rdev.lldi.tids;
int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
struct sockaddr_in *la;
struct sockaddr_in *ra;
struct sockaddr_in6 *la6;
@ -2064,7 +2064,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
if (ep->com.local_addr.ss_family == AF_INET &&
dev->rdev.lldi.enable_fw_ofld_conn) {
send_fw_act_open_req(ep,
GET_TID_TID(GET_AOPEN_ATID(
TID_TID_G(AOPEN_ATID_G(
ntohl(rpl->atid_status))));
return 0;
}
@ -2181,39 +2181,39 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
win = ep->rcv_win >> 10;
if (win > RCV_BUFSIZ_M)
win = RCV_BUFSIZ_M;
opt0 = (nocong ? NO_CONG(1) : 0) |
opt0 = (nocong ? NO_CONG_F : 0) |
KEEP_ALIVE_F |
DELACK(1) |
DELACK_F |
WND_SCALE_V(wscale) |
MSS_IDX_V(mtu_idx) |
L2T_IDX_V(ep->l2t->idx) |
TX_CHAN_V(ep->tx_chan) |
SMAC_SEL_V(ep->smac_idx) |
DSCP(ep->tos >> 2) |
DSCP_V(ep->tos >> 2) |
ULP_MODE_V(ULP_MODE_TCPDDP) |
RCV_BUFSIZ_V(win);
opt2 = RX_CHANNEL_V(0) |
RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
if (enable_tcp_timestamps && req->tcpopt.tstamp)
opt2 |= TSTAMPS_EN(1);
opt2 |= TSTAMPS_EN_F;
if (enable_tcp_sack && req->tcpopt.sack)
opt2 |= SACK_EN(1);
opt2 |= SACK_EN_F;
if (wscale && enable_tcp_window_scaling)
opt2 |= WND_SCALE_EN_F;
if (enable_ecn) {
const struct tcphdr *tcph;
u32 hlen = ntohl(req->hdr_len);
tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
G_IP_HDR_LEN(hlen);
tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
IP_HDR_LEN_G(hlen);
if (tcph->ece && tcph->cwr)
opt2 |= CCTRL_ECN(1);
opt2 |= CCTRL_ECN_V(1);
}
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
u32 isn = (prandom_u32() & ~7UL) - 1;
opt2 |= T5_OPT_2_VALID_F;
opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
rpl5 = (void *)rpl;
memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
@ -2245,8 +2245,8 @@ static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype,
__u8 *local_ip, __u8 *peer_ip,
__be16 *local_port, __be16 *peer_port)
{
int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
int eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
int ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
struct tcphdr *tcp = (struct tcphdr *)
@ -2277,7 +2277,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *child_ep = NULL, *parent_ep;
struct cpl_pass_accept_req *req = cplhdr(skb);
unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int hwtid = GET_TID(req);
struct dst_entry *dst;
@ -2310,14 +2310,14 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
ntohs(peer_port), peer_mss);
dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
local_port, peer_port,
GET_POPEN_TOS(ntohl(req->tos_stid)));
PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
} else {
PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
, __func__, parent_ep, hwtid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
PASS_OPEN_TOS(ntohl(req->tos_stid)),
PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
((struct sockaddr_in6 *)
&parent_ep->com.local_addr)->sin6_scope_id);
}
@ -2375,7 +2375,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
}
c4iw_get_ep(&parent_ep->com);
child_ep->parent_ep = parent_ep;
child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
child_ep->dst = dst;
child_ep->hwtid = hwtid;
@ -3500,24 +3500,24 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
V_SYN_MAC_IDX(G_RX_MACIDX(
req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
SYN_MAC_IDX_V(RX_MACIDX_G(
(__force int) htonl(l2info))) |
F_SYN_XACT_MATCH);
SYN_XACT_MATCH_F);
eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
RX_ETHHDR_LEN_G((__force int)htonl(l2info)) :
RX_T5_ETHHDR_LEN_G((__force int)htonl(l2info));
req->hdr_len = cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(
(__force int) htonl(l2info))) |
V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
TCP_HDR_LEN_V(RX_TCPHDR_LEN_G(
(__force int) htons(hdr_len))) |
V_IP_HDR_LEN(G_RX_IPHDR_LEN(
IP_HDR_LEN_V(RX_IPHDR_LEN_G(
(__force int) htons(hdr_len))) |
V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
ETH_HDR_LEN_V(RX_ETHHDR_LEN_G(eth_hdr_len)));
req->vlan = (__force __be16) vlantag;
req->len = (__force __be16) len;
req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
PASS_OPEN_TOS(tos));
req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
PASS_OPEN_TOS_V(tos));
req->tcpopt.mss = htons(tmp_opt.mss_clamp);
if (tmp_opt.wscale_ok)
req->tcpopt.wsf = tmp_opt.snd_wscale;
@ -3542,7 +3542,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
memset(req, 0, sizeof(*req));
req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
req->le.filter = (__force __be32) filter;
@ -3556,7 +3556,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
FW_OFLD_CONNECTION_WR_ASTID_V(
GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));
/*
* We store the qid in opt2 which will be used by the firmware
@ -3613,7 +3613,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
struct neighbour *neigh;
/* Drop all non-SYN packets */
if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
goto reject;
/*
@ -3635,8 +3635,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
}
eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
RX_ETHHDR_LEN_G(htonl(cpl->l2info)) :
RX_T5_ETHHDR_LEN_G(htonl(cpl->l2info));
if (eth_hdr_len == ETH_HLEN) {
eh = (struct ethhdr *)(req + 1);
iph = (struct iphdr *)(eh + 1);


@ -52,7 +52,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
memset(res_wr, 0, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
V_FW_RI_RES_WR_NRES(1) |
FW_RI_RES_WR_NRES_V(1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->cookie = (unsigned long) &wr_wait;
@ -122,7 +122,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
memset(res_wr, 0, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
V_FW_RI_RES_WR_NRES(1) |
FW_RI_RES_WR_NRES_V(1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->cookie = (unsigned long) &wr_wait;
@ -131,17 +131,17 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
res->u.cq.op = FW_RI_RES_OP_WRITE;
res->u.cq.iqid = cpu_to_be32(cq->cqid);
res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
V_FW_RI_RES_WR_IQANUS(0) |
V_FW_RI_RES_WR_IQANUD(1) |
F_FW_RI_RES_WR_IQANDST |
V_FW_RI_RES_WR_IQANDSTINDEX(
FW_RI_RES_WR_IQANUS_V(0) |
FW_RI_RES_WR_IQANUD_V(1) |
FW_RI_RES_WR_IQANDST_F |
FW_RI_RES_WR_IQANDSTINDEX_V(
rdev->lldi.ciq_ids[cq->vector]));
res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
F_FW_RI_RES_WR_IQDROPRSS |
V_FW_RI_RES_WR_IQPCIECH(2) |
V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
F_FW_RI_RES_WR_IQO |
V_FW_RI_RES_WR_IQESIZE(1));
FW_RI_RES_WR_IQDROPRSS_F |
FW_RI_RES_WR_IQPCIECH_V(2) |
FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
FW_RI_RES_WR_IQO_F |
FW_RI_RES_WR_IQESIZE_V(1));
res->u.cq.iqsize = cpu_to_be16(cq->size);
res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
@ -182,12 +182,12 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
wq, cq, cq->sw_cidx, cq->sw_pidx);
memset(&cqe, 0, sizeof(cqe));
cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
V_CQE_OPCODE(FW_RI_SEND) |
V_CQE_TYPE(0) |
V_CQE_SWCQE(1) |
V_CQE_QPID(wq->sq.qid));
cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
CQE_OPCODE_V(FW_RI_SEND) |
CQE_TYPE_V(0) |
CQE_SWCQE_V(1) |
CQE_QPID_V(wq->sq.qid));
cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
cq->sw_queue[cq->sw_pidx] = cqe;
t4_swcq_produce(cq);
}
@ -215,13 +215,13 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
wq, cq, cq->sw_cidx, cq->sw_pidx);
memset(&cqe, 0, sizeof(cqe));
cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
V_CQE_OPCODE(swcqe->opcode) |
V_CQE_TYPE(1) |
V_CQE_SWCQE(1) |
V_CQE_QPID(wq->sq.qid));
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
CQE_OPCODE_V(swcqe->opcode) |
CQE_TYPE_V(1) |
CQE_SWCQE_V(1) |
CQE_QPID_V(wq->sq.qid));
CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
cq->sw_queue[cq->sw_pidx] = cqe;
t4_swcq_produce(cq);
}
@ -284,7 +284,7 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
*/
PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
__func__, cidx, cq->sw_pidx);
swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
t4_swcq_produce(cq);
swsqe->flushed = 1;
@ -301,10 +301,10 @@ static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
{
read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
read_cqe->len = htonl(wq->sq.oldest_read->read_len);
read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
V_CQE_SWCQE(SW_CQE(hw_cqe)) |
V_CQE_OPCODE(FW_RI_READ_REQ) |
V_CQE_TYPE(1));
read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
CQE_SWCQE_V(SW_CQE(hw_cqe)) |
CQE_OPCODE_V(FW_RI_READ_REQ) |
CQE_TYPE_V(1));
read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}
@ -400,7 +400,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
} else {
swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
*swcqe = *hw_cqe;
swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
t4_swcq_produce(&chp->cq);
}
next_cqe:
@ -576,7 +576,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
}
if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
t4_set_wq_in_error(wq);
hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
goto proc_cqe;
}
goto proc_cqe;


@ -380,12 +380,12 @@ static int dump_stag(int id, void *p, void *data)
"stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
"perm 0x%x ps %d len 0x%llx va 0x%llx\n",
(u32)id<<8,
G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
if (cc < space)


@ -50,12 +50,12 @@ static void print_tpte(struct c4iw_dev *dev, u32 stag)
PDBG("stag idx 0x%x valid %d key 0x%x state %d pdid %d "
"perm 0x%x ps %d len 0x%llx va 0x%llx\n",
stag & 0xffffff00,
G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
}


@ -86,14 +86,14 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
req->cmd |= cpu_to_be32(T5_ULP_MEMIO_ORDER_V(1));
req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
sgl = (struct ulptx_sgl *)(req + 1);
sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
ULPTX_NSGE(1));
ULPTX_NSGE_V(1));
sgl->len0 = cpu_to_be32(len);
sgl->addr0 = cpu_to_be64(data);
@ -286,17 +286,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
if (reset_tpt_entry)
memset(&tpt, 0, sizeof(tpt));
else {
tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
V_FW_RI_TPTE_STAGSTATE(stag_state) |
V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
(bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
FW_RI_TPTE_STAGSTATE_V(stag_state) |
FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
(bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
FW_RI_VA_BASED_TO))|
V_FW_RI_TPTE_PS(page_size));
FW_RI_TPTE_PS_V(page_size));
tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
tpt.va_hi = cpu_to_be32((u32)(to >> 32));
tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));


@ -272,7 +272,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
memset(res_wr, 0, wr_len);
res_wr->op_nres = cpu_to_be32(
FW_WR_OP_V(FW_RI_RES_WR) |
V_FW_RI_RES_WR_NRES(2) |
FW_RI_RES_WR_NRES_V(2) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
res_wr->cookie = (unsigned long) &wr_wait;
@ -287,19 +287,19 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
rdev->hw_queue.t4_eq_status_entries;
res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
V_FW_RI_RES_WR_IQID(scq->cqid));
FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */
FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
(t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
FW_RI_RES_WR_IQID_V(scq->cqid));
res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
V_FW_RI_RES_WR_DCAEN(0) |
V_FW_RI_RES_WR_DCACPU(0) |
V_FW_RI_RES_WR_FBMIN(2) |
V_FW_RI_RES_WR_FBMAX(2) |
V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
V_FW_RI_RES_WR_CIDXFTHRESH(0) |
V_FW_RI_RES_WR_EQSIZE(eqsize));
FW_RI_RES_WR_DCAEN_V(0) |
FW_RI_RES_WR_DCACPU_V(0) |
FW_RI_RES_WR_FBMIN_V(2) |
FW_RI_RES_WR_FBMAX_V(2) |
FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
FW_RI_RES_WR_CIDXFTHRESH_V(0) |
FW_RI_RES_WR_EQSIZE_V(eqsize));
res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
res++;
@ -312,18 +312,18 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
rdev->hw_queue.t4_eq_status_entries;
res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */
V_FW_RI_RES_WR_IQID(rcq->cqid));
FW_RI_RES_WR_HOSTFCMODE_V(0) | /* no host cidx updates */
FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
FW_RI_RES_WR_IQID_V(rcq->cqid));
res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
V_FW_RI_RES_WR_DCAEN(0) |
V_FW_RI_RES_WR_DCACPU(0) |
V_FW_RI_RES_WR_FBMIN(2) |
V_FW_RI_RES_WR_FBMAX(2) |
V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
V_FW_RI_RES_WR_CIDXFTHRESH(0) |
V_FW_RI_RES_WR_EQSIZE(eqsize));
FW_RI_RES_WR_DCAEN_V(0) |
FW_RI_RES_WR_DCACPU_V(0) |
FW_RI_RES_WR_FBMIN_V(2) |
FW_RI_RES_WR_FBMAX_V(2) |
FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
FW_RI_RES_WR_CIDXFTHRESH_V(0) |
FW_RI_RES_WR_EQSIZE_V(eqsize));
res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
@ -444,19 +444,19 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
case IB_WR_SEND:
if (wr->send_flags & IB_SEND_SOLICITED)
wqe->send.sendop_pkd = cpu_to_be32(
V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
else
wqe->send.sendop_pkd = cpu_to_be32(
V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
wqe->send.stag_inv = 0;
break;
case IB_WR_SEND_WITH_INV:
if (wr->send_flags & IB_SEND_SOLICITED)
wqe->send.sendop_pkd = cpu_to_be32(
V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
else
wqe->send.sendop_pkd = cpu_to_be32(
V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
break;
@ -1283,8 +1283,8 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
wqe->u.init.type = FW_RI_TYPE_INIT;
wqe->u.init.mpareqbit_p2ptype =
V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
if (qhp->attr.mpa_attr.recv_marker_enabled)
wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
@ -1776,7 +1776,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
if (mm5) {
mm5->key = uresp.ma_sync_key;
mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
+ A_PCIE_MA_SYNC) & PAGE_MASK;
+ PCIE_MA_SYNC_A) & PAGE_MASK;
mm5->len = PAGE_SIZE;
insert_mmap(ucontext, mm5);
}

View File

@ -41,7 +41,7 @@
#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
#define A_PCIE_MA_SYNC 0x30b4
#define PCIE_MA_SYNC_A 0x30b4
struct t4_status_page {
__be32 rsvd1; /* flit 0 - hw owns */
@ -184,44 +184,44 @@ struct t4_cqe {
/* macros for flit 0 of the cqe */
#define S_CQE_QPID 12
#define M_CQE_QPID 0xFFFFF
#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
#define CQE_QPID_S 12
#define CQE_QPID_M 0xFFFFF
#define CQE_QPID_G(x) ((((x) >> CQE_QPID_S)) & CQE_QPID_M)
#define CQE_QPID_V(x) ((x)<<CQE_QPID_S)
#define S_CQE_SWCQE 11
#define M_CQE_SWCQE 0x1
#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
#define CQE_SWCQE_S 11
#define CQE_SWCQE_M 0x1
#define CQE_SWCQE_G(x) ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
#define CQE_SWCQE_V(x) ((x)<<CQE_SWCQE_S)
#define S_CQE_STATUS 5
#define M_CQE_STATUS 0x1F
#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
#define CQE_STATUS_S 5
#define CQE_STATUS_M 0x1F
#define CQE_STATUS_G(x) ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
#define CQE_STATUS_V(x) ((x)<<CQE_STATUS_S)
#define S_CQE_TYPE 4
#define M_CQE_TYPE 0x1
#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
#define CQE_TYPE_S 4
#define CQE_TYPE_M 0x1
#define CQE_TYPE_G(x) ((((x) >> CQE_TYPE_S)) & CQE_TYPE_M)
#define CQE_TYPE_V(x) ((x)<<CQE_TYPE_S)
#define S_CQE_OPCODE 0
#define M_CQE_OPCODE 0xF
#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
#define CQE_OPCODE_S 0
#define CQE_OPCODE_M 0xF
#define CQE_OPCODE_G(x) ((((x) >> CQE_OPCODE_S)) & CQE_OPCODE_M)
#define CQE_OPCODE_V(x) ((x)<<CQE_OPCODE_S)
#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
#define SW_CQE(x) (CQE_SWCQE_G(be32_to_cpu((x)->header)))
#define CQE_QPID(x) (CQE_QPID_G(be32_to_cpu((x)->header)))
#define CQE_TYPE(x) (CQE_TYPE_G(be32_to_cpu((x)->header)))
#define SQ_TYPE(x) (CQE_TYPE((x)))
#define RQ_TYPE(x) (!CQE_TYPE((x)))
#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))
#define CQE_STATUS(x) (CQE_STATUS_G(be32_to_cpu((x)->header)))
#define CQE_OPCODE(x) (CQE_OPCODE_G(be32_to_cpu((x)->header)))
#define CQE_SEND_OPCODE(x)( \
(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
#define CQE_LEN(x) (be32_to_cpu((x)->len))
@ -237,25 +237,25 @@ struct t4_cqe {
#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
/* macros for flit 3 of the cqe */
#define S_CQE_GENBIT 63
#define M_CQE_GENBIT 0x1
#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
#define CQE_GENBIT_S 63
#define CQE_GENBIT_M 0x1
#define CQE_GENBIT_G(x) (((x) >> CQE_GENBIT_S) & CQE_GENBIT_M)
#define CQE_GENBIT_V(x) ((x)<<CQE_GENBIT_S)
#define S_CQE_OVFBIT 62
#define M_CQE_OVFBIT 0x1
#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
#define CQE_OVFBIT_S 62
#define CQE_OVFBIT_M 0x1
#define CQE_OVFBIT_G(x) ((((x) >> CQE_OVFBIT_S)) & CQE_OVFBIT_M)
#define S_CQE_IQTYPE 60
#define M_CQE_IQTYPE 0x3
#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
#define CQE_IQTYPE_S 60
#define CQE_IQTYPE_M 0x3
#define CQE_IQTYPE_G(x) ((((x) >> CQE_IQTYPE_S)) & CQE_IQTYPE_M)
#define M_CQE_TS 0x0fffffffffffffffULL
#define G_CQE_TS(x) ((x) & M_CQE_TS)
#define CQE_TS_M 0x0fffffffffffffffULL
#define CQE_TS_G(x) ((x) & CQE_TS_M)
#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
#define CQE_OVFBIT(x) ((unsigned)CQE_OVFBIT_G(be64_to_cpu((x)->bits_type_ts)))
#define CQE_GENBIT(x) ((unsigned)CQE_GENBIT_G(be64_to_cpu((x)->bits_type_ts)))
#define CQE_TS(x) (CQE_TS_G(be64_to_cpu((x)->bits_type_ts)))
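/* Editor's sketch, not part of this patch: the renamed macros keep the old
 * shift/mask semantics, e.g. composing and decomposing flit 0 of a CQE
 * (field values illustrative):
 */
static void example_cqe_header(struct t4_cqe *cqe, u32 qpid)
{
	cqe->header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				  CQE_OPCODE_V(FW_RI_SEND) |
				  CQE_TYPE_V(1) |
				  CQE_SWCQE_V(1) |
				  CQE_QPID_V(qpid));

	/* CQE_QPID() shifts right by CQE_QPID_S and masks with CQE_QPID_M */
	WARN_ON(CQE_QPID(cqe) != (qpid & CQE_QPID_M));
}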
struct t4_swsqe {
u64 wr_id;
@ -465,14 +465,14 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5,
} else {
PDBG("%s: DB wq->sq.pidx = %d\n",
__func__, wq->sq.pidx);
writel(PIDX_T5(inc), wq->sq.udb);
writel(PIDX_T5_V(inc), wq->sq.udb);
}
/* Flush user doorbell area writes. */
wmb();
return;
}
writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
}
static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
@ -489,14 +489,14 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
} else {
PDBG("%s: DB wq->rq.pidx = %d\n",
__func__, wq->rq.pidx);
writel(PIDX_T5(inc), wq->rq.udb);
writel(PIDX_T5_V(inc), wq->rq.udb);
}
/* Flush user doorbell area writes. */
wmb();
return;
}
writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
}
static inline int t4_wq_in_error(struct t4_wq *wq)
@ -561,14 +561,14 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
u32 val;
set_bit(CQ_ARMED, &cq->flags);
while (cq->cidx_inc > CIDXINC_MASK) {
val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
INGRESSQID(cq->cqid);
while (cq->cidx_inc > CIDXINC_M) {
val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
INGRESSQID_V(cq->cqid);
writel(val, cq->gts);
cq->cidx_inc -= CIDXINC_MASK;
cq->cidx_inc -= CIDXINC_M;
}
val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
INGRESSQID(cq->cqid);
val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
INGRESSQID_V(cq->cqid);
writel(val, cq->gts);
cq->cidx_inc = 0;
return 0;
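/* Editor's sketch, not part of this patch: CIDXINC_M bounds how many
 * credits one GTS write can return, so the loop above drains any excess
 * in CIDXINC_M-sized writes before the final write arms the CQ; e.g.
 * cidx_inc = 2 * CIDXINC_M + 5 issues two full credit-return writes and
 * an arming write carrying the remaining 5. (Reading TIMERREG 7 as a
 * pure credit return versus 6 for arming is an assumption from this code.)
 */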
@ -597,11 +597,11 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
static inline void t4_hwcq_consume(struct t4_cq *cq)
{
cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) {
if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) {
u32 val;
val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
INGRESSQID(cq->cqid);
val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
INGRESSQID_V(cq->cqid);
writel(val, cq->gts);
cq->cidx_inc = 0;
}


@ -162,102 +162,102 @@ struct fw_ri_tpte {
__be32 len_hi;
};
#define S_FW_RI_TPTE_VALID 31
#define M_FW_RI_TPTE_VALID 0x1
#define V_FW_RI_TPTE_VALID(x) ((x) << S_FW_RI_TPTE_VALID)
#define G_FW_RI_TPTE_VALID(x) \
(((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID)
#define F_FW_RI_TPTE_VALID V_FW_RI_TPTE_VALID(1U)
#define FW_RI_TPTE_VALID_S 31
#define FW_RI_TPTE_VALID_M 0x1
#define FW_RI_TPTE_VALID_V(x) ((x) << FW_RI_TPTE_VALID_S)
#define FW_RI_TPTE_VALID_G(x) \
(((x) >> FW_RI_TPTE_VALID_S) & FW_RI_TPTE_VALID_M)
#define FW_RI_TPTE_VALID_F FW_RI_TPTE_VALID_V(1U)
#define S_FW_RI_TPTE_STAGKEY 23
#define M_FW_RI_TPTE_STAGKEY 0xff
#define V_FW_RI_TPTE_STAGKEY(x) ((x) << S_FW_RI_TPTE_STAGKEY)
#define G_FW_RI_TPTE_STAGKEY(x) \
(((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY)
#define FW_RI_TPTE_STAGKEY_S 23
#define FW_RI_TPTE_STAGKEY_M 0xff
#define FW_RI_TPTE_STAGKEY_V(x) ((x) << FW_RI_TPTE_STAGKEY_S)
#define FW_RI_TPTE_STAGKEY_G(x) \
(((x) >> FW_RI_TPTE_STAGKEY_S) & FW_RI_TPTE_STAGKEY_M)
#define S_FW_RI_TPTE_STAGSTATE 22
#define M_FW_RI_TPTE_STAGSTATE 0x1
#define V_FW_RI_TPTE_STAGSTATE(x) ((x) << S_FW_RI_TPTE_STAGSTATE)
#define G_FW_RI_TPTE_STAGSTATE(x) \
(((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE)
#define F_FW_RI_TPTE_STAGSTATE V_FW_RI_TPTE_STAGSTATE(1U)
#define FW_RI_TPTE_STAGSTATE_S 22
#define FW_RI_TPTE_STAGSTATE_M 0x1
#define FW_RI_TPTE_STAGSTATE_V(x) ((x) << FW_RI_TPTE_STAGSTATE_S)
#define FW_RI_TPTE_STAGSTATE_G(x) \
(((x) >> FW_RI_TPTE_STAGSTATE_S) & FW_RI_TPTE_STAGSTATE_M)
#define FW_RI_TPTE_STAGSTATE_F FW_RI_TPTE_STAGSTATE_V(1U)
#define S_FW_RI_TPTE_STAGTYPE 20
#define M_FW_RI_TPTE_STAGTYPE 0x3
#define V_FW_RI_TPTE_STAGTYPE(x) ((x) << S_FW_RI_TPTE_STAGTYPE)
#define G_FW_RI_TPTE_STAGTYPE(x) \
(((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE)
#define FW_RI_TPTE_STAGTYPE_S 20
#define FW_RI_TPTE_STAGTYPE_M 0x3
#define FW_RI_TPTE_STAGTYPE_V(x) ((x) << FW_RI_TPTE_STAGTYPE_S)
#define FW_RI_TPTE_STAGTYPE_G(x) \
(((x) >> FW_RI_TPTE_STAGTYPE_S) & FW_RI_TPTE_STAGTYPE_M)
#define S_FW_RI_TPTE_PDID 0
#define M_FW_RI_TPTE_PDID 0xfffff
#define V_FW_RI_TPTE_PDID(x) ((x) << S_FW_RI_TPTE_PDID)
#define G_FW_RI_TPTE_PDID(x) \
(((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID)
#define FW_RI_TPTE_PDID_S 0
#define FW_RI_TPTE_PDID_M 0xfffff
#define FW_RI_TPTE_PDID_V(x) ((x) << FW_RI_TPTE_PDID_S)
#define FW_RI_TPTE_PDID_G(x) \
(((x) >> FW_RI_TPTE_PDID_S) & FW_RI_TPTE_PDID_M)
#define S_FW_RI_TPTE_PERM 28
#define M_FW_RI_TPTE_PERM 0xf
#define V_FW_RI_TPTE_PERM(x) ((x) << S_FW_RI_TPTE_PERM)
#define G_FW_RI_TPTE_PERM(x) \
(((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM)
#define FW_RI_TPTE_PERM_S 28
#define FW_RI_TPTE_PERM_M 0xf
#define FW_RI_TPTE_PERM_V(x) ((x) << FW_RI_TPTE_PERM_S)
#define FW_RI_TPTE_PERM_G(x) \
(((x) >> FW_RI_TPTE_PERM_S) & FW_RI_TPTE_PERM_M)
#define S_FW_RI_TPTE_REMINVDIS 27
#define M_FW_RI_TPTE_REMINVDIS 0x1
#define V_FW_RI_TPTE_REMINVDIS(x) ((x) << S_FW_RI_TPTE_REMINVDIS)
#define G_FW_RI_TPTE_REMINVDIS(x) \
(((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS)
#define F_FW_RI_TPTE_REMINVDIS V_FW_RI_TPTE_REMINVDIS(1U)
#define FW_RI_TPTE_REMINVDIS_S 27
#define FW_RI_TPTE_REMINVDIS_M 0x1
#define FW_RI_TPTE_REMINVDIS_V(x) ((x) << FW_RI_TPTE_REMINVDIS_S)
#define FW_RI_TPTE_REMINVDIS_G(x) \
(((x) >> FW_RI_TPTE_REMINVDIS_S) & FW_RI_TPTE_REMINVDIS_M)
#define FW_RI_TPTE_REMINVDIS_F FW_RI_TPTE_REMINVDIS_V(1U)
#define S_FW_RI_TPTE_ADDRTYPE 26
#define M_FW_RI_TPTE_ADDRTYPE 1
#define V_FW_RI_TPTE_ADDRTYPE(x) ((x) << S_FW_RI_TPTE_ADDRTYPE)
#define G_FW_RI_TPTE_ADDRTYPE(x) \
(((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE)
#define F_FW_RI_TPTE_ADDRTYPE V_FW_RI_TPTE_ADDRTYPE(1U)
#define FW_RI_TPTE_ADDRTYPE_S 26
#define FW_RI_TPTE_ADDRTYPE_M 1
#define FW_RI_TPTE_ADDRTYPE_V(x) ((x) << FW_RI_TPTE_ADDRTYPE_S)
#define FW_RI_TPTE_ADDRTYPE_G(x) \
(((x) >> FW_RI_TPTE_ADDRTYPE_S) & FW_RI_TPTE_ADDRTYPE_M)
#define FW_RI_TPTE_ADDRTYPE_F FW_RI_TPTE_ADDRTYPE_V(1U)
#define S_FW_RI_TPTE_MWBINDEN 25
#define M_FW_RI_TPTE_MWBINDEN 0x1
#define V_FW_RI_TPTE_MWBINDEN(x) ((x) << S_FW_RI_TPTE_MWBINDEN)
#define G_FW_RI_TPTE_MWBINDEN(x) \
(((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN)
#define F_FW_RI_TPTE_MWBINDEN V_FW_RI_TPTE_MWBINDEN(1U)
#define FW_RI_TPTE_MWBINDEN_S 25
#define FW_RI_TPTE_MWBINDEN_M 0x1
#define FW_RI_TPTE_MWBINDEN_V(x) ((x) << FW_RI_TPTE_MWBINDEN_S)
#define FW_RI_TPTE_MWBINDEN_G(x) \
(((x) >> FW_RI_TPTE_MWBINDEN_S) & FW_RI_TPTE_MWBINDEN_M)
#define FW_RI_TPTE_MWBINDEN_F FW_RI_TPTE_MWBINDEN_V(1U)
#define S_FW_RI_TPTE_PS 20
#define M_FW_RI_TPTE_PS 0x1f
#define V_FW_RI_TPTE_PS(x) ((x) << S_FW_RI_TPTE_PS)
#define G_FW_RI_TPTE_PS(x) \
(((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS)
#define FW_RI_TPTE_PS_S 20
#define FW_RI_TPTE_PS_M 0x1f
#define FW_RI_TPTE_PS_V(x) ((x) << FW_RI_TPTE_PS_S)
#define FW_RI_TPTE_PS_G(x) \
(((x) >> FW_RI_TPTE_PS_S) & FW_RI_TPTE_PS_M)
#define S_FW_RI_TPTE_QPID 0
#define M_FW_RI_TPTE_QPID 0xfffff
#define V_FW_RI_TPTE_QPID(x) ((x) << S_FW_RI_TPTE_QPID)
#define G_FW_RI_TPTE_QPID(x) \
(((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID)
#define FW_RI_TPTE_QPID_S 0
#define FW_RI_TPTE_QPID_M 0xfffff
#define FW_RI_TPTE_QPID_V(x) ((x) << FW_RI_TPTE_QPID_S)
#define FW_RI_TPTE_QPID_G(x) \
(((x) >> FW_RI_TPTE_QPID_S) & FW_RI_TPTE_QPID_M)
#define S_FW_RI_TPTE_NOSNOOP 30
#define M_FW_RI_TPTE_NOSNOOP 0x1
#define V_FW_RI_TPTE_NOSNOOP(x) ((x) << S_FW_RI_TPTE_NOSNOOP)
#define G_FW_RI_TPTE_NOSNOOP(x) \
(((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP)
#define F_FW_RI_TPTE_NOSNOOP V_FW_RI_TPTE_NOSNOOP(1U)
#define FW_RI_TPTE_NOSNOOP_S 30
#define FW_RI_TPTE_NOSNOOP_M 0x1
#define FW_RI_TPTE_NOSNOOP_V(x) ((x) << FW_RI_TPTE_NOSNOOP_S)
#define FW_RI_TPTE_NOSNOOP_G(x) \
(((x) >> FW_RI_TPTE_NOSNOOP_S) & FW_RI_TPTE_NOSNOOP_M)
#define FW_RI_TPTE_NOSNOOP_F FW_RI_TPTE_NOSNOOP_V(1U)
#define S_FW_RI_TPTE_PBLADDR 0
#define M_FW_RI_TPTE_PBLADDR 0x1fffffff
#define V_FW_RI_TPTE_PBLADDR(x) ((x) << S_FW_RI_TPTE_PBLADDR)
#define G_FW_RI_TPTE_PBLADDR(x) \
(((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR)
#define FW_RI_TPTE_PBLADDR_S 0
#define FW_RI_TPTE_PBLADDR_M 0x1fffffff
#define FW_RI_TPTE_PBLADDR_V(x) ((x) << FW_RI_TPTE_PBLADDR_S)
#define FW_RI_TPTE_PBLADDR_G(x) \
(((x) >> FW_RI_TPTE_PBLADDR_S) & FW_RI_TPTE_PBLADDR_M)
#define S_FW_RI_TPTE_DCA 24
#define M_FW_RI_TPTE_DCA 0x1f
#define V_FW_RI_TPTE_DCA(x) ((x) << S_FW_RI_TPTE_DCA)
#define G_FW_RI_TPTE_DCA(x) \
(((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA)
#define FW_RI_TPTE_DCA_S 24
#define FW_RI_TPTE_DCA_M 0x1f
#define FW_RI_TPTE_DCA_V(x) ((x) << FW_RI_TPTE_DCA_S)
#define FW_RI_TPTE_DCA_G(x) \
(((x) >> FW_RI_TPTE_DCA_S) & FW_RI_TPTE_DCA_M)
#define S_FW_RI_TPTE_MWBCNT_PSTAG 0
#define M_FW_RI_TPTE_MWBCNT_PSTAG 0xffffff
#define V_FW_RI_TPTE_MWBCNT_PSTAT(x) \
((x) << S_FW_RI_TPTE_MWBCNT_PSTAG)
#define G_FW_RI_TPTE_MWBCNT_PSTAG(x) \
(((x) >> S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG)
#define FW_RI_TPTE_MWBCNT_PSTAG_S 0
#define FW_RI_TPTE_MWBCNT_PSTAG_M 0xffffff
#define FW_RI_TPTE_MWBCNT_PSTAT_V(x) \
((x) << FW_RI_TPTE_MWBCNT_PSTAG_S)
#define FW_RI_TPTE_MWBCNT_PSTAG_G(x) \
(((x) >> FW_RI_TPTE_MWBCNT_PSTAG_S) & FW_RI_TPTE_MWBCNT_PSTAG_M)
enum fw_ri_res_type {
FW_RI_RES_TYPE_SQ,
@ -308,222 +308,222 @@ struct fw_ri_res_wr {
#endif
};
#define S_FW_RI_RES_WR_NRES 0
#define M_FW_RI_RES_WR_NRES 0xff
#define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES)
#define G_FW_RI_RES_WR_NRES(x) \
(((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES)
#define FW_RI_RES_WR_NRES_S 0
#define FW_RI_RES_WR_NRES_M 0xff
#define FW_RI_RES_WR_NRES_V(x) ((x) << FW_RI_RES_WR_NRES_S)
#define FW_RI_RES_WR_NRES_G(x) \
(((x) >> FW_RI_RES_WR_NRES_S) & FW_RI_RES_WR_NRES_M)
#define S_FW_RI_RES_WR_FETCHSZM 26
#define M_FW_RI_RES_WR_FETCHSZM 0x1
#define V_FW_RI_RES_WR_FETCHSZM(x) ((x) << S_FW_RI_RES_WR_FETCHSZM)
#define G_FW_RI_RES_WR_FETCHSZM(x) \
(((x) >> S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM)
#define F_FW_RI_RES_WR_FETCHSZM V_FW_RI_RES_WR_FETCHSZM(1U)
#define FW_RI_RES_WR_FETCHSZM_S 26
#define FW_RI_RES_WR_FETCHSZM_M 0x1
#define FW_RI_RES_WR_FETCHSZM_V(x) ((x) << FW_RI_RES_WR_FETCHSZM_S)
#define FW_RI_RES_WR_FETCHSZM_G(x) \
(((x) >> FW_RI_RES_WR_FETCHSZM_S) & FW_RI_RES_WR_FETCHSZM_M)
#define FW_RI_RES_WR_FETCHSZM_F FW_RI_RES_WR_FETCHSZM_V(1U)
#define S_FW_RI_RES_WR_STATUSPGNS 25
#define M_FW_RI_RES_WR_STATUSPGNS 0x1
#define V_FW_RI_RES_WR_STATUSPGNS(x) ((x) << S_FW_RI_RES_WR_STATUSPGNS)
#define G_FW_RI_RES_WR_STATUSPGNS(x) \
(((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS)
#define F_FW_RI_RES_WR_STATUSPGNS V_FW_RI_RES_WR_STATUSPGNS(1U)
#define FW_RI_RES_WR_STATUSPGNS_S 25
#define FW_RI_RES_WR_STATUSPGNS_M 0x1
#define FW_RI_RES_WR_STATUSPGNS_V(x) ((x) << FW_RI_RES_WR_STATUSPGNS_S)
#define FW_RI_RES_WR_STATUSPGNS_G(x) \
(((x) >> FW_RI_RES_WR_STATUSPGNS_S) & FW_RI_RES_WR_STATUSPGNS_M)
#define FW_RI_RES_WR_STATUSPGNS_F FW_RI_RES_WR_STATUSPGNS_V(1U)
#define S_FW_RI_RES_WR_STATUSPGRO 24
#define M_FW_RI_RES_WR_STATUSPGRO 0x1
#define V_FW_RI_RES_WR_STATUSPGRO(x) ((x) << S_FW_RI_RES_WR_STATUSPGRO)
#define G_FW_RI_RES_WR_STATUSPGRO(x) \
(((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO)
#define F_FW_RI_RES_WR_STATUSPGRO V_FW_RI_RES_WR_STATUSPGRO(1U)
#define FW_RI_RES_WR_STATUSPGRO_S 24
#define FW_RI_RES_WR_STATUSPGRO_M 0x1
#define FW_RI_RES_WR_STATUSPGRO_V(x) ((x) << FW_RI_RES_WR_STATUSPGRO_S)
#define FW_RI_RES_WR_STATUSPGRO_G(x) \
(((x) >> FW_RI_RES_WR_STATUSPGRO_S) & FW_RI_RES_WR_STATUSPGRO_M)
#define FW_RI_RES_WR_STATUSPGRO_F FW_RI_RES_WR_STATUSPGRO_V(1U)
#define S_FW_RI_RES_WR_FETCHNS 23
#define M_FW_RI_RES_WR_FETCHNS 0x1
#define V_FW_RI_RES_WR_FETCHNS(x) ((x) << S_FW_RI_RES_WR_FETCHNS)
#define G_FW_RI_RES_WR_FETCHNS(x) \
(((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS)
#define F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U)
#define FW_RI_RES_WR_FETCHNS_S 23
#define FW_RI_RES_WR_FETCHNS_M 0x1
#define FW_RI_RES_WR_FETCHNS_V(x) ((x) << FW_RI_RES_WR_FETCHNS_S)
#define FW_RI_RES_WR_FETCHNS_G(x) \
(((x) >> FW_RI_RES_WR_FETCHNS_S) & FW_RI_RES_WR_FETCHNS_M)
#define FW_RI_RES_WR_FETCHNS_F FW_RI_RES_WR_FETCHNS_V(1U)
#define S_FW_RI_RES_WR_FETCHRO 22
#define M_FW_RI_RES_WR_FETCHRO 0x1
#define V_FW_RI_RES_WR_FETCHRO(x) ((x) << S_FW_RI_RES_WR_FETCHRO)
#define G_FW_RI_RES_WR_FETCHRO(x) \
(((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO)
#define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U)
#define FW_RI_RES_WR_FETCHRO_S 22
#define FW_RI_RES_WR_FETCHRO_M 0x1
#define FW_RI_RES_WR_FETCHRO_V(x) ((x) << FW_RI_RES_WR_FETCHRO_S)
#define FW_RI_RES_WR_FETCHRO_G(x) \
(((x) >> FW_RI_RES_WR_FETCHRO_S) & FW_RI_RES_WR_FETCHRO_M)
#define FW_RI_RES_WR_FETCHRO_F FW_RI_RES_WR_FETCHRO_V(1U)
#define S_FW_RI_RES_WR_HOSTFCMODE 20
#define M_FW_RI_RES_WR_HOSTFCMODE 0x3
#define V_FW_RI_RES_WR_HOSTFCMODE(x) ((x) << S_FW_RI_RES_WR_HOSTFCMODE)
#define G_FW_RI_RES_WR_HOSTFCMODE(x) \
(((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE)
#define FW_RI_RES_WR_HOSTFCMODE_S 20
#define FW_RI_RES_WR_HOSTFCMODE_M 0x3
#define FW_RI_RES_WR_HOSTFCMODE_V(x) ((x) << FW_RI_RES_WR_HOSTFCMODE_S)
#define FW_RI_RES_WR_HOSTFCMODE_G(x) \
(((x) >> FW_RI_RES_WR_HOSTFCMODE_S) & FW_RI_RES_WR_HOSTFCMODE_M)
#define S_FW_RI_RES_WR_CPRIO 19
#define M_FW_RI_RES_WR_CPRIO 0x1
#define V_FW_RI_RES_WR_CPRIO(x) ((x) << S_FW_RI_RES_WR_CPRIO)
#define G_FW_RI_RES_WR_CPRIO(x) \
(((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO)
#define F_FW_RI_RES_WR_CPRIO V_FW_RI_RES_WR_CPRIO(1U)
#define FW_RI_RES_WR_CPRIO_S 19
#define FW_RI_RES_WR_CPRIO_M 0x1
#define FW_RI_RES_WR_CPRIO_V(x) ((x) << FW_RI_RES_WR_CPRIO_S)
#define FW_RI_RES_WR_CPRIO_G(x) \
(((x) >> FW_RI_RES_WR_CPRIO_S) & FW_RI_RES_WR_CPRIO_M)
#define FW_RI_RES_WR_CPRIO_F FW_RI_RES_WR_CPRIO_V(1U)
#define S_FW_RI_RES_WR_ONCHIP 18
#define M_FW_RI_RES_WR_ONCHIP 0x1
#define V_FW_RI_RES_WR_ONCHIP(x) ((x) << S_FW_RI_RES_WR_ONCHIP)
#define G_FW_RI_RES_WR_ONCHIP(x) \
(((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP)
#define F_FW_RI_RES_WR_ONCHIP V_FW_RI_RES_WR_ONCHIP(1U)
#define FW_RI_RES_WR_ONCHIP_S 18
#define FW_RI_RES_WR_ONCHIP_M 0x1
#define FW_RI_RES_WR_ONCHIP_V(x) ((x) << FW_RI_RES_WR_ONCHIP_S)
#define FW_RI_RES_WR_ONCHIP_G(x) \
(((x) >> FW_RI_RES_WR_ONCHIP_S) & FW_RI_RES_WR_ONCHIP_M)
#define FW_RI_RES_WR_ONCHIP_F FW_RI_RES_WR_ONCHIP_V(1U)
#define S_FW_RI_RES_WR_PCIECHN 16
#define M_FW_RI_RES_WR_PCIECHN 0x3
#define V_FW_RI_RES_WR_PCIECHN(x) ((x) << S_FW_RI_RES_WR_PCIECHN)
#define G_FW_RI_RES_WR_PCIECHN(x) \
(((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN)
#define FW_RI_RES_WR_PCIECHN_S 16
#define FW_RI_RES_WR_PCIECHN_M 0x3
#define FW_RI_RES_WR_PCIECHN_V(x) ((x) << FW_RI_RES_WR_PCIECHN_S)
#define FW_RI_RES_WR_PCIECHN_G(x) \
(((x) >> FW_RI_RES_WR_PCIECHN_S) & FW_RI_RES_WR_PCIECHN_M)
#define S_FW_RI_RES_WR_IQID 0
#define M_FW_RI_RES_WR_IQID 0xffff
#define V_FW_RI_RES_WR_IQID(x) ((x) << S_FW_RI_RES_WR_IQID)
#define G_FW_RI_RES_WR_IQID(x) \
(((x) >> S_FW_RI_RES_WR_IQID) & M_FW_RI_RES_WR_IQID)
#define FW_RI_RES_WR_IQID_S 0
#define FW_RI_RES_WR_IQID_M 0xffff
#define FW_RI_RES_WR_IQID_V(x) ((x) << FW_RI_RES_WR_IQID_S)
#define FW_RI_RES_WR_IQID_G(x) \
(((x) >> FW_RI_RES_WR_IQID_S) & FW_RI_RES_WR_IQID_M)
#define S_FW_RI_RES_WR_DCAEN 31
#define M_FW_RI_RES_WR_DCAEN 0x1
#define V_FW_RI_RES_WR_DCAEN(x) ((x) << S_FW_RI_RES_WR_DCAEN)
#define G_FW_RI_RES_WR_DCAEN(x) \
(((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN)
#define F_FW_RI_RES_WR_DCAEN V_FW_RI_RES_WR_DCAEN(1U)
#define FW_RI_RES_WR_DCAEN_S 31
#define FW_RI_RES_WR_DCAEN_M 0x1
#define FW_RI_RES_WR_DCAEN_V(x) ((x) << FW_RI_RES_WR_DCAEN_S)
#define FW_RI_RES_WR_DCAEN_G(x) \
(((x) >> FW_RI_RES_WR_DCAEN_S) & FW_RI_RES_WR_DCAEN_M)
#define FW_RI_RES_WR_DCAEN_F FW_RI_RES_WR_DCAEN_V(1U)
#define S_FW_RI_RES_WR_DCACPU 26
#define M_FW_RI_RES_WR_DCACPU 0x1f
#define V_FW_RI_RES_WR_DCACPU(x) ((x) << S_FW_RI_RES_WR_DCACPU)
#define G_FW_RI_RES_WR_DCACPU(x) \
(((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU)
#define FW_RI_RES_WR_DCACPU_S 26
#define FW_RI_RES_WR_DCACPU_M 0x1f
#define FW_RI_RES_WR_DCACPU_V(x) ((x) << FW_RI_RES_WR_DCACPU_S)
#define FW_RI_RES_WR_DCACPU_G(x) \
(((x) >> FW_RI_RES_WR_DCACPU_S) & FW_RI_RES_WR_DCACPU_M)
#define S_FW_RI_RES_WR_FBMIN 23
#define M_FW_RI_RES_WR_FBMIN 0x7
#define V_FW_RI_RES_WR_FBMIN(x) ((x) << S_FW_RI_RES_WR_FBMIN)
#define G_FW_RI_RES_WR_FBMIN(x) \
(((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN)
#define FW_RI_RES_WR_FBMIN_S 23
#define FW_RI_RES_WR_FBMIN_M 0x7
#define FW_RI_RES_WR_FBMIN_V(x) ((x) << FW_RI_RES_WR_FBMIN_S)
#define FW_RI_RES_WR_FBMIN_G(x) \
(((x) >> FW_RI_RES_WR_FBMIN_S) & FW_RI_RES_WR_FBMIN_M)
#define S_FW_RI_RES_WR_FBMAX 20
#define M_FW_RI_RES_WR_FBMAX 0x7
#define V_FW_RI_RES_WR_FBMAX(x) ((x) << S_FW_RI_RES_WR_FBMAX)
#define G_FW_RI_RES_WR_FBMAX(x) \
(((x) >> S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX)
#define FW_RI_RES_WR_FBMAX_S 20
#define FW_RI_RES_WR_FBMAX_M 0x7
#define FW_RI_RES_WR_FBMAX_V(x) ((x) << FW_RI_RES_WR_FBMAX_S)
#define FW_RI_RES_WR_FBMAX_G(x) \
(((x) >> FW_RI_RES_WR_FBMAX_S) & FW_RI_RES_WR_FBMAX_M)
#define S_FW_RI_RES_WR_CIDXFTHRESHO 19
#define M_FW_RI_RES_WR_CIDXFTHRESHO 0x1
#define V_FW_RI_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESHO)
#define G_FW_RI_RES_WR_CIDXFTHRESHO(x) \
(((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO)
#define F_FW_RI_RES_WR_CIDXFTHRESHO V_FW_RI_RES_WR_CIDXFTHRESHO(1U)
#define FW_RI_RES_WR_CIDXFTHRESHO_S 19
#define FW_RI_RES_WR_CIDXFTHRESHO_M 0x1
#define FW_RI_RES_WR_CIDXFTHRESHO_V(x) ((x) << FW_RI_RES_WR_CIDXFTHRESHO_S)
#define FW_RI_RES_WR_CIDXFTHRESHO_G(x) \
(((x) >> FW_RI_RES_WR_CIDXFTHRESHO_S) & FW_RI_RES_WR_CIDXFTHRESHO_M)
#define FW_RI_RES_WR_CIDXFTHRESHO_F FW_RI_RES_WR_CIDXFTHRESHO_V(1U)
#define S_FW_RI_RES_WR_CIDXFTHRESH 16
#define M_FW_RI_RES_WR_CIDXFTHRESH 0x7
#define V_FW_RI_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESH)
#define G_FW_RI_RES_WR_CIDXFTHRESH(x) \
(((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH)
#define FW_RI_RES_WR_CIDXFTHRESH_S 16
#define FW_RI_RES_WR_CIDXFTHRESH_M 0x7
#define FW_RI_RES_WR_CIDXFTHRESH_V(x) ((x) << FW_RI_RES_WR_CIDXFTHRESH_S)
#define FW_RI_RES_WR_CIDXFTHRESH_G(x) \
(((x) >> FW_RI_RES_WR_CIDXFTHRESH_S) & FW_RI_RES_WR_CIDXFTHRESH_M)
#define S_FW_RI_RES_WR_EQSIZE 0
#define M_FW_RI_RES_WR_EQSIZE 0xffff
#define V_FW_RI_RES_WR_EQSIZE(x) ((x) << S_FW_RI_RES_WR_EQSIZE)
#define G_FW_RI_RES_WR_EQSIZE(x) \
(((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE)
#define FW_RI_RES_WR_EQSIZE_S 0
#define FW_RI_RES_WR_EQSIZE_M 0xffff
#define FW_RI_RES_WR_EQSIZE_V(x) ((x) << FW_RI_RES_WR_EQSIZE_S)
#define FW_RI_RES_WR_EQSIZE_G(x) \
(((x) >> FW_RI_RES_WR_EQSIZE_S) & FW_RI_RES_WR_EQSIZE_M)
#define S_FW_RI_RES_WR_IQANDST 15
#define M_FW_RI_RES_WR_IQANDST 0x1
#define V_FW_RI_RES_WR_IQANDST(x) ((x) << S_FW_RI_RES_WR_IQANDST)
#define G_FW_RI_RES_WR_IQANDST(x) \
(((x) >> S_FW_RI_RES_WR_IQANDST) & M_FW_RI_RES_WR_IQANDST)
#define F_FW_RI_RES_WR_IQANDST V_FW_RI_RES_WR_IQANDST(1U)
#define FW_RI_RES_WR_IQANDST_S 15
#define FW_RI_RES_WR_IQANDST_M 0x1
#define FW_RI_RES_WR_IQANDST_V(x) ((x) << FW_RI_RES_WR_IQANDST_S)
#define FW_RI_RES_WR_IQANDST_G(x) \
(((x) >> FW_RI_RES_WR_IQANDST_S) & FW_RI_RES_WR_IQANDST_M)
#define FW_RI_RES_WR_IQANDST_F FW_RI_RES_WR_IQANDST_V(1U)
#define S_FW_RI_RES_WR_IQANUS 14
#define M_FW_RI_RES_WR_IQANUS 0x1
#define V_FW_RI_RES_WR_IQANUS(x) ((x) << S_FW_RI_RES_WR_IQANUS)
#define G_FW_RI_RES_WR_IQANUS(x) \
(((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS)
#define F_FW_RI_RES_WR_IQANUS V_FW_RI_RES_WR_IQANUS(1U)
#define FW_RI_RES_WR_IQANUS_S 14
#define FW_RI_RES_WR_IQANUS_M 0x1
#define FW_RI_RES_WR_IQANUS_V(x) ((x) << FW_RI_RES_WR_IQANUS_S)
#define FW_RI_RES_WR_IQANUS_G(x) \
(((x) >> FW_RI_RES_WR_IQANUS_S) & FW_RI_RES_WR_IQANUS_M)
#define FW_RI_RES_WR_IQANUS_F FW_RI_RES_WR_IQANUS_V(1U)
#define S_FW_RI_RES_WR_IQANUD 12
#define M_FW_RI_RES_WR_IQANUD 0x3
#define V_FW_RI_RES_WR_IQANUD(x) ((x) << S_FW_RI_RES_WR_IQANUD)
#define G_FW_RI_RES_WR_IQANUD(x) \
(((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD)
#define FW_RI_RES_WR_IQANUD_S 12
#define FW_RI_RES_WR_IQANUD_M 0x3
#define FW_RI_RES_WR_IQANUD_V(x) ((x) << FW_RI_RES_WR_IQANUD_S)
#define FW_RI_RES_WR_IQANUD_G(x) \
(((x) >> FW_RI_RES_WR_IQANUD_S) & FW_RI_RES_WR_IQANUD_M)
#define S_FW_RI_RES_WR_IQANDSTINDEX 0
#define M_FW_RI_RES_WR_IQANDSTINDEX 0xfff
#define V_FW_RI_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_RI_RES_WR_IQANDSTINDEX)
#define G_FW_RI_RES_WR_IQANDSTINDEX(x) \
(((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX)
#define FW_RI_RES_WR_IQANDSTINDEX_S 0
#define FW_RI_RES_WR_IQANDSTINDEX_M 0xfff
#define FW_RI_RES_WR_IQANDSTINDEX_V(x) ((x) << FW_RI_RES_WR_IQANDSTINDEX_S)
#define FW_RI_RES_WR_IQANDSTINDEX_G(x) \
(((x) >> FW_RI_RES_WR_IQANDSTINDEX_S) & FW_RI_RES_WR_IQANDSTINDEX_M)
#define S_FW_RI_RES_WR_IQDROPRSS 15
#define M_FW_RI_RES_WR_IQDROPRSS 0x1
#define V_FW_RI_RES_WR_IQDROPRSS(x) ((x) << S_FW_RI_RES_WR_IQDROPRSS)
#define G_FW_RI_RES_WR_IQDROPRSS(x) \
(((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS)
#define F_FW_RI_RES_WR_IQDROPRSS V_FW_RI_RES_WR_IQDROPRSS(1U)
#define FW_RI_RES_WR_IQDROPRSS_S 15
#define FW_RI_RES_WR_IQDROPRSS_M 0x1
#define FW_RI_RES_WR_IQDROPRSS_V(x) ((x) << FW_RI_RES_WR_IQDROPRSS_S)
#define FW_RI_RES_WR_IQDROPRSS_G(x) \
(((x) >> FW_RI_RES_WR_IQDROPRSS_S) & FW_RI_RES_WR_IQDROPRSS_M)
#define FW_RI_RES_WR_IQDROPRSS_F FW_RI_RES_WR_IQDROPRSS_V(1U)
#define S_FW_RI_RES_WR_IQGTSMODE 14
#define M_FW_RI_RES_WR_IQGTSMODE 0x1
#define V_FW_RI_RES_WR_IQGTSMODE(x) ((x) << S_FW_RI_RES_WR_IQGTSMODE)
#define G_FW_RI_RES_WR_IQGTSMODE(x) \
(((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE)
#define F_FW_RI_RES_WR_IQGTSMODE V_FW_RI_RES_WR_IQGTSMODE(1U)
#define FW_RI_RES_WR_IQGTSMODE_S 14
#define FW_RI_RES_WR_IQGTSMODE_M 0x1
#define FW_RI_RES_WR_IQGTSMODE_V(x) ((x) << FW_RI_RES_WR_IQGTSMODE_S)
#define FW_RI_RES_WR_IQGTSMODE_G(x) \
(((x) >> FW_RI_RES_WR_IQGTSMODE_S) & FW_RI_RES_WR_IQGTSMODE_M)
#define FW_RI_RES_WR_IQGTSMODE_F FW_RI_RES_WR_IQGTSMODE_V(1U)
#define S_FW_RI_RES_WR_IQPCIECH 12
#define M_FW_RI_RES_WR_IQPCIECH 0x3
#define V_FW_RI_RES_WR_IQPCIECH(x) ((x) << S_FW_RI_RES_WR_IQPCIECH)
#define G_FW_RI_RES_WR_IQPCIECH(x) \
(((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH)
#define FW_RI_RES_WR_IQPCIECH_S 12
#define FW_RI_RES_WR_IQPCIECH_M 0x3
#define FW_RI_RES_WR_IQPCIECH_V(x) ((x) << FW_RI_RES_WR_IQPCIECH_S)
#define FW_RI_RES_WR_IQPCIECH_G(x) \
(((x) >> FW_RI_RES_WR_IQPCIECH_S) & FW_RI_RES_WR_IQPCIECH_M)
#define S_FW_RI_RES_WR_IQDCAEN 11
#define M_FW_RI_RES_WR_IQDCAEN 0x1
#define V_FW_RI_RES_WR_IQDCAEN(x) ((x) << S_FW_RI_RES_WR_IQDCAEN)
#define G_FW_RI_RES_WR_IQDCAEN(x) \
(((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN)
#define F_FW_RI_RES_WR_IQDCAEN V_FW_RI_RES_WR_IQDCAEN(1U)
#define FW_RI_RES_WR_IQDCAEN_S 11
#define FW_RI_RES_WR_IQDCAEN_M 0x1
#define FW_RI_RES_WR_IQDCAEN_V(x) ((x) << FW_RI_RES_WR_IQDCAEN_S)
#define FW_RI_RES_WR_IQDCAEN_G(x) \
(((x) >> FW_RI_RES_WR_IQDCAEN_S) & FW_RI_RES_WR_IQDCAEN_M)
#define FW_RI_RES_WR_IQDCAEN_F FW_RI_RES_WR_IQDCAEN_V(1U)
#define S_FW_RI_RES_WR_IQDCACPU 6
#define M_FW_RI_RES_WR_IQDCACPU 0x1f
#define V_FW_RI_RES_WR_IQDCACPU(x) ((x) << S_FW_RI_RES_WR_IQDCACPU)
#define G_FW_RI_RES_WR_IQDCACPU(x) \
(((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU)
#define FW_RI_RES_WR_IQDCACPU_S 6
#define FW_RI_RES_WR_IQDCACPU_M 0x1f
#define FW_RI_RES_WR_IQDCACPU_V(x) ((x) << FW_RI_RES_WR_IQDCACPU_S)
#define FW_RI_RES_WR_IQDCACPU_G(x) \
(((x) >> FW_RI_RES_WR_IQDCACPU_S) & FW_RI_RES_WR_IQDCACPU_M)
#define S_FW_RI_RES_WR_IQINTCNTTHRESH 4
#define M_FW_RI_RES_WR_IQINTCNTTHRESH 0x3
#define V_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
((x) << S_FW_RI_RES_WR_IQINTCNTTHRESH)
#define G_FW_RI_RES_WR_IQINTCNTTHRESH(x) \
(((x) >> S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH)
#define FW_RI_RES_WR_IQINTCNTTHRESH_S 4
#define FW_RI_RES_WR_IQINTCNTTHRESH_M 0x3
#define FW_RI_RES_WR_IQINTCNTTHRESH_V(x) \
((x) << FW_RI_RES_WR_IQINTCNTTHRESH_S)
#define FW_RI_RES_WR_IQINTCNTTHRESH_G(x) \
(((x) >> FW_RI_RES_WR_IQINTCNTTHRESH_S) & FW_RI_RES_WR_IQINTCNTTHRESH_M)
#define S_FW_RI_RES_WR_IQO 3
#define M_FW_RI_RES_WR_IQO 0x1
#define V_FW_RI_RES_WR_IQO(x) ((x) << S_FW_RI_RES_WR_IQO)
#define G_FW_RI_RES_WR_IQO(x) \
(((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO)
#define F_FW_RI_RES_WR_IQO V_FW_RI_RES_WR_IQO(1U)
#define FW_RI_RES_WR_IQO_S 3
#define FW_RI_RES_WR_IQO_M 0x1
#define FW_RI_RES_WR_IQO_V(x) ((x) << FW_RI_RES_WR_IQO_S)
#define FW_RI_RES_WR_IQO_G(x) \
(((x) >> FW_RI_RES_WR_IQO_S) & FW_RI_RES_WR_IQO_M)
#define FW_RI_RES_WR_IQO_F FW_RI_RES_WR_IQO_V(1U)
#define S_FW_RI_RES_WR_IQCPRIO 2
#define M_FW_RI_RES_WR_IQCPRIO 0x1
#define V_FW_RI_RES_WR_IQCPRIO(x) ((x) << S_FW_RI_RES_WR_IQCPRIO)
#define G_FW_RI_RES_WR_IQCPRIO(x) \
(((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO)
#define F_FW_RI_RES_WR_IQCPRIO V_FW_RI_RES_WR_IQCPRIO(1U)
#define FW_RI_RES_WR_IQCPRIO_S 2
#define FW_RI_RES_WR_IQCPRIO_M 0x1
#define FW_RI_RES_WR_IQCPRIO_V(x) ((x) << FW_RI_RES_WR_IQCPRIO_S)
#define FW_RI_RES_WR_IQCPRIO_G(x) \
(((x) >> FW_RI_RES_WR_IQCPRIO_S) & FW_RI_RES_WR_IQCPRIO_M)
#define FW_RI_RES_WR_IQCPRIO_F FW_RI_RES_WR_IQCPRIO_V(1U)
#define S_FW_RI_RES_WR_IQESIZE 0
#define M_FW_RI_RES_WR_IQESIZE 0x3
#define V_FW_RI_RES_WR_IQESIZE(x) ((x) << S_FW_RI_RES_WR_IQESIZE)
#define G_FW_RI_RES_WR_IQESIZE(x) \
(((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE)
#define FW_RI_RES_WR_IQESIZE_S 0
#define FW_RI_RES_WR_IQESIZE_M 0x3
#define FW_RI_RES_WR_IQESIZE_V(x) ((x) << FW_RI_RES_WR_IQESIZE_S)
#define FW_RI_RES_WR_IQESIZE_G(x) \
(((x) >> FW_RI_RES_WR_IQESIZE_S) & FW_RI_RES_WR_IQESIZE_M)
#define S_FW_RI_RES_WR_IQNS 31
#define M_FW_RI_RES_WR_IQNS 0x1
#define V_FW_RI_RES_WR_IQNS(x) ((x) << S_FW_RI_RES_WR_IQNS)
#define G_FW_RI_RES_WR_IQNS(x) \
(((x) >> S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS)
#define F_FW_RI_RES_WR_IQNS V_FW_RI_RES_WR_IQNS(1U)
#define FW_RI_RES_WR_IQNS_S 31
#define FW_RI_RES_WR_IQNS_M 0x1
#define FW_RI_RES_WR_IQNS_V(x) ((x) << FW_RI_RES_WR_IQNS_S)
#define FW_RI_RES_WR_IQNS_G(x) \
(((x) >> FW_RI_RES_WR_IQNS_S) & FW_RI_RES_WR_IQNS_M)
#define FW_RI_RES_WR_IQNS_F FW_RI_RES_WR_IQNS_V(1U)
#define S_FW_RI_RES_WR_IQRO 30
#define M_FW_RI_RES_WR_IQRO 0x1
#define V_FW_RI_RES_WR_IQRO(x) ((x) << S_FW_RI_RES_WR_IQRO)
#define G_FW_RI_RES_WR_IQRO(x) \
(((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO)
#define F_FW_RI_RES_WR_IQRO V_FW_RI_RES_WR_IQRO(1U)
#define FW_RI_RES_WR_IQRO_S 30
#define FW_RI_RES_WR_IQRO_M 0x1
#define FW_RI_RES_WR_IQRO_V(x) ((x) << FW_RI_RES_WR_IQRO_S)
#define FW_RI_RES_WR_IQRO_G(x) \
(((x) >> FW_RI_RES_WR_IQRO_S) & FW_RI_RES_WR_IQRO_M)
#define FW_RI_RES_WR_IQRO_F FW_RI_RES_WR_IQRO_V(1U)
struct fw_ri_rdma_write_wr {
__u8 opcode;
@ -562,11 +562,11 @@ struct fw_ri_send_wr {
#endif
};
#define S_FW_RI_SEND_WR_SENDOP 0
#define M_FW_RI_SEND_WR_SENDOP 0xf
#define V_FW_RI_SEND_WR_SENDOP(x) ((x) << S_FW_RI_SEND_WR_SENDOP)
#define G_FW_RI_SEND_WR_SENDOP(x) \
(((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP)
#define FW_RI_SEND_WR_SENDOP_S 0
#define FW_RI_SEND_WR_SENDOP_M 0xf
#define FW_RI_SEND_WR_SENDOP_V(x) ((x) << FW_RI_SEND_WR_SENDOP_S)
#define FW_RI_SEND_WR_SENDOP_G(x) \
(((x) >> FW_RI_SEND_WR_SENDOP_S) & FW_RI_SEND_WR_SENDOP_M)
struct fw_ri_rdma_read_wr {
__u8 opcode;
@ -612,25 +612,25 @@ struct fw_ri_bind_mw_wr {
__be64 r4;
};
#define S_FW_RI_BIND_MW_WR_QPBINDE 6
#define M_FW_RI_BIND_MW_WR_QPBINDE 0x1
#define V_FW_RI_BIND_MW_WR_QPBINDE(x) ((x) << S_FW_RI_BIND_MW_WR_QPBINDE)
#define G_FW_RI_BIND_MW_WR_QPBINDE(x) \
(((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE)
#define F_FW_RI_BIND_MW_WR_QPBINDE V_FW_RI_BIND_MW_WR_QPBINDE(1U)
#define FW_RI_BIND_MW_WR_QPBINDE_S 6
#define FW_RI_BIND_MW_WR_QPBINDE_M 0x1
#define FW_RI_BIND_MW_WR_QPBINDE_V(x) ((x) << FW_RI_BIND_MW_WR_QPBINDE_S)
#define FW_RI_BIND_MW_WR_QPBINDE_G(x) \
(((x) >> FW_RI_BIND_MW_WR_QPBINDE_S) & FW_RI_BIND_MW_WR_QPBINDE_M)
#define FW_RI_BIND_MW_WR_QPBINDE_F FW_RI_BIND_MW_WR_QPBINDE_V(1U)
#define S_FW_RI_BIND_MW_WR_NS 5
#define M_FW_RI_BIND_MW_WR_NS 0x1
#define V_FW_RI_BIND_MW_WR_NS(x) ((x) << S_FW_RI_BIND_MW_WR_NS)
#define G_FW_RI_BIND_MW_WR_NS(x) \
(((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS)
#define F_FW_RI_BIND_MW_WR_NS V_FW_RI_BIND_MW_WR_NS(1U)
#define FW_RI_BIND_MW_WR_NS_S 5
#define FW_RI_BIND_MW_WR_NS_M 0x1
#define FW_RI_BIND_MW_WR_NS_V(x) ((x) << FW_RI_BIND_MW_WR_NS_S)
#define FW_RI_BIND_MW_WR_NS_G(x) \
(((x) >> FW_RI_BIND_MW_WR_NS_S) & FW_RI_BIND_MW_WR_NS_M)
#define FW_RI_BIND_MW_WR_NS_F FW_RI_BIND_MW_WR_NS_V(1U)
#define S_FW_RI_BIND_MW_WR_DCACPU 0
#define M_FW_RI_BIND_MW_WR_DCACPU 0x1f
#define V_FW_RI_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_BIND_MW_WR_DCACPU)
#define G_FW_RI_BIND_MW_WR_DCACPU(x) \
(((x) >> S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU)
#define FW_RI_BIND_MW_WR_DCACPU_S 0
#define FW_RI_BIND_MW_WR_DCACPU_M 0x1f
#define FW_RI_BIND_MW_WR_DCACPU_V(x) ((x) << FW_RI_BIND_MW_WR_DCACPU_S)
#define FW_RI_BIND_MW_WR_DCACPU_G(x) \
(((x) >> FW_RI_BIND_MW_WR_DCACPU_S) & FW_RI_BIND_MW_WR_DCACPU_M)
struct fw_ri_fr_nsmr_wr {
__u8 opcode;
@ -649,25 +649,25 @@ struct fw_ri_fr_nsmr_wr {
__be32 va_lo_fbo;
};
#define S_FW_RI_FR_NSMR_WR_QPBINDE 6
#define M_FW_RI_FR_NSMR_WR_QPBINDE 0x1
#define V_FW_RI_FR_NSMR_WR_QPBINDE(x) ((x) << S_FW_RI_FR_NSMR_WR_QPBINDE)
#define G_FW_RI_FR_NSMR_WR_QPBINDE(x) \
(((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE)
#define F_FW_RI_FR_NSMR_WR_QPBINDE V_FW_RI_FR_NSMR_WR_QPBINDE(1U)
#define FW_RI_FR_NSMR_WR_QPBINDE_S 6
#define FW_RI_FR_NSMR_WR_QPBINDE_M 0x1
#define FW_RI_FR_NSMR_WR_QPBINDE_V(x) ((x) << FW_RI_FR_NSMR_WR_QPBINDE_S)
#define FW_RI_FR_NSMR_WR_QPBINDE_G(x) \
(((x) >> FW_RI_FR_NSMR_WR_QPBINDE_S) & FW_RI_FR_NSMR_WR_QPBINDE_M)
#define FW_RI_FR_NSMR_WR_QPBINDE_F FW_RI_FR_NSMR_WR_QPBINDE_V(1U)
#define S_FW_RI_FR_NSMR_WR_NS 5
#define M_FW_RI_FR_NSMR_WR_NS 0x1
#define V_FW_RI_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_FR_NSMR_WR_NS)
#define G_FW_RI_FR_NSMR_WR_NS(x) \
(((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS)
#define F_FW_RI_FR_NSMR_WR_NS V_FW_RI_FR_NSMR_WR_NS(1U)
#define FW_RI_FR_NSMR_WR_NS_S 5
#define FW_RI_FR_NSMR_WR_NS_M 0x1
#define FW_RI_FR_NSMR_WR_NS_V(x) ((x) << FW_RI_FR_NSMR_WR_NS_S)
#define FW_RI_FR_NSMR_WR_NS_G(x) \
(((x) >> FW_RI_FR_NSMR_WR_NS_S) & FW_RI_FR_NSMR_WR_NS_M)
#define FW_RI_FR_NSMR_WR_NS_F FW_RI_FR_NSMR_WR_NS_V(1U)
#define S_FW_RI_FR_NSMR_WR_DCACPU 0
#define M_FW_RI_FR_NSMR_WR_DCACPU 0x1f
#define V_FW_RI_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_FR_NSMR_WR_DCACPU)
#define G_FW_RI_FR_NSMR_WR_DCACPU(x) \
(((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU)
#define FW_RI_FR_NSMR_WR_DCACPU_S 0
#define FW_RI_FR_NSMR_WR_DCACPU_M 0x1f
#define FW_RI_FR_NSMR_WR_DCACPU_V(x) ((x) << FW_RI_FR_NSMR_WR_DCACPU_S)
#define FW_RI_FR_NSMR_WR_DCACPU_G(x) \
(((x) >> FW_RI_FR_NSMR_WR_DCACPU_S) & FW_RI_FR_NSMR_WR_DCACPU_M)
struct fw_ri_inv_lstag_wr {
__u8 opcode;
@ -740,18 +740,18 @@ struct fw_ri_wr {
} u;
};
#define S_FW_RI_WR_MPAREQBIT 7
#define M_FW_RI_WR_MPAREQBIT 0x1
#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT)
#define G_FW_RI_WR_MPAREQBIT(x) \
(((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT)
#define F_FW_RI_WR_MPAREQBIT V_FW_RI_WR_MPAREQBIT(1U)
#define FW_RI_WR_MPAREQBIT_S 7
#define FW_RI_WR_MPAREQBIT_M 0x1
#define FW_RI_WR_MPAREQBIT_V(x) ((x) << FW_RI_WR_MPAREQBIT_S)
#define FW_RI_WR_MPAREQBIT_G(x) \
(((x) >> FW_RI_WR_MPAREQBIT_S) & FW_RI_WR_MPAREQBIT_M)
#define FW_RI_WR_MPAREQBIT_F FW_RI_WR_MPAREQBIT_V(1U)
#define S_FW_RI_WR_P2PTYPE 0
#define M_FW_RI_WR_P2PTYPE 0xf
#define V_FW_RI_WR_P2PTYPE(x) ((x) << S_FW_RI_WR_P2PTYPE)
#define G_FW_RI_WR_P2PTYPE(x) \
(((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
#define FW_RI_WR_P2PTYPE_S 0
#define FW_RI_WR_P2PTYPE_M 0xf
#define FW_RI_WR_P2PTYPE_V(x) ((x) << FW_RI_WR_P2PTYPE_S)
#define FW_RI_WR_P2PTYPE_G(x) \
(((x) >> FW_RI_WR_P2PTYPE_S) & FW_RI_WR_P2PTYPE_M)
struct tcp_options {
__be16 mss;
@ -783,58 +783,58 @@ struct cpl_pass_accept_req {
};
/* cpl_pass_accept_req.hdr_len fields */
#define S_SYN_RX_CHAN 0
#define M_SYN_RX_CHAN 0xF
#define V_SYN_RX_CHAN(x) ((x) << S_SYN_RX_CHAN)
#define G_SYN_RX_CHAN(x) (((x) >> S_SYN_RX_CHAN) & M_SYN_RX_CHAN)
#define SYN_RX_CHAN_S 0
#define SYN_RX_CHAN_M 0xF
#define SYN_RX_CHAN_V(x) ((x) << SYN_RX_CHAN_S)
#define SYN_RX_CHAN_G(x) (((x) >> SYN_RX_CHAN_S) & SYN_RX_CHAN_M)
#define S_TCP_HDR_LEN 10
#define M_TCP_HDR_LEN 0x3F
#define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN)
#define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN)
#define TCP_HDR_LEN_S 10
#define TCP_HDR_LEN_M 0x3F
#define TCP_HDR_LEN_V(x) ((x) << TCP_HDR_LEN_S)
#define TCP_HDR_LEN_G(x) (((x) >> TCP_HDR_LEN_S) & TCP_HDR_LEN_M)
#define S_IP_HDR_LEN 16
#define M_IP_HDR_LEN 0x3FF
#define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN)
#define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN)
#define IP_HDR_LEN_S 16
#define IP_HDR_LEN_M 0x3FF
#define IP_HDR_LEN_V(x) ((x) << IP_HDR_LEN_S)
#define IP_HDR_LEN_G(x) (((x) >> IP_HDR_LEN_S) & IP_HDR_LEN_M)
#define S_ETH_HDR_LEN 26
#define M_ETH_HDR_LEN 0x1F
#define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN)
#define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN)
#define ETH_HDR_LEN_S 26
#define ETH_HDR_LEN_M 0x1F
#define ETH_HDR_LEN_V(x) ((x) << ETH_HDR_LEN_S)
#define ETH_HDR_LEN_G(x) (((x) >> ETH_HDR_LEN_S) & ETH_HDR_LEN_M)
/* cpl_pass_accept_req.l2info fields */
#define S_SYN_MAC_IDX 0
#define M_SYN_MAC_IDX 0x1FF
#define V_SYN_MAC_IDX(x) ((x) << S_SYN_MAC_IDX)
#define G_SYN_MAC_IDX(x) (((x) >> S_SYN_MAC_IDX) & M_SYN_MAC_IDX)
#define SYN_MAC_IDX_S 0
#define SYN_MAC_IDX_M 0x1FF
#define SYN_MAC_IDX_V(x) ((x) << SYN_MAC_IDX_S)
#define SYN_MAC_IDX_G(x) (((x) >> SYN_MAC_IDX_S) & SYN_MAC_IDX_M)
#define S_SYN_XACT_MATCH 9
#define V_SYN_XACT_MATCH(x) ((x) << S_SYN_XACT_MATCH)
#define F_SYN_XACT_MATCH V_SYN_XACT_MATCH(1U)
#define SYN_XACT_MATCH_S 9
#define SYN_XACT_MATCH_V(x) ((x) << SYN_XACT_MATCH_S)
#define SYN_XACT_MATCH_F SYN_XACT_MATCH_V(1U)
#define S_SYN_INTF 12
#define M_SYN_INTF 0xF
#define V_SYN_INTF(x) ((x) << S_SYN_INTF)
#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
#define SYN_INTF_S 12
#define SYN_INTF_M 0xF
#define SYN_INTF_V(x) ((x) << SYN_INTF_S)
#define SYN_INTF_G(x) (((x) >> SYN_INTF_S) & SYN_INTF_M)
struct ulptx_idata {
__be32 cmd_more;
__be32 len;
};
#define S_ULPTX_NSGE 0
#define M_ULPTX_NSGE 0xFFFF
#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
#define ULPTX_NSGE_S 0
#define ULPTX_NSGE_M 0xFFFF
#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
#define S_RX_DACK_MODE 29
#define M_RX_DACK_MODE 0x3
#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
#define RX_DACK_MODE_S 29
#define RX_DACK_MODE_M 0x3
#define RX_DACK_MODE_V(x) ((x) << RX_DACK_MODE_S)
#define RX_DACK_MODE_G(x) (((x) >> RX_DACK_MODE_S) & RX_DACK_MODE_M)
#define S_RX_DACK_CHANGE 31
#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
#define RX_DACK_CHANGE_S 31
#define RX_DACK_CHANGE_V(x) ((x) << RX_DACK_CHANGE_S)
#define RX_DACK_CHANGE_F RX_DACK_CHANGE_V(1U)
enum { /* TCP congestion control algorithms */
CONG_ALG_RENO,
@ -843,10 +843,10 @@ enum { /* TCP congestion control algorithms */
CONG_ALG_HIGHSPEED
};
#define S_CONG_CNTRL 14
#define M_CONG_CNTRL 0x3
#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
#define CONG_CNTRL_S 14
#define CONG_CNTRL_M 0x3
#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
#define CONG_CNTRL_VALID (1 << 18)
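The register macros renamed throughout this patch all follow one shift/mask pattern: NAME_S is the bit offset, NAME_M the field mask, NAME_V(x) packs a value, NAME_G(x) extracts one, and NAME_F is the single-bit flag form. A minimal usage sketch against the CONG_CNTRL macros above; the two helper functions are illustrative only, not part of the driver:

#include <stdint.h>

/* Pack a congestion-control algorithm (CONG_ALG_*) into an option word
 * and mark the field valid; the field occupies bits 15:14. */
static uint32_t set_cong_alg(uint32_t opt, unsigned int alg)
{
	opt &= ~CONG_CNTRL_V(CONG_CNTRL_M);	/* clear the 2-bit field */
	opt |= CONG_CNTRL_V(alg & CONG_CNTRL_M); /* shift alg into place */
	return opt | CONG_CNTRL_VALID;
}

/* Recover the algorithm from the option word: (opt >> 14) & 0x3. */
static unsigned int get_cong_alg(uint32_t opt)
{
	return CONG_CNTRL_G(opt);
}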

View File

@ -36,6 +36,7 @@
#include <linux/slab.h>
#include <linux/inet.h>
#include <linux/string.h>
#include <linux/mlx4/driver.h>
#include "mlx4_ib.h"

View File

@ -154,7 +154,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
continue;
slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
if (slave_id >= dev->dev->num_vfs + 1)
if (slave_id >= dev->dev->persist->num_vfs + 1)
return;
tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
form_cache_ag = get_cached_alias_guid(dev, port_num,

View File

@ -188,6 +188,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
spin_lock_init(&cq->lock);
cq->resize_buf = NULL;
cq->resize_umem = NULL;
INIT_LIST_HEAD(&cq->send_qp_list);
INIT_LIST_HEAD(&cq->recv_qp_list);
if (context) {
struct mlx4_ib_create_cq ucmd;
@ -594,6 +596,55 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
return 0;
}
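/* Walk one work queue (send or receive) of a QP and synthesize
* IB_WC_WR_FLUSH_ERR completions for every posted-but-uncompleted WQE,
* so that consumers can drain a CQ after a device error. */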
static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries,
struct ib_wc *wc, int *npolled, int is_send)
{
struct mlx4_ib_wq *wq;
unsigned cur;
int i;
wq = is_send ? &qp->sq : &qp->rq;
cur = wq->head - wq->tail;
if (cur == 0)
return;
for (i = 0; i < cur && *npolled < num_entries; i++) {
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
wc->status = IB_WC_WR_FLUSH_ERR;
wc->vendor_err = MLX4_CQE_SYNDROME_WR_FLUSH_ERR;
wq->tail++;
(*npolled)++;
wc->qp = &qp->ibqp;
wc++;
}
}
static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
struct ib_wc *wc, int *npolled)
{
struct mlx4_ib_qp *qp;
*npolled = 0;
/* Find uncompleted WQEs belonging to that cq and return
* simulated FLUSH_ERR completions
*/
list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
mlx4_ib_qp_sw_comp(qp, num_entries, wc, npolled, 1);
if (*npolled >= num_entries)
goto out;
}
list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) {
mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0);
if (*npolled >= num_entries)
goto out;
}
out:
return;
}
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
struct mlx4_ib_qp **cur_qp,
struct ib_wc *wc)
@ -836,8 +887,13 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
unsigned long flags;
int npolled;
int err = 0;
struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
spin_lock_irqsave(&cq->lock, flags);
if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
goto out;
}
for (npolled = 0; npolled < num_entries; ++npolled) {
err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
@ -847,6 +903,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
mlx4_cq_set_ci(&cq->mcq);
out:
spin_unlock_irqrestore(&cq->lock, flags);
if (err == 0 || err == -EAGAIN)

View File

@ -1951,7 +1951,8 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
ctx->ib_dev = &dev->ib_dev;
for (i = 0;
i < min(dev->dev->caps.sqp_demux, (u16)(dev->dev->num_vfs + 1));
i < min(dev->dev->caps.sqp_demux,
(u16)(dev->dev->persist->num_vfs + 1));
i++) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev->dev, i);

View File

@ -198,7 +198,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
0xffffff;
props->vendor_part_id = dev->dev->pdev->device;
props->vendor_part_id = dev->dev->persist->pdev->device;
props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
@ -351,6 +351,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
enum ib_mtu tmp;
struct mlx4_cmd_mailbox *mailbox;
int err = 0;
int is_bonded = mlx4_is_bonded(mdev->dev);
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
@ -374,8 +375,12 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->state = IB_PORT_DOWN;
props->phys_state = state_to_phys_state(props->state);
props->active_mtu = IB_MTU_256;
if (is_bonded)
rtnl_lock(); /* required to get upper dev */
spin_lock_bh(&iboe->lock);
ndev = iboe->netdevs[port - 1];
if (ndev && is_bonded)
ndev = netdev_master_upper_dev_get(ndev);
if (!ndev)
goto out_unlock;
@ -387,6 +392,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->phys_state = state_to_phys_state(props->state);
out_unlock:
spin_unlock_bh(&iboe->lock);
if (is_bonded)
rtnl_unlock();
out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return err;
@ -844,7 +851,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
struct mlx4_ib_steering {
struct list_head list;
u64 reg_id;
struct mlx4_flow_reg_id reg_id;
union ib_gid gid;
};
@ -1135,9 +1142,11 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain)
{
int err = 0, i = 0;
int err = 0, i = 0, j = 0;
struct mlx4_ib_flow *mflow;
enum mlx4_net_trans_promisc_mode type[2];
struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
int is_bonded = mlx4_is_bonded(dev);
memset(type, 0, sizeof(type));
@ -1172,26 +1181,58 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
while (i < ARRAY_SIZE(type) && type[i]) {
err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
&mflow->reg_id[i]);
&mflow->reg_id[i].id);
if (err)
goto err_create_flow;
i++;
if (is_bonded) {
/* Application always sees one port so the mirror rule
* must be on port #2
*/
flow_attr->port = 2;
err = __mlx4_ib_create_flow(qp, flow_attr,
domain, type[j],
&mflow->reg_id[j].mirror);
flow_attr->port = 1;
if (err)
goto err_create_flow;
j++;
}
}
if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
&mflow->reg_id[i].id);
if (err)
goto err_create_flow;
i++;
if (is_bonded) {
flow_attr->port = 2;
err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
&mflow->reg_id[j].mirror);
flow_attr->port = 1;
if (err)
goto err_create_flow;
j++;
}
/* function to create mirror rule */
}
return &mflow->ibflow;
err_create_flow:
while (i) {
(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]);
(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
mflow->reg_id[i].id);
i--;
}
while (j) {
(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
mflow->reg_id[j].mirror);
j--;
}
err_free:
kfree(mflow);
return ERR_PTR(err);
@ -1204,10 +1245,16 @@ static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
struct mlx4_ib_flow *mflow = to_mflow(flow_id);
while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
if (err)
ret = err;
if (mflow->reg_id[i].mirror) {
err = __mlx4_ib_destroy_flow(mdev->dev,
mflow->reg_id[i].mirror);
if (err)
ret = err;
}
i++;
}
@ -1219,11 +1266,12 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
int err;
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
struct mlx4_dev *dev = mdev->dev;
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
u64 reg_id;
struct mlx4_ib_steering *ib_steering = NULL;
enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
struct mlx4_flow_reg_id reg_id;
if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED) {
@ -1235,10 +1283,21 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
!!(mqp->flags &
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
prot, &reg_id);
prot, &reg_id.id);
if (err)
goto err_malloc;
reg_id.mirror = 0;
if (mlx4_is_bonded(dev)) {
err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
(mqp->port == 1) ? 2 : 1,
!!(mqp->flags &
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
prot, &reg_id.mirror);
if (err)
goto err_add;
}
err = add_gid_entry(ibqp, gid);
if (err)
goto err_add;
@ -1254,7 +1313,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
err_add:
mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
prot, reg_id);
prot, reg_id.id);
if (reg_id.mirror)
mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
prot, reg_id.mirror);
err_malloc:
kfree(ib_steering);
@ -1281,10 +1343,12 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
int err;
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
struct mlx4_dev *dev = mdev->dev;
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
struct net_device *ndev;
struct mlx4_ib_gid_entry *ge;
u64 reg_id = 0;
struct mlx4_flow_reg_id reg_id = {0, 0};
enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
@ -1309,10 +1373,17 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
}
err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
prot, reg_id);
prot, reg_id.id);
if (err)
return err;
if (mlx4_is_bonded(dev)) {
err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
prot, reg_id.mirror);
if (err)
return err;
}
mutex_lock(&mqp->mutex);
ge = find_gid_entry(mqp, gid->raw);
if (ge) {
@ -1376,7 +1447,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
{
struct mlx4_ib_dev *dev =
container_of(device, struct mlx4_ib_dev, ib_dev.dev);
return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}
static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
@ -1440,6 +1511,7 @@ static void update_gids_task(struct work_struct *work)
union ib_gid *gids;
int err;
struct mlx4_dev *dev = gw->dev->dev;
int is_bonded = mlx4_is_bonded(dev);
if (!gw->dev->ib_active)
return;
@ -1459,7 +1531,10 @@ static void update_gids_task(struct work_struct *work)
if (err)
pr_warn("set port command failed\n");
else
mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
if ((gw->port == 1) || !is_bonded)
mlx4_ib_dispatch_event(gw->dev,
is_bonded ? 1 : gw->port,
IB_EVENT_GID_CHANGE);
mlx4_free_cmd_mailbox(dev, mailbox);
kfree(gw);
@ -1875,7 +1950,8 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
* don't want the bond IP based gids in the table since
* flows that select port by gid may get the down port.
*/
if (port_state == IB_PORT_DOWN) {
if (port_state == IB_PORT_DOWN &&
!mlx4_is_bonded(ibdev->dev)) {
reset_gid_table(ibdev, port);
mlx4_ib_set_default_gid(ibdev,
curr_netdev,
@ -1938,7 +2014,8 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
int i;
if (mlx4_is_master(ibdev->dev)) {
for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
++slave) {
for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
for (i = 0;
i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
@ -1995,7 +2072,7 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
for (j = 0; j < eq_per_port; j++) {
snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
i, j, dev->pdev->bus->name);
i, j, dev->persist->pdev->bus->name);
/* Set IRQ for specific name (per ring) */
if (mlx4_assign_eq(dev, name, NULL,
&ibdev->eq_table[eq])) {
@ -2046,6 +2123,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
int err;
struct mlx4_ib_iboe *iboe;
int ib_num_ports = 0;
int num_req_counters;
pr_info_once("%s", mlx4_ib_version);
@ -2059,7 +2137,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
if (!ibdev) {
dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
dev_err(&dev->persist->pdev->dev,
"Device struct alloc failed\n");
return NULL;
}
@ -2078,15 +2157,17 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
ibdev->dev = dev;
ibdev->bond_next_port = 0;
strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
ibdev->ib_dev.owner = THIS_MODULE;
ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
ibdev->num_ports = num_ports;
ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
1 : ibdev->num_ports;
ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
ibdev->ib_dev.dma_device = &dev->pdev->dev;
ibdev->ib_dev.dma_device = &dev->persist->pdev->dev;
if (dev->caps.userspace_caps)
ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
@ -2205,7 +2286,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (init_node_data(ibdev))
goto err_map;
for (i = 0; i < ibdev->num_ports; ++i) {
num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
for (i = 0; i < num_req_counters; ++i) {
mutex_init(&ibdev->qp1_proxy_lock[i]);
if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
IB_LINK_LAYER_ETHERNET) {
@ -2216,12 +2298,18 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->counters[i] = -1;
}
}
if (mlx4_is_bonded(dev))
for (i = 1; i < ibdev->num_ports ; ++i)
ibdev->counters[i] = ibdev->counters[0];
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
ib_num_ports++;
spin_lock_init(&ibdev->sm_lock);
mutex_init(&ibdev->cap_mask_mutex);
INIT_LIST_HEAD(&ibdev->qp_list);
spin_lock_init(&ibdev->reset_flow_resource_lock);
if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
ib_num_ports) {
@ -2237,7 +2325,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
sizeof(long),
GFP_KERNEL);
if (!ibdev->ib_uc_qpns_bitmap) {
dev_err(&dev->pdev->dev, "bit map alloc failed\n");
dev_err(&dev->persist->pdev->dev,
"bit map alloc failed\n");
goto err_steer_qp_release;
}
@ -2535,6 +2624,99 @@ out:
return;
}
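/* On a catastrophic (internal) device error, flag every CQ that still has
* outstanding work so its completion handler runs and the software flush
* path above can report the flushed WQEs. */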
static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
{
struct mlx4_ib_qp *mqp;
unsigned long flags_qp;
unsigned long flags_cq;
struct mlx4_ib_cq *send_mcq, *recv_mcq;
struct list_head cq_notify_list;
struct mlx4_cq *mcq;
unsigned long flags;
pr_warn("mlx4_ib_handle_catas_error was started\n");
INIT_LIST_HEAD(&cq_notify_list);
/* Go over the qp list residing on that ibdev, sync with create/destroy qp. */
spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
spin_lock_irqsave(&mqp->sq.lock, flags_qp);
if (mqp->sq.tail != mqp->sq.head) {
send_mcq = to_mcq(mqp->ibqp.send_cq);
spin_lock_irqsave(&send_mcq->lock, flags_cq);
if (send_mcq->mcq.comp &&
mqp->ibqp.send_cq->comp_handler) {
if (!send_mcq->mcq.reset_notify_added) {
send_mcq->mcq.reset_notify_added = 1;
list_add_tail(&send_mcq->mcq.reset_notify,
&cq_notify_list);
}
}
spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
}
spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
/* Now, handle the QP's receive queue */
spin_lock_irqsave(&mqp->rq.lock, flags_qp);
/* no handling is needed for SRQ */
if (!mqp->ibqp.srq) {
if (mqp->rq.tail != mqp->rq.head) {
recv_mcq = to_mcq(mqp->ibqp.recv_cq);
spin_lock_irqsave(&recv_mcq->lock, flags_cq);
if (recv_mcq->mcq.comp &&
mqp->ibqp.recv_cq->comp_handler) {
if (!recv_mcq->mcq.reset_notify_added) {
recv_mcq->mcq.reset_notify_added = 1;
list_add_tail(&recv_mcq->mcq.reset_notify,
&cq_notify_list);
}
}
spin_unlock_irqrestore(&recv_mcq->lock,
flags_cq);
}
}
spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
}
list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
mcq->comp(mcq);
}
spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
pr_warn("mlx4_ib_handle_catas_error ended\n");
}
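/* For a bonded device the two physical ports are exposed to consumers as a
* single port 1: fold both slave netdev link states into one event and
* report ACTIVE if at least one slave carrier is up. */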
static void handle_bonded_port_state_event(struct work_struct *work)
{
struct ib_event_work *ew =
container_of(work, struct ib_event_work, work);
struct mlx4_ib_dev *ibdev = ew->ib_dev;
enum ib_port_state bonded_port_state = IB_PORT_NOP;
int i;
struct ib_event ibev;
kfree(ew);
spin_lock_bh(&ibdev->iboe.lock);
for (i = 0; i < MLX4_MAX_PORTS; ++i) {
struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
enum ib_port_state curr_port_state =
(netif_running(curr_netdev) &&
netif_carrier_ok(curr_netdev)) ?
IB_PORT_ACTIVE : IB_PORT_DOWN;
bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
curr_port_state : IB_PORT_ACTIVE;
}
spin_unlock_bh(&ibdev->iboe.lock);
ibev.device = &ibdev->ib_dev;
ibev.element.port_num = 1;
ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
ib_dispatch_event(&ibev);
}
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
enum mlx4_dev_event event, unsigned long param)
{
@ -2544,6 +2726,18 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
struct ib_event_work *ew;
int p = 0;
if (mlx4_is_bonded(dev) &&
((event == MLX4_DEV_EVENT_PORT_UP) ||
(event == MLX4_DEV_EVENT_PORT_DOWN))) {
ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
if (!ew)
return;
INIT_WORK(&ew->work, handle_bonded_port_state_event);
ew->ib_dev = ibdev;
queue_work(wq, &ew->work);
return;
}
if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
eqe = (struct mlx4_eqe *)param;
else
@ -2570,6 +2764,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
ibdev->ib_active = false;
ibev.event = IB_EVENT_DEVICE_FATAL;
mlx4_ib_handle_catas_error(ibdev);
break;
case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
@ -2604,7 +2799,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
}
ibev.device = ibdev_ptr;
ibev.element.port_num = (u8) p;
ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
ib_dispatch_event(&ibev);
}
@ -2613,7 +2808,8 @@ static struct mlx4_interface mlx4_ib_interface = {
.add = mlx4_ib_add,
.remove = mlx4_ib_remove,
.event = mlx4_ib_event,
.protocol = MLX4_PROT_IB_IPV6
.protocol = MLX4_PROT_IB_IPV6,
.flags = MLX4_INTFF_BONDING
};
static int __init mlx4_ib_init(void)

View File

@ -110,6 +110,9 @@ struct mlx4_ib_cq {
struct mutex resize_mutex;
struct ib_umem *umem;
struct ib_umem *resize_umem;
/* List of qps that it serves. */
struct list_head send_qp_list;
struct list_head recv_qp_list;
};
struct mlx4_ib_mr {
@ -134,10 +137,17 @@ struct mlx4_ib_fmr {
struct mlx4_fmr mfmr;
};
#define MAX_REGS_PER_FLOW 2
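/* A flow may need two firmware rule registrations when ports are bonded:
* the primary rule (id) and its mirror on the other physical port. */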
struct mlx4_flow_reg_id {
u64 id;
u64 mirror;
};
struct mlx4_ib_flow {
struct ib_flow ibflow;
/* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
u64 reg_id[2];
struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW];
};
struct mlx4_ib_wq {
@ -293,6 +303,9 @@ struct mlx4_ib_qp {
struct mlx4_roce_smac_vlan_info pri;
struct mlx4_roce_smac_vlan_info alt;
u64 reg_id;
struct list_head qps_list;
struct list_head cq_recv_list;
struct list_head cq_send_list;
};
struct mlx4_ib_srq {
@ -527,6 +540,10 @@ struct mlx4_ib_dev {
struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS];
/* lock when destroying qp1_proxy and getting netdev events */
struct mutex qp1_proxy_lock[MLX4_MAX_PORTS];
u8 bond_next_port;
/* protect resources needed as part of reset flow */
spinlock_t reset_flow_resource_lock;
struct list_head qp_list;
};
struct ib_event_work {
@ -622,6 +639,13 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
return container_of(ibah, struct mlx4_ib_ah, ibah);
}
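/* Round-robin helper used while ports are bonded: spreads QPs across the
* physical ports by returning 1-based port numbers in turn. */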
static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
{
dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports;
return dev->bond_next_port + 1;
}
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);

View File

@ -401,7 +401,8 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device
if (!mfrpl->ibfrpl.page_list)
goto err_free;
mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->persist->
pdev->dev,
size, &mfrpl->map,
GFP_KERNEL);
if (!mfrpl->mapped_page_list)
@ -423,7 +424,8 @@ void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
int size = page_list->max_page_list_len * sizeof (u64);
dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list,
dma_free_coherent(&dev->dev->persist->pdev->dev, size,
mfrpl->mapped_page_list,
mfrpl->map);
kfree(mfrpl->ibfrpl.page_list);
kfree(mfrpl);

View File

@ -40,11 +40,17 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/qp.h>
#include "mlx4_ib.h"
#include "user.h"
static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
struct mlx4_ib_cq *recv_cq);
static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
struct mlx4_ib_cq *recv_cq);
enum {
MLX4_IB_ACK_REQ_FREQ = 8,
};
@ -93,17 +99,6 @@ enum {
#ifndef ETH_ALEN
#define ETH_ALEN 6
#endif
static inline u64 mlx4_mac_to_u64(u8 *addr)
{
u64 mac = 0;
int i;
for (i = 0; i < ETH_ALEN; i++) {
mac <<= 8;
mac |= addr[i];
}
return mac;
}
static const __be32 mlx4_ib_opcode[] = {
[IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
@ -628,6 +623,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
struct mlx4_ib_sqp *sqp;
struct mlx4_ib_qp *qp;
enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
struct mlx4_ib_cq *mcq;
unsigned long flags;
/* When tunneling special qps, we use a plain UD qp */
if (sqpn) {
@ -838,6 +835,24 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
qp->mqp.event = mlx4_ib_qp_event;
if (!*caller_qp)
*caller_qp = qp;
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
to_mcq(init_attr->recv_cq));
/* Maintain a device-wide list of QPs, needed for further handling
* via the reset flow
*/
list_add_tail(&qp->qps_list, &dev->qp_list);
/* Maintain per-CQ lists of QPs, needed for further handling
* via the reset flow
*/
mcq = to_mcq(init_attr->send_cq);
list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
mcq = to_mcq(init_attr->recv_cq);
list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
to_mcq(init_attr->recv_cq));
spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
return 0;
err_qpn:
@ -896,13 +911,13 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv
__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
if (send_cq == recv_cq) {
spin_lock_irq(&send_cq->lock);
spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
spin_lock_irq(&send_cq->lock);
spin_lock(&send_cq->lock);
spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
} else {
spin_lock_irq(&recv_cq->lock);
spin_lock(&recv_cq->lock);
spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
}
}
@ -912,13 +927,13 @@ static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *re
{
if (send_cq == recv_cq) {
__release(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
spin_unlock(&send_cq->lock);
} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
spin_unlock(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock);
spin_unlock(&send_cq->lock);
} else {
spin_unlock(&send_cq->lock);
spin_unlock_irq(&recv_cq->lock);
spin_unlock(&recv_cq->lock);
}
}
@ -963,6 +978,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
int is_user)
{
struct mlx4_ib_cq *send_cq, *recv_cq;
unsigned long flags;
if (qp->state != IB_QPS_RESET) {
if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
@ -994,8 +1010,13 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
get_cqs(qp, &send_cq, &recv_cq);
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx4_ib_lock_cqs(send_cq, recv_cq);
/* del from lists under both locks above to protect reset flow paths */
list_del(&qp->qps_list);
list_del(&qp->cq_send_list);
list_del(&qp->cq_recv_list);
if (!is_user) {
__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
@ -1006,6 +1027,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
mlx4_qp_remove(dev->dev, &qp->mqp);
mlx4_ib_unlock_cqs(send_cq, recv_cq);
spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
mlx4_qp_free(dev->dev, &qp->mqp);
@ -1915,6 +1937,22 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
}
if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
if ((ibqp->qp_type == IB_QPT_RC) ||
(ibqp->qp_type == IB_QPT_UD) ||
(ibqp->qp_type == IB_QPT_UC) ||
(ibqp->qp_type == IB_QPT_RAW_PACKET) ||
(ibqp->qp_type == IB_QPT_XRC_INI)) {
attr->port_num = mlx4_ib_bond_next_port(dev);
}
} else {
/* no sense in changing port_num
* when ports are bonded */
attr_mask &= ~IB_QP_PORT;
}
}
if ((attr_mask & IB_QP_PORT) &&
(attr->port_num == 0 || attr->port_num > dev->num_ports)) {
pr_debug("qpn 0x%x: invalid port number (%d) specified "
@ -1965,6 +2003,9 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
attr->port_num = 1;
out:
mutex_unlock(&qp->mutex);
return err;
@ -2609,8 +2650,15 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
__be32 uninitialized_var(lso_hdr_sz);
__be32 blh;
int i;
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
spin_lock_irqsave(&qp->sq.lock, flags);
if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
goto out;
}
ind = qp->sq_next_wqe;
@ -2908,10 +2956,18 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
int ind;
int max_gs;
int i;
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
max_gs = qp->rq.max_gs;
spin_lock_irqsave(&qp->rq.lock, flags);
if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
goto out;
}
ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
for (nreq = 0; wr; ++nreq, wr = wr->next) {

View File

@ -316,8 +316,15 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
int err = 0;
int nreq;
int i;
struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);
spin_lock_irqsave(&srq->lock, flags);
if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
goto out;
}
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
@ -362,6 +369,7 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
*srq->db.db = cpu_to_be32(srq->wqe_ctr);
}
out:
spin_unlock_irqrestore(&srq->lock, flags);

View File

@ -375,7 +375,7 @@ static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max)
char base_name[9];
/* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */
strlcpy(name, pci_name(dev->dev->pdev), max);
strlcpy(name, pci_name(dev->dev->persist->pdev), max);
strncpy(base_name, name, 8); /*till xxxx:yy:*/
base_name[8] = '\0';
/* with no ARI only 3 last bits are used so when the fn is higher than 8
@ -792,7 +792,7 @@ static int register_pkey_tree(struct mlx4_ib_dev *device)
if (!mlx4_is_master(device->dev))
return 0;
for (i = 0; i <= device->dev->num_vfs; ++i)
for (i = 0; i <= device->dev->persist->num_vfs; ++i)
register_one_pkey_tree(device, i);
return 0;
@ -807,7 +807,7 @@ static void unregister_pkey_tree(struct mlx4_ib_dev *device)
if (!mlx4_is_master(device->dev))
return;
for (slave = device->dev->num_vfs; slave >= 0; --slave) {
for (slave = device->dev->persist->num_vfs; slave >= 0; --slave) {
list_for_each_entry_safe(p, t,
&device->pkeys.pkey_port_list[slave],
entry) {

View File

@ -81,7 +81,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
for (k = 0; k < len; k++) {
if (!(i & mask)) {
tmp = (unsigned long)pfn;
m = min(m, find_first_bit(&tmp, sizeof(tmp)));
m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp)));
skip = 1 << m;
mask = skip - 1;
base = pfn;

View File

@ -373,11 +373,11 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
/* setup the VLAN tag if present */
if (vlan_tx_tag_present(skb)) {
if (skb_vlan_tag_present(skb)) {
nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
netdev->name, vlan_tx_tag_get(skb));
netdev->name, skb_vlan_tag_get(skb));
wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb);
} else
wqe_misc = 0;
@ -576,11 +576,12 @@ tso_sq_no_longer_full:
wqe_fragment_length =
(__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
/* setup the VLAN tag if present */
if (vlan_tx_tag_present(skb)) {
if (skb_vlan_tag_present(skb)) {
nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
netdev->name, vlan_tx_tag_get(skb) );
netdev->name,
skb_vlan_tag_get(skb));
wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
wqe_fragment_length[0] = (__force __le16) skb_vlan_tag_get(skb);
} else
wqe_misc = 0;

View File

@ -754,10 +754,10 @@ dbusy_timer_handler(struct isac_hw *isac)
}
static int
open_dchannel(struct isac_hw *isac, struct channel_req *rq)
open_dchannel_caller(struct isac_hw *isac, struct channel_req *rq, void *caller)
{
pr_debug("%s: %s dev(%d) open from %p\n", isac->name, __func__,
isac->dch.dev.id, __builtin_return_address(1));
isac->dch.dev.id, caller);
if (rq->protocol != ISDN_P_TE_S0)
return -EINVAL;
if (rq->adr.channel == 1)
@ -771,6 +771,12 @@ open_dchannel(struct isac_hw *isac, struct channel_req *rq)
return 0;
}
static int
open_dchannel(struct isac_hw *isac, struct channel_req *rq)
{
return open_dchannel_caller(isac, rq, __builtin_return_address(0));
}
static const char *ISACVer[] =
{"2086/2186 V1.1", "2085 B1", "2085 B2",
"2085 V2.3"};
@ -1548,7 +1554,7 @@ ipac_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
case OPEN_CHANNEL:
rq = arg;
if (rq->protocol == ISDN_P_TE_S0)
err = open_dchannel(isac, rq);
err = open_dchannel_caller(isac, rq, __builtin_return_address(0));
else
err = open_bchannel(ipac, rq);
if (err)

View File

@ -1176,10 +1176,10 @@ w6692_l1callback(struct dchannel *dch, u32 cmd)
}
static int
open_dchannel(struct w6692_hw *card, struct channel_req *rq)
open_dchannel(struct w6692_hw *card, struct channel_req *rq, void *caller)
{
pr_debug("%s: %s dev(%d) open from %p\n", card->name, __func__,
card->dch.dev.id, __builtin_return_address(1));
card->dch.dev.id, caller);
if (rq->protocol != ISDN_P_TE_S0)
return -EINVAL;
if (rq->adr.channel == 1)
@ -1207,7 +1207,7 @@ w6692_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
case OPEN_CHANNEL:
rq = arg;
if (rq->protocol == ISDN_P_TE_S0)
err = open_dchannel(card, rq);
err = open_dchannel(card, rq, __builtin_return_address(0));
else
err = open_bchannel(card, rq);
if (err)
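
The isac and w6692 hunks share one idea: GCC only guarantees a meaningful result from __builtin_return_address(0), and reaching a frame further up with __builtin_return_address(1) can return garbage or crash on some configurations. Each public entry point therefore captures its own return address and passes it down as an explicit caller argument. A runnable userspace sketch of the pattern:

#include <stdio.h>

/* The callee no longer guesses who called its caller; it is told. */
static int open_dchannel_caller(void *caller)
{
        printf("open from %p\n", caller);
        return 0;
}

/* Each entry point captures its own return address, which
 * __builtin_return_address(0) is guaranteed to provide. */
static int open_dchannel(void)
{
        return open_dchannel_caller(__builtin_return_address(0));
}

int main(void)
{
        return open_dchannel();
}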

@ -224,20 +224,6 @@ fWrite_hfc8(hfc4s8s_hw *a, u_char c)
outb(c, a->iobase);
}
static inline void
Write_hfc16(hfc4s8s_hw *a, u_char b, u_short c)
{
SetRegAddr(a, b);
outw(c, a->iobase);
}
static inline void
Write_hfc32(hfc4s8s_hw *a, u_char b, u_long c)
{
SetRegAddr(a, b);
outl(c, a->iobase);
}
static inline void
fWrite_hfc32(hfc4s8s_hw *a, u_long c)
{
@ -265,13 +251,6 @@ Read_hfc16(hfc4s8s_hw *a, u_char b)
return (inw((volatile u_int) a->iobase));
}
static inline u_long
Read_hfc32(hfc4s8s_hw *a, u_char b)
{
SetRegAddr(a, b);
return (inl((volatile u_int) a->iobase));
}
static inline u_long
fRead_hfc32(hfc4s8s_hw *a)
{

@ -59,7 +59,8 @@ isdnloop_bchan_send(isdnloop_card *card, int ch)
isdn_ctrl cmd;
while (card->sndcount[ch]) {
if ((skb = skb_dequeue(&card->bqueue[ch]))) {
skb = skb_dequeue(&card->bqueue[ch]);
if (skb) {
len = skb->len;
card->sndcount[ch] -= len;
ack = *(skb->head); /* used as scratch area */
@ -149,8 +150,7 @@ typedef struct isdnloop_stat {
int action;
} isdnloop_stat;
/* *INDENT-OFF* */
static isdnloop_stat isdnloop_stat_table[] =
{
static isdnloop_stat isdnloop_stat_table[] = {
{"BCON_", ISDN_STAT_BCONN, 1}, /* B-Channel connected */
{"BDIS_", ISDN_STAT_BHUP, 2}, /* B-Channel disconnected */
{"DCON_", ISDN_STAT_DCONN, 0}, /* D-Channel connected */
@ -317,7 +317,8 @@ isdnloop_polldchan(unsigned long data)
u_char *p;
isdn_ctrl cmd;
if ((skb = skb_dequeue(&card->dqueue)))
skb = skb_dequeue(&card->dqueue);
if (skb)
avail = skb->len;
else
avail = 0;
@ -471,8 +472,8 @@ isdnloop_fake(isdnloop_card *card, char *s, int ch)
{
struct sk_buff *skb;
int len = strlen(s) + ((ch >= 0) ? 3 : 0);
if (!(skb = dev_alloc_skb(len))) {
skb = dev_alloc_skb(len);
if (!skb) {
printk(KERN_WARNING "isdnloop: Out of memory in isdnloop_fake\n");
return 1;
}
@ -483,8 +484,7 @@ isdnloop_fake(isdnloop_card *card, char *s, int ch)
return 0;
}
/* *INDENT-OFF* */
static isdnloop_stat isdnloop_cmd_table[] =
{
static isdnloop_stat isdnloop_cmd_table[] = {
{"BCON_R", 0, 1}, /* B-Channel connect */
{"BCON_I", 0, 17}, /* B-Channel connect ind */
{"BDIS_R", 0, 2}, /* B-Channel disconnect */
@ -525,10 +525,8 @@ isdnloop_fake_err(isdnloop_card *card)
isdnloop_fake(card, "NAK", -1);
}
static u_char ctable_eu[] =
{0x00, 0x11, 0x01, 0x12};
static u_char ctable_1t[] =
{0x00, 0x3b, 0x01, 0x3a};
static u_char ctable_eu[] = {0x00, 0x11, 0x01, 0x12};
static u_char ctable_1t[] = {0x00, 0x3b, 0x01, 0x3a};
/*
* Assemble a simplified cause message depending on the
@ -554,9 +552,9 @@ isdnloop_unicause(isdnloop_card *card, int loc, int cau)
sprintf(buf, "%02X44", ctable_1t[cau]);
break;
default:
return ("0000");
return "0000";
}
return (buf);
return buf;
}
/*
@ -647,10 +645,8 @@ isdnloop_kill_ctimer(isdnloop_card *card, int ch)
spin_unlock_irqrestore(&card->isdnloop_lock, flags);
}
static u_char si2bit[] =
{0, 1, 0, 0, 0, 2, 0, 4, 0, 0};
static u_char bit2si[] =
{1, 5, 7};
static u_char si2bit[] = {0, 1, 0, 0, 0, 2, 0, 4, 0, 0};
static u_char bit2si[] = {1, 5, 7};
/*
* Try finding a listener for an outgoing call.
@ -754,17 +750,17 @@ isdnloop_vstphone(isdnloop_card *card, char *phone, int caller)
if (caller) {
for (i = 0; i < 2; i++)
if (!(strcmp(card->s0num[i], phone)))
return (phone);
return (card->s0num[0]);
return phone;
return card->s0num[0];
}
return (phone);
return phone;
break;
case ISDN_PTYPE_1TR6:
if (caller) {
sprintf(nphone, "%s%c", card->s0num[0], phone[0]);
return (nphone);
return nphone;
} else
return (&phone[strlen(phone) - 1]);
return &phone[strlen(phone) - 1];
break;
}
return "";
@ -1148,14 +1144,14 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card)
case ISDNLOOP_IOCTL_STARTUP:
if (!access_ok(VERIFY_READ, (void *) a, sizeof(isdnloop_sdef)))
return -EFAULT;
return (isdnloop_start(card, (isdnloop_sdef *) a));
return isdnloop_start(card, (isdnloop_sdef *) a);
break;
case ISDNLOOP_IOCTL_ADDCARD:
if (copy_from_user((char *)&cdef,
(char *)a,
sizeof(cdef)))
return -EFAULT;
return (isdnloop_addcard(cdef.id1));
return isdnloop_addcard(cdef.id1);
break;
case ISDNLOOP_IOCTL_LEASEDCFG:
if (a) {
@ -1377,7 +1373,7 @@ if_command(isdn_ctrl *c)
isdnloop_card *card = isdnloop_findcard(c->driver);
if (card)
return (isdnloop_command(c, card));
return isdnloop_command(c, card);
printk(KERN_ERR
"isdnloop: if_command called with invalid driverId!\n");
return -ENODEV;
@ -1391,7 +1387,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel)
if (card) {
if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
return (isdnloop_writecmd(buf, len, 1, card));
return isdnloop_writecmd(buf, len, 1, card);
}
printk(KERN_ERR
"isdnloop: if_writecmd called with invalid driverId!\n");
@ -1406,7 +1402,7 @@ if_readstatus(u_char __user *buf, int len, int id, int channel)
if (card) {
if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
return (isdnloop_readstatus(buf, len, card));
return isdnloop_readstatus(buf, len, card);
}
printk(KERN_ERR
"isdnloop: if_readstatus called with invalid driverId!\n");
@ -1423,7 +1419,7 @@ if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
return -ENODEV;
/* ack request stored in skb scratch area */
*(skb->head) = ack;
return (isdnloop_sendbuf(channel, skb, card));
return isdnloop_sendbuf(channel, skb, card);
}
printk(KERN_ERR
"isdnloop: if_sendbuf called with invalid driverId!\n");
@ -1439,8 +1435,8 @@ isdnloop_initcard(char *id)
{
isdnloop_card *card;
int i;
if (!(card = kzalloc(sizeof(isdnloop_card), GFP_KERNEL))) {
card = kzalloc(sizeof(isdnloop_card), GFP_KERNEL);
if (!card) {
printk(KERN_WARNING
"isdnloop: (%s) Could not allocate card-struct.\n", id);
return (isdnloop_card *) 0;
@ -1489,8 +1485,8 @@ static int
isdnloop_addcard(char *id1)
{
isdnloop_card *card;
if (!(card = isdnloop_initcard(id1))) {
card = isdnloop_initcard(id1);
if (!card) {
return -EIO;
}
printk(KERN_INFO
@ -1503,7 +1499,7 @@ static int __init
isdnloop_init(void)
{
if (isdnloop_id)
return (isdnloop_addcard(isdnloop_id));
return isdnloop_addcard(isdnloop_id);
return 0;
}
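
The isdnloop hunks are pure style cleanup: assignments move out of if-conditions, redundant parentheses around return values are dropped, and small initializer tables are compacted onto single lines; no behavior changes. A minimal illustration of the dominant pattern (ordinary userspace code, not the driver):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        char *buf;

        /* Old style buried the assignment in the condition:
         *      if (!(buf = malloc(16))) ...
         * The cleanup splits it, which checkpatch prefers: */
        buf = malloc(16);
        if (!buf) {
                fprintf(stderr, "out of memory\n");
                return 1;
        }
        free(buf);
        return 0;
}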

@ -30,7 +30,7 @@ static const char *boardname[] = { "DataCommute/BRI", "DataCommute/PRI", "TeleCo
static unsigned int io[] = {0, 0, 0, 0};
static unsigned char irq[] = {0, 0, 0, 0};
static unsigned long ram[] = {0, 0, 0, 0};
static bool do_reset = 0;
static bool do_reset;
module_param_array(io, int, NULL, 0);
module_param_array(irq, byte, NULL, 0);
@ -104,13 +104,12 @@ static int __init sc_init(void)
io[b] + 0x400 * EXP_PAGE0);
continue;
}
}
else {
} else {
/*
* Yes, probe for I/O Base
*/
if (probe_exhasted) {
pr_debug("All probe addresses exhasted, skipping\n");
pr_debug("All probe addresses exhausted, skipping\n");
continue;
}
pr_debug("Probing for I/O...\n");
@ -169,8 +168,7 @@ static int __init sc_init(void)
model = identify_board(ram[b], io[b]);
release_region(ram[b], SRAM_PAGESIZE);
}
}
else {
} else {
/*
* Yes, probe for free RAM and look for
* a signature and id the board model
@ -187,7 +185,7 @@ static int __init sc_init(void)
ram[b] = i;
break;
}
pr_debug(" Unidentifed or inaccessible\n");
pr_debug(" Unidentified or inaccessible\n");
continue;
}
pr_debug(" request failed\n");
@ -337,8 +335,7 @@ static int __init sc_init(void)
sc_adapter[cinst]->interrupt = irq[b];
if (request_irq(sc_adapter[cinst]->interrupt, interrupt_handler,
0, interface->id,
(void *)(unsigned long) cinst))
{
(void *)(unsigned long) cinst)) {
kfree(sc_adapter[cinst]->channel);
indicate_status(cinst, ISDN_STAT_UNLOAD, 0, NULL); /* Fix me */
kfree(interface);

@ -370,12 +370,12 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
to_copy = size - bytes_copied;
if (is_iovec) {
struct iovec *iov = (struct iovec *)src;
struct msghdr *msg = (struct msghdr *)src;
int err;
/* The iovec will track bytes_copied internally. */
err = memcpy_fromiovec((u8 *)va + page_offset,
iov, to_copy);
err = memcpy_from_msg((u8 *)va + page_offset,
msg, to_copy);
if (err != 0) {
if (kernel_if->host)
kunmap(kernel_if->u.h.page[page_index]);
@ -580,7 +580,7 @@ static int qp_memcpy_from_queue(void *dest,
*/
static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
u64 queue_offset,
const void *src,
const void *msg,
size_t src_offset, size_t size)
{
@ -588,7 +588,7 @@ static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
* We ignore src_offset because src is really a struct iovec * and will
* maintain offset internally.
*/
return __qp_memcpy_to_queue(queue, queue_offset, src, size, true);
return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
}
/*
@ -3223,13 +3223,13 @@ EXPORT_SYMBOL_GPL(vmci_qpair_peek);
* of bytes enqueued or < 0 on error.
*/
ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
void *iov,
struct msghdr *msg,
size_t iov_size,
int buf_type)
{
ssize_t result;
if (!qpair || !iov)
if (!qpair)
return VMCI_ERROR_INVALID_ARGS;
qp_lock(qpair);
@ -3238,7 +3238,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
result = qp_enqueue_locked(qpair->produce_q,
qpair->consume_q,
qpair->produce_q_size,
iov, iov_size,
msg, iov_size,
qp_memcpy_to_queue_iov);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
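
The vmw_vmci hunks are part of the broader iov_iter conversion: the enqueue path now receives a struct msghdr and copies with memcpy_from_msg(), which drains the message's internal iterator, instead of walking a raw struct iovec via memcpy_fromiovec(). A userspace analogue of draining an iovec into a flat buffer (copy_from_iov is an illustrative name, not a kernel helper):

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Drain 'len' bytes from the iovec segments into dst, advancing
 * through the segments the way the kernel iterator does internally. */
static int copy_from_iov(char *dst, const struct iovec *iov, int iovcnt,
                         size_t len)
{
        int i;

        for (i = 0; i < iovcnt && len; i++) {
                size_t n = iov[i].iov_len < len ? iov[i].iov_len : len;

                memcpy(dst, iov[i].iov_base, n);
                dst += n;
                len -= n;
        }
        return len ? -1 : 0;    /* -1: the iovec was shorter than len */
}

int main(void)
{
        char a[] = "queue", b[] = "pair", out[16] = "";
        struct iovec iov[] = {
                { .iov_base = a, .iov_len = 5 },
                { .iov_base = b, .iov_len = 4 },
        };

        if (!copy_from_iov(out, iov, 2, 9))
                printf("%s\n", out);    /* prints "queuepair" */
        return 0;
}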

@ -78,6 +78,9 @@ static int com20020pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
priv = devm_kzalloc(&pdev->dev, sizeof(struct com20020_priv),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
ci = (struct com20020_pci_card_info *)id->driver_data;
priv->ci = ci;

@ -467,11 +467,14 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
/* set the partner sync. to on if the partner is sync,
* and the port is matched
*/
if ((port->sm_vars & AD_PORT_MATCHED)
&& (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION))
if ((port->sm_vars & AD_PORT_MATCHED) &&
(lacpdu->actor_state & AD_STATE_SYNCHRONIZATION)) {
partner->port_state |= AD_STATE_SYNCHRONIZATION;
else
pr_debug("%s partner sync=1\n", port->slave->dev->name);
} else {
partner->port_state &= ~AD_STATE_SYNCHRONIZATION;
pr_debug("%s partner sync=0\n", port->slave->dev->name);
}
}
}
@ -726,6 +729,8 @@ static inline void __update_lacpdu_from_port(struct port *port)
lacpdu->actor_port_priority = htons(port->actor_port_priority);
lacpdu->actor_port = htons(port->actor_port_number);
lacpdu->actor_state = port->actor_oper_port_state;
pr_debug("update lacpdu: %s, actor port state %x\n",
port->slave->dev->name, port->actor_oper_port_state);
/* lacpdu->reserved_3_1 initialized
* lacpdu->tlv_type_partner_info initialized
@ -898,7 +903,9 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
if ((port->sm_vars & AD_PORT_SELECTED) &&
(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) &&
!__check_agg_selection_timer(port)) {
port->sm_mux_state = AD_MUX_COLLECTING_DISTRIBUTING;
if (port->aggregator->is_active)
port->sm_mux_state =
AD_MUX_COLLECTING_DISTRIBUTING;
} else if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY)) {
/* if UNSELECTED or STANDBY */
@ -910,12 +917,16 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
*/
__set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
port->sm_mux_state = AD_MUX_DETACHED;
} else if (port->aggregator->is_active) {
port->actor_oper_port_state |=
AD_STATE_SYNCHRONIZATION;
}
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY) ||
!(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION)) {
!(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) ||
!(port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) {
port->sm_mux_state = AD_MUX_ATTACHED;
} else {
/* if port state hasn't changed make
@ -937,8 +948,10 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
/* check if the state machine was changed */
if (port->sm_mux_state != last_state) {
pr_debug("Mux Machine: Port=%d, Last State=%d, Curr State=%d\n",
port->actor_port_number, last_state,
pr_debug("Mux Machine: Port=%d (%s), Last State=%d, Curr State=%d\n",
port->actor_port_number,
port->slave->dev->name,
last_state,
port->sm_mux_state);
switch (port->sm_mux_state) {
case AD_MUX_DETACHED:
@ -953,7 +966,12 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
port->sm_mux_timer_counter = __ad_timer_to_ticks(AD_WAIT_WHILE_TIMER, 0);
break;
case AD_MUX_ATTACHED:
port->actor_oper_port_state |= AD_STATE_SYNCHRONIZATION;
if (port->aggregator->is_active)
port->actor_oper_port_state |=
AD_STATE_SYNCHRONIZATION;
else
port->actor_oper_port_state &=
~AD_STATE_SYNCHRONIZATION;
port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
ad_disable_collecting_distributing(port,
@ -963,6 +981,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
case AD_MUX_COLLECTING_DISTRIBUTING:
port->actor_oper_port_state |= AD_STATE_COLLECTING;
port->actor_oper_port_state |= AD_STATE_DISTRIBUTING;
port->actor_oper_port_state |= AD_STATE_SYNCHRONIZATION;
ad_enable_collecting_distributing(port,
update_slave_arr);
port->ntt = true;
@ -1044,8 +1063,10 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
/* check if the State machine was changed or new lacpdu arrived */
if ((port->sm_rx_state != last_state) || (lacpdu)) {
pr_debug("Rx Machine: Port=%d, Last State=%d, Curr State=%d\n",
port->actor_port_number, last_state,
pr_debug("Rx Machine: Port=%d (%s), Last State=%d, Curr State=%d\n",
port->actor_port_number,
port->slave->dev->name,
last_state,
port->sm_rx_state);
switch (port->sm_rx_state) {
case AD_RX_INITIALIZE:
@ -1394,6 +1415,9 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
aggregator = __get_first_agg(port);
ad_agg_selection_logic(aggregator, update_slave_arr);
if (!port->aggregator->is_active)
port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
}
/* Decide if "agg" is a better choice for the new active aggregator that
@ -2195,8 +2219,10 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
switch (lacpdu->subtype) {
case AD_TYPE_LACPDU:
ret = RX_HANDLER_CONSUMED;
netdev_dbg(slave->bond->dev, "Received LACPDU on port %d\n",
port->actor_port_number);
netdev_dbg(slave->bond->dev,
"Received LACPDU on port %d slave %s\n",
port->actor_port_number,
slave->dev->name);
/* Protect against concurrent state machines */
spin_lock(&slave->bond->mode_lock);
ad_rx_machine(lacpdu, port);
@ -2288,7 +2314,10 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
port->actor_admin_port_key &= ~AD_DUPLEX_KEY_MASKS;
port->actor_oper_port_key = port->actor_admin_port_key |=
__get_duplex(port);
netdev_dbg(slave->bond->dev, "Port %d changed duplex\n", port->actor_port_number);
netdev_dbg(slave->bond->dev, "Port %d slave %s changed duplex\n",
port->actor_port_number, slave->dev->name);
if (port->actor_oper_port_key & AD_DUPLEX_KEY_MASKS)
port->sm_vars |= AD_PORT_LACP_ENABLED;
/* there is no need to reselect a new aggregator, just signal the
* state machines to reinitialize
*/
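
Taken together, the bond_3ad hunks tighten the 802.3ad mux machine: a port only advertises AD_STATE_SYNCHRONIZATION, and only moves on to collecting/distributing, while its aggregator is the active one, and the bit is cleared again when the aggregator loses that status. A toy model of that rule (not the driver's structures; everything below is illustrative):

#include <stdio.h>

#define AD_STATE_SYNCHRONIZATION 0x08

struct port {
        unsigned char oper_state;
        int agg_is_active;
};

/* The sync bit now tracks whether the port's aggregator is active. */
static void update_sync(struct port *p)
{
        if (p->agg_is_active)
                p->oper_state |= AD_STATE_SYNCHRONIZATION;
        else
                p->oper_state &= ~AD_STATE_SYNCHRONIZATION;
}

int main(void)
{
        struct port p = { .oper_state = 0, .agg_is_active = 0 };

        update_sync(&p);
        printf("inactive agg: sync=%d\n",
               !!(p.oper_state & AD_STATE_SYNCHRONIZATION));
        p.agg_is_active = 1;
        update_sync(&p);
        printf("active agg:   sync=%d\n",
               !!(p.oper_state & AD_STATE_SYNCHRONIZATION));
        return 0;
}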

@ -77,6 +77,7 @@
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_keys.h>
#include <net/switchdev.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
@ -334,7 +335,7 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
*
* Returns zero if carrier state does not change, nonzero if it does.
*/
static int bond_set_carrier(struct bonding *bond)
int bond_set_carrier(struct bonding *bond)
{
struct list_head *iter;
struct slave *slave;
@ -789,7 +790,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
}
new_active->delay = 0;
new_active->link = BOND_LINK_UP;
bond_set_slave_link_state(new_active, BOND_LINK_UP);
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
@ -979,7 +980,11 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
netdev_features_t mask;
struct slave *slave;
mask = features;
/* If any slave has the offload feature flag set,
* set the offload flag on the bond.
*/
mask = features | NETIF_F_HW_SWITCH_OFFLOAD;
features &= ~NETIF_F_ONE_FOR_ALL;
features |= NETIF_F_ALL_FOR_ALL;
@ -998,7 +1003,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
NETIF_F_HIGHDMA | NETIF_F_LRO)
#define BOND_ENC_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |\
NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL)
NETIF_F_TSO)
static void bond_compute_features(struct bonding *bond)
{
@ -1034,7 +1039,7 @@ static void bond_compute_features(struct bonding *bond)
done:
bond_dev->vlan_features = vlan_features;
bond_dev->hw_enc_features = enc_features;
bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
bond_dev->hard_header_len = max_hard_header_len;
bond_dev->gso_max_segs = gso_max_segs;
netif_set_gso_max_size(bond_dev, gso_max_size);
@ -1176,6 +1181,56 @@ static void bond_free_slave(struct slave *slave)
kfree(slave);
}
static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
info->bond_mode = BOND_MODE(bond);
info->miimon = bond->params.miimon;
info->num_slaves = bond->slave_cnt;
}
static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
{
strcpy(info->slave_name, slave->dev->name);
info->link = slave->link;
info->state = bond_slave_state(slave);
info->link_failure_count = slave->link_failure_count;
}
static void bond_netdev_notify(struct net_device *dev,
struct netdev_bonding_info *info)
{
rtnl_lock();
netdev_bonding_info_change(dev, info);
rtnl_unlock();
}
static void bond_netdev_notify_work(struct work_struct *_work)
{
struct netdev_notify_work *w =
container_of(_work, struct netdev_notify_work, work.work);
bond_netdev_notify(w->dev, &w->bonding_info);
dev_put(w->dev);
kfree(w);
}
void bond_queue_slave_event(struct slave *slave)
{
struct bonding *bond = slave->bond;
struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
if (!nnw)
return;
dev_hold(slave->dev);
nnw->dev = slave->dev;
bond_fill_ifslave(slave, &nnw->bonding_info.slave);
bond_fill_ifbond(bond, &nnw->bonding_info.master);
INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
queue_delayed_work(slave->bond->wq, &nnw->work, 0);
}
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
@ -1439,19 +1494,22 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (bond->params.miimon) {
if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
if (bond->params.updelay) {
new_slave->link = BOND_LINK_BACK;
bond_set_slave_link_state(new_slave,
BOND_LINK_BACK);
new_slave->delay = bond->params.updelay;
} else {
new_slave->link = BOND_LINK_UP;
bond_set_slave_link_state(new_slave,
BOND_LINK_UP);
}
} else {
new_slave->link = BOND_LINK_DOWN;
bond_set_slave_link_state(new_slave, BOND_LINK_DOWN);
}
} else if (bond->params.arp_interval) {
new_slave->link = (netif_carrier_ok(slave_dev) ?
BOND_LINK_UP : BOND_LINK_DOWN);
bond_set_slave_link_state(new_slave,
(netif_carrier_ok(slave_dev) ?
BOND_LINK_UP : BOND_LINK_DOWN));
} else {
new_slave->link = BOND_LINK_UP;
bond_set_slave_link_state(new_slave, BOND_LINK_UP);
}
if (new_slave->link != BOND_LINK_DOWN)
@ -1567,6 +1625,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
/* enslave is successful */
bond_queue_slave_event(new_slave);
return 0;
/* Undo stages on error */
@ -1816,11 +1875,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
{
struct bonding *bond = netdev_priv(bond_dev);
info->bond_mode = BOND_MODE(bond);
info->miimon = bond->params.miimon;
info->num_slaves = bond->slave_cnt;
bond_fill_ifbond(bond, info);
return 0;
}
@ -1834,10 +1889,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
bond_for_each_slave(bond, slave, iter) {
if (i++ == (int)info->slave_id) {
res = 0;
strcpy(info->slave_name, slave->dev->name);
info->link = slave->link;
info->state = bond_slave_state(slave);
info->link_failure_count = slave->link_failure_count;
bond_fill_ifslave(slave, info);
break;
}
}
@ -1867,7 +1919,7 @@ static int bond_miimon_inspect(struct bonding *bond)
if (link_state)
continue;
slave->link = BOND_LINK_FAIL;
bond_set_slave_link_state(slave, BOND_LINK_FAIL);
slave->delay = bond->params.downdelay;
if (slave->delay) {
netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
@ -1882,7 +1934,7 @@ static int bond_miimon_inspect(struct bonding *bond)
case BOND_LINK_FAIL:
if (link_state) {
/* recovered before downdelay expired */
slave->link = BOND_LINK_UP;
bond_set_slave_link_state(slave, BOND_LINK_UP);
slave->last_link_up = jiffies;
netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
(bond->params.downdelay - slave->delay) *
@ -1904,7 +1956,7 @@ static int bond_miimon_inspect(struct bonding *bond)
if (!link_state)
continue;
slave->link = BOND_LINK_BACK;
bond_set_slave_link_state(slave, BOND_LINK_BACK);
slave->delay = bond->params.updelay;
if (slave->delay) {
@ -1917,7 +1969,8 @@ static int bond_miimon_inspect(struct bonding *bond)
/*FALLTHRU*/
case BOND_LINK_BACK:
if (!link_state) {
slave->link = BOND_LINK_DOWN;
bond_set_slave_link_state(slave,
BOND_LINK_DOWN);
netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
(bond->params.updelay - slave->delay) *
bond->params.miimon,
@ -1955,7 +2008,7 @@ static void bond_miimon_commit(struct bonding *bond)
continue;
case BOND_LINK_UP:
slave->link = BOND_LINK_UP;
bond_set_slave_link_state(slave, BOND_LINK_UP);
slave->last_link_up = jiffies;
primary = rtnl_dereference(bond->primary_slave);
@ -1995,7 +2048,7 @@ static void bond_miimon_commit(struct bonding *bond)
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
slave->link = BOND_LINK_DOWN;
bond_set_slave_link_state(slave, BOND_LINK_DOWN);
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
BOND_MODE(bond) == BOND_MODE_8023AD)
@ -2578,7 +2631,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
struct slave *current_arp_slave;
current_arp_slave = rtnl_dereference(bond->current_arp_slave);
slave->link = BOND_LINK_UP;
bond_set_slave_link_state(slave, BOND_LINK_UP);
if (current_arp_slave) {
bond_set_slave_inactive_flags(
current_arp_slave,
@ -2601,7 +2654,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
slave->link = BOND_LINK_DOWN;
bond_set_slave_link_state(slave, BOND_LINK_DOWN);
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
@ -2680,7 +2733,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
* up when it is actually down
*/
if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
slave->link = BOND_LINK_DOWN;
bond_set_slave_link_state(slave, BOND_LINK_DOWN);
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
@ -2700,7 +2753,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
if (!new_slave)
goto check_state;
new_slave->link = BOND_LINK_BACK;
bond_set_slave_link_state(new_slave, BOND_LINK_BACK);
bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
bond_arp_send_all(bond, new_slave);
new_slave->last_link_up = jiffies;
@ -3066,7 +3119,7 @@ static int bond_open(struct net_device *bond_dev)
slave != rcu_access_pointer(bond->curr_active_slave)) {
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
} else {
} else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
bond_set_slave_active_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
}
@ -3734,7 +3787,7 @@ out:
* usable slave array is formed in the control path. The xmit function
* just calculates hash and sends the packet out.
*/
int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
struct slave *slave;
@ -3952,6 +4005,8 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_add_slave = bond_enslave,
.ndo_del_slave = bond_release,
.ndo_fix_features = bond_fix_features,
.ndo_bridge_setlink = ndo_dflt_netdev_switch_port_bridge_setlink,
.ndo_bridge_dellink = ndo_dflt_netdev_switch_port_bridge_dellink,
};
static const struct device_type bond_type = {
@ -4010,7 +4065,7 @@ void bond_setup(struct net_device *bond_dev)
NETIF_F_HW_VLAN_CTAG_FILTER;
bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
bond_dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
bond_dev->features |= bond_dev->hw_features;
}
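
Two themes run through the bond_main hunks: every slave link-state write now goes through bond_set_slave_link_state() instead of poking slave->link directly, and the new bond_queue_slave_event() defers the netdev_bonding_info_change() notification to a work item that holds its own device reference and is freed by the worker, not the submitter. A userspace sketch of that ownership pattern (a plain list stands in for the bond's workqueue; names are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Each queued item is self-contained: it carries everything the
 * worker needs and the worker alone releases it. */
struct notify_work {
        char dev_name[16];              /* stands in for the held net_device */
        struct notify_work *next;
};

static struct notify_work *queue_head;

static void queue_slave_event(const char *dev)
{
        struct notify_work *w = calloc(1, sizeof(*w));

        if (!w)
                return;                 /* best effort, as in the driver */
        snprintf(w->dev_name, sizeof(w->dev_name), "%s", dev);
        w->next = queue_head;
        queue_head = w;
}

static void run_workqueue(void)
{
        while (queue_head) {
                struct notify_work *w = queue_head;

                queue_head = w->next;
                printf("bonding info changed on %s\n", w->dev_name);
                free(w);                /* the worker owns the item */
        }
}

int main(void)
{
        queue_slave_event("eth0");
        queue_slave_event("eth1");
        run_workqueue();
        return 0;
}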

@ -186,7 +186,7 @@ static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
{ NULL, -1, 0}
};
static const struct bond_option bond_opts[] = {
static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
.name = "mode",
@ -379,8 +379,7 @@ static const struct bond_option bond_opts[] = {
.values = bond_tlb_dynamic_lb_tbl,
.flags = BOND_OPTFLAG_IFDOWN,
.set = bond_option_tlb_dynamic_lb_set,
},
{ }
}
};
/* Searches for an option by name */
@ -1182,6 +1181,7 @@ static int bond_option_min_links_set(struct bonding *bond,
netdev_info(bond->dev, "Setting min links value to %llu\n",
newval->value);
bond->params.min_links = newval->value;
bond_set_carrier(bond);
return 0;
}
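
The bond_options hunk turns the sentinel-terminated option table into a fixed-size array indexed by the BOND_OPT_* enum, so a lookup can index directly by id rather than scan for the empty terminator, and the array is sized by the enum itself. A small runnable sketch of the designated-initializer idiom (the enum and option names here are illustrative):

#include <stdio.h>

enum opt_id { OPT_MODE, OPT_MIIMON, OPT_LAST };

struct option_desc {
        enum opt_id id;
        const char *name;
};

/* Sized by the enum and indexed by it: no sentinel entry needed. */
static const struct option_desc opts[OPT_LAST] = {
        [OPT_MODE]   = { .id = OPT_MODE,   .name = "mode"   },
        [OPT_MIIMON] = { .id = OPT_MIIMON, .name = "miimon" },
};

int main(void)
{
        int i;

        for (i = 0; i < OPT_LAST; i++)
                printf("%d -> %s\n", opts[i].id, opts[i].name);
        return 0;
}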

Some files were not shown because too many files have changed in this diff.