1
0
Fork 0

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:
 "This looks like a lot but it's a mixture of regression fixes as well
  as fixes for longer standing issues.

   1) Fix on-channel cancellation in mac80211, from Johannes Berg.

   2) Handle CHECKSUM_COMPLETE properly in xt_TCPMSS netfilter xtables
      module, from Eric Dumazet.

   3) Avoid infinite loop in UDP SO_REUSEPORT logic, also from Eric
      Dumazet.

   4) Avoid a NULL deref if we try to set SO_REUSEPORT after a socket is
      bound, from Craig Gallek.

   5) GRO key comparisons don't take lightweight tunnels into account,
      from Jesse Gross.

   6) Fix struct pid leak via SCM credentials in AF_UNIX, from Eric
      Dumazet.

   7) We need to set the rtnl_link_ops of ipv6 SIT tunnels before we
      register them, otherwise the NEWLINK netlink message is missing
      the proper attributes.  From Thadeu Lima de Souza Cascardo.

   8) Several Spectrum chip bug fixes for mlxsw switch driver, from Ido
      Schimmel

   9) Handle fragments properly in ipv4 early socket demux, from Eric
      Dumazet.

  10) Don't ignore the ifindex key specifier on ipv6 output route
      lookups, from Paolo Abeni"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (128 commits)
  tcp: avoid cwnd undo after receiving ECN
  irda: fix a potential use-after-free in ircomm_param_request
  net: tg3: avoid uninitialized variable warning
  net: nb8800: avoid uninitialized variable warning
  net: vxge: avoid unused function warnings
  net: bgmac: clarify CONFIG_BCMA dependency
  net: hp100: remove unnecessary #ifdefs
  net: davinci_cpdma: use dma_addr_t for DMA address
  ipv6/udp: use sticky pktinfo egress ifindex on connect()
  ipv6: enforce flowi6_oif usage in ip6_dst_lookup_tail()
  netlink: not trim skb for mmaped socket when dump
  vxlan: fix a out of bounds access in __vxlan_find_mac
  net: dsa: mv88e6xxx: fix port VLAN maps
  fib_trie: Fix shift by 32 in fib_table_lookup
  net: moxart: use correct accessors for DMA memory
  ipv4: ipconfig: avoid unused ic_proto_used symbol
  bnxt_en: Fix crash in bnxt_free_tx_skbs() during tx timeout.
  bnxt_en: Exclude rx_drop_pkts hw counter from the stack's rx_dropped counter.
  bnxt_en: Ring free response from close path should use completion ring
  net_sched: drr: check for NULL pointer in drr_dequeue
  ...
steinar/wifi_calib_4_9_kernel
Linus Torvalds 2016-02-01 15:56:08 -08:00
commit 34229b2774
154 changed files with 1404 additions and 845 deletions

View File

@ -68,7 +68,7 @@ ethernet@f0b60000 {
phy1: ethernet-phy@1 { phy1: ethernet-phy@1 {
max-speed = <1000>; max-speed = <1000>;
reg = <0x1>; reg = <0x1>;
compatible = "brcm,28nm-gphy", "ethernet-phy-ieee802.3-c22"; compatible = "ethernet-phy-ieee802.3-c22";
}; };
}; };
}; };
@ -115,7 +115,7 @@ ethernet@f0ba0000 {
phy0: ethernet-phy@0 { phy0: ethernet-phy@0 {
max-speed = <1000>; max-speed = <1000>;
reg = <0x0>; reg = <0x0>;
compatible = "brcm,bcm53125", "ethernet-phy-ieee802.3-c22"; compatible = "ethernet-phy-ieee802.3-c22";
}; };
}; };
}; };

View File

@ -4,8 +4,6 @@ Required properties:
- compatible: should be "hisilicon,hns-dsaf-v1" or "hisilicon,hns-dsaf-v2". - compatible: should be "hisilicon,hns-dsaf-v1" or "hisilicon,hns-dsaf-v2".
"hisilicon,hns-dsaf-v1" is for hip05. "hisilicon,hns-dsaf-v1" is for hip05.
"hisilicon,hns-dsaf-v2" is for Hi1610 and Hi1612. "hisilicon,hns-dsaf-v2" is for Hi1610 and Hi1612.
- dsa-name: dsa fabric name who provide this interface.
should be "dsafX", X is the dsaf id.
- mode: dsa fabric mode string. only support one of dsaf modes like these: - mode: dsa fabric mode string. only support one of dsaf modes like these:
"2port-64vf", "2port-64vf",
"6port-16rss", "6port-16rss",
@ -26,9 +24,8 @@ Required properties:
Example: Example:
dsa: dsa@c7000000 { dsaf0: dsa@c7000000 {
compatible = "hisilicon,hns-dsaf-v1"; compatible = "hisilicon,hns-dsaf-v1";
dsa_name = "dsaf0";
mode = "6port-16rss"; mode = "6port-16rss";
interrupt-parent = <&mbigen_dsa>; interrupt-parent = <&mbigen_dsa>;
reg = <0x0 0xC0000000 0x0 0x420000 reg = <0x0 0xC0000000 0x0 0x420000

View File

@ -4,8 +4,9 @@ Required properties:
- compatible: "hisilicon,hns-nic-v1" or "hisilicon,hns-nic-v2". - compatible: "hisilicon,hns-nic-v1" or "hisilicon,hns-nic-v2".
"hisilicon,hns-nic-v1" is for hip05. "hisilicon,hns-nic-v1" is for hip05.
"hisilicon,hns-nic-v2" is for Hi1610 and Hi1612. "hisilicon,hns-nic-v2" is for Hi1610 and Hi1612.
- ae-name: accelerator name who provides this interface, - ae-handle: accelerator engine handle for hns,
is simply a name referring to the name of name in the accelerator node. specifies a reference to the associating hardware driver node.
see Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
- port-id: is the index of port provided by DSAF (the accelerator). DSAF can - port-id: is the index of port provided by DSAF (the accelerator). DSAF can
connect to 8 PHYs. Port 0 to 1 are both used for adminstration purpose. They connect to 8 PHYs. Port 0 to 1 are both used for adminstration purpose. They
are called debug ports. are called debug ports.
@ -41,7 +42,7 @@ Example:
ethernet@0{ ethernet@0{
compatible = "hisilicon,hns-nic-v1"; compatible = "hisilicon,hns-nic-v1";
ae-name = "dsaf0"; ae-handle = <&dsaf0>;
port-id = <0>; port-id = <0>;
local-mac-address = [a2 14 e4 4b 56 76]; local-mac-address = [a2 14 e4 4b 56 76];
}; };

View File

@ -6,12 +6,17 @@ Required properties:
- interrupts: interrupt for the device - interrupts: interrupt for the device
- phy: See ethernet.txt file in the same directory. - phy: See ethernet.txt file in the same directory.
- phy-mode: See ethernet.txt file in the same directory - phy-mode: See ethernet.txt file in the same directory
- clocks: a pointer to the reference clock for this device. - clocks: List of clocks for this device. At least one clock is
mandatory for the core clock. If several clocks are given, then the
clock-names property must be used to identify them.
Optional properties: Optional properties:
- tx-csum-limit: maximum mtu supported by port that allow TX checksum. - tx-csum-limit: maximum mtu supported by port that allow TX checksum.
Value is presented in bytes. If not used, by default 1600B is set for Value is presented in bytes. If not used, by default 1600B is set for
"marvell,armada-370-neta" and 9800B for others. "marvell,armada-370-neta" and 9800B for others.
- clock-names: List of names corresponding to clocks property; shall be
"core" for core clock and "bus" for the optional bus clock.
Example: Example:

View File

@ -38,7 +38,6 @@ Example :
phy11: ethernet-phy@1 { phy11: ethernet-phy@1 {
reg = <1>; reg = <1>;
compatible = "marvell,88e1149r";
marvell,reg-init = <3 0x10 0 0x5777>, marvell,reg-init = <3 0x10 0 0x5777>,
<3 0x11 0 0x00aa>, <3 0x11 0 0x00aa>,
<3 0x12 0 0x4105>, <3 0x12 0 0x4105>,
@ -48,7 +47,6 @@ Example :
}; };
phy12: ethernet-phy@2 { phy12: ethernet-phy@2 {
reg = <2>; reg = <2>;
compatible = "marvell,88e1149r";
marvell,reg-init = <3 0x10 0 0x5777>, marvell,reg-init = <3 0x10 0 0x5777>,
<3 0x11 0 0x00aa>, <3 0x11 0 0x00aa>,
<3 0x12 0 0x4105>, <3 0x12 0 0x4105>,
@ -58,7 +56,6 @@ Example :
}; };
phy13: ethernet-phy@3 { phy13: ethernet-phy@3 {
reg = <3>; reg = <3>;
compatible = "marvell,88e1149r";
marvell,reg-init = <3 0x10 0 0x5777>, marvell,reg-init = <3 0x10 0 0x5777>,
<3 0x11 0 0x00aa>, <3 0x11 0 0x00aa>,
<3 0x12 0 0x4105>, <3 0x12 0 0x4105>,
@ -68,7 +65,6 @@ Example :
}; };
phy14: ethernet-phy@4 { phy14: ethernet-phy@4 {
reg = <4>; reg = <4>;
compatible = "marvell,88e1149r";
marvell,reg-init = <3 0x10 0 0x5777>, marvell,reg-init = <3 0x10 0 0x5777>,
<3 0x11 0 0x00aa>, <3 0x11 0 0x00aa>,
<3 0x12 0 0x4105>, <3 0x12 0 0x4105>,
@ -85,7 +81,6 @@ Example :
phy21: ethernet-phy@1 { phy21: ethernet-phy@1 {
reg = <1>; reg = <1>;
compatible = "marvell,88e1149r";
marvell,reg-init = <3 0x10 0 0x5777>, marvell,reg-init = <3 0x10 0 0x5777>,
<3 0x11 0 0x00aa>, <3 0x11 0 0x00aa>,
<3 0x12 0 0x4105>, <3 0x12 0 0x4105>,
@ -95,7 +90,6 @@ Example :
}; };
phy22: ethernet-phy@2 { phy22: ethernet-phy@2 {
reg = <2>; reg = <2>;
compatible = "marvell,88e1149r";
marvell,reg-init = <3 0x10 0 0x5777>, marvell,reg-init = <3 0x10 0 0x5777>,
<3 0x11 0 0x00aa>, <3 0x11 0 0x00aa>,
<3 0x12 0 0x4105>, <3 0x12 0 0x4105>,
@ -105,7 +99,6 @@ Example :
}; };
phy23: ethernet-phy@3 { phy23: ethernet-phy@3 {
reg = <3>; reg = <3>;
compatible = "marvell,88e1149r";
marvell,reg-init = <3 0x10 0 0x5777>, marvell,reg-init = <3 0x10 0 0x5777>,
<3 0x11 0 0x00aa>, <3 0x11 0 0x00aa>,
<3 0x12 0 0x4105>, <3 0x12 0 0x4105>,
@ -115,7 +108,6 @@ Example :
}; };
phy24: ethernet-phy@4 { phy24: ethernet-phy@4 {
reg = <4>; reg = <4>;
compatible = "marvell,88e1149r";
marvell,reg-init = <3 0x10 0 0x5777>, marvell,reg-init = <3 0x10 0 0x5777>,
<3 0x11 0 0x00aa>, <3 0x11 0 0x00aa>,
<3 0x12 0 0x4105>, <3 0x12 0 0x4105>,

View File

@ -47,7 +47,6 @@ Example :
phy11: ethernet-phy@1 { phy11: ethernet-phy@1 {
reg = <1>; reg = <1>;
compatible = "marvell,88e1149r";
marvell,reg-init = <3 0x10 0 0x5777>, marvell,reg-init = <3 0x10 0 0x5777>,
<3 0x11 0 0x00aa>, <3 0x11 0 0x00aa>,
<3 0x12 0 0x4105>, <3 0x12 0 0x4105>,
@ -57,7 +56,6 @@ Example :
}; };
phy12: ethernet-phy@2 { phy12: ethernet-phy@2 {
reg = <2>; reg = <2>;
compatible = "marvell,88e1149r";
marvell,reg-init = <3 0x10 0 0x5777>, marvell,reg-init = <3 0x10 0 0x5777>,
<3 0x11 0 0x00aa>, <3 0x11 0 0x00aa>,
<3 0x12 0 0x4105>, <3 0x12 0 0x4105>,
@ -67,7 +65,6 @@ Example :
}; };
phy13: ethernet-phy@3 { phy13: ethernet-phy@3 {
reg = <3>; reg = <3>;
compatible = "marvell,88e1149r";
marvell,reg-init = <3 0x10 0 0x5777>, marvell,reg-init = <3 0x10 0 0x5777>,
<3 0x11 0 0x00aa>, <3 0x11 0 0x00aa>,
<3 0x12 0 0x4105>, <3 0x12 0 0x4105>,
@ -77,7 +74,6 @@ Example :
}; };
phy14: ethernet-phy@4 { phy14: ethernet-phy@4 {
reg = <4>; reg = <4>;
compatible = "marvell,88e1149r";
marvell,reg-init = <3 0x10 0 0x5777>, marvell,reg-init = <3 0x10 0 0x5777>,
<3 0x11 0 0x00aa>, <3 0x11 0 0x00aa>,
<3 0x12 0 0x4105>, <3 0x12 0 0x4105>,
@ -94,7 +90,6 @@ Example :
phy21: ethernet-phy@1 { phy21: ethernet-phy@1 {
reg = <1>; reg = <1>;
compatible = "marvell,88e1149r";
marvell,reg-init = <3 0x10 0 0x5777>, marvell,reg-init = <3 0x10 0 0x5777>,
<3 0x11 0 0x00aa>, <3 0x11 0 0x00aa>,
<3 0x12 0 0x4105>, <3 0x12 0 0x4105>,
@ -104,7 +99,6 @@ Example :
}; };
phy22: ethernet-phy@2 { phy22: ethernet-phy@2 {
reg = <2>; reg = <2>;
compatible = "marvell,88e1149r";
marvell,reg-init = <3 0x10 0 0x5777>, marvell,reg-init = <3 0x10 0 0x5777>,
<3 0x11 0 0x00aa>, <3 0x11 0 0x00aa>,
<3 0x12 0 0x4105>, <3 0x12 0 0x4105>,
@ -114,7 +108,6 @@ Example :
}; };
phy23: ethernet-phy@3 { phy23: ethernet-phy@3 {
reg = <3>; reg = <3>;
compatible = "marvell,88e1149r";
marvell,reg-init = <3 0x10 0 0x5777>, marvell,reg-init = <3 0x10 0 0x5777>,
<3 0x11 0 0x00aa>, <3 0x11 0 0x00aa>,
<3 0x12 0 0x4105>, <3 0x12 0 0x4105>,
@ -124,7 +117,6 @@ Example :
}; };
phy24: ethernet-phy@4 { phy24: ethernet-phy@4 {
reg = <4>; reg = <4>;
compatible = "marvell,88e1149r";
marvell,reg-init = <3 0x10 0 0x5777>, marvell,reg-init = <3 0x10 0 0x5777>,
<3 0x11 0 0x00aa>, <3 0x11 0 0x00aa>,
<3 0x12 0 0x4105>, <3 0x12 0 0x4105>,

View File

@ -17,8 +17,7 @@ Optional Properties:
"ethernet-phy-ieee802.3-c22" or "ethernet-phy-ieee802.3-c45" for "ethernet-phy-ieee802.3-c22" or "ethernet-phy-ieee802.3-c45" for
PHYs that implement IEEE802.3 clause 22 or IEEE802.3 clause 45 PHYs that implement IEEE802.3 clause 22 or IEEE802.3 clause 45
specifications. If neither of these are specified, the default is to specifications. If neither of these are specified, the default is to
assume clause 22. The compatible list may also contain other assume clause 22.
elements.
If the phy's identifier is known then the list may contain an entry If the phy's identifier is known then the list may contain an entry
of the form: "ethernet-phy-idAAAA.BBBB" where of the form: "ethernet-phy-idAAAA.BBBB" where
@ -28,6 +27,9 @@ Optional Properties:
4 hex digits. This is the chip vendor OUI bits 19:24, 4 hex digits. This is the chip vendor OUI bits 19:24,
followed by 10 bits of a vendor specific ID. followed by 10 bits of a vendor specific ID.
The compatible list should not contain other values than those
listed here.
- max-speed: Maximum PHY supported speed (10, 100, 1000...) - max-speed: Maximum PHY supported speed (10, 100, 1000...)
- broken-turn-around: If set, indicates the PHY device does not correctly - broken-turn-around: If set, indicates the PHY device does not correctly

View File

@ -594,7 +594,7 @@ tcp_fastopen - INTEGER
tcp_syn_retries - INTEGER tcp_syn_retries - INTEGER
Number of times initial SYNs for an active TCP connection attempt Number of times initial SYNs for an active TCP connection attempt
will be retransmitted. Should not be higher than 255. Default value will be retransmitted. Should not be higher than 127. Default value
is 6, which corresponds to 63seconds till the last retransmission is 6, which corresponds to 63seconds till the last retransmission
with the current initial RTO of 1second. With this the final timeout with the current initial RTO of 1second. With this the final timeout
for an active TCP connection attempt will happen after 127seconds. for an active TCP connection attempt will happen after 127seconds.

View File

@ -10173,7 +10173,7 @@ F: drivers/net/ethernet/natsemi/sonic.*
SONICS SILICON BACKPLANE DRIVER (SSB) SONICS SILICON BACKPLANE DRIVER (SSB)
M: Michael Buesch <m@bues.ch> M: Michael Buesch <m@bues.ch>
L: netdev@vger.kernel.org L: linux-wireless@vger.kernel.org
S: Maintained S: Maintained
F: drivers/ssb/ F: drivers/ssb/
F: include/linux/ssb/ F: include/linux/ssb/

View File

@ -23,9 +23,8 @@ soc0: soc@000000000 {
}; };
}; };
dsa: dsa@c7000000 { dsaf0: dsa@c7000000 {
compatible = "hisilicon,hns-dsaf-v1"; compatible = "hisilicon,hns-dsaf-v1";
dsa_name = "dsaf0";
mode = "6port-16rss"; mode = "6port-16rss";
interrupt-parent = <&mbigen_dsa>; interrupt-parent = <&mbigen_dsa>;
@ -127,7 +126,7 @@ soc0: soc@000000000 {
eth0: ethernet@0{ eth0: ethernet@0{
compatible = "hisilicon,hns-nic-v1"; compatible = "hisilicon,hns-nic-v1";
ae-name = "dsaf0"; ae-handle = <&dsaf0>;
port-id = <0>; port-id = <0>;
local-mac-address = [00 00 00 01 00 58]; local-mac-address = [00 00 00 01 00 58];
status = "disabled"; status = "disabled";
@ -135,14 +134,14 @@ soc0: soc@000000000 {
}; };
eth1: ethernet@1{ eth1: ethernet@1{
compatible = "hisilicon,hns-nic-v1"; compatible = "hisilicon,hns-nic-v1";
ae-name = "dsaf0"; ae-handle = <&dsaf0>;
port-id = <1>; port-id = <1>;
status = "disabled"; status = "disabled";
dma-coherent; dma-coherent;
}; };
eth2: ethernet@2{ eth2: ethernet@2{
compatible = "hisilicon,hns-nic-v1"; compatible = "hisilicon,hns-nic-v1";
ae-name = "dsaf0"; ae-handle = <&dsaf0>;
port-id = <2>; port-id = <2>;
local-mac-address = [00 00 00 01 00 5a]; local-mac-address = [00 00 00 01 00 5a];
status = "disabled"; status = "disabled";
@ -150,7 +149,7 @@ soc0: soc@000000000 {
}; };
eth3: ethernet@3{ eth3: ethernet@3{
compatible = "hisilicon,hns-nic-v1"; compatible = "hisilicon,hns-nic-v1";
ae-name = "dsaf0"; ae-handle = <&dsaf0>;
port-id = <3>; port-id = <3>;
local-mac-address = [00 00 00 01 00 5b]; local-mac-address = [00 00 00 01 00 5b];
status = "disabled"; status = "disabled";
@ -158,7 +157,7 @@ soc0: soc@000000000 {
}; };
eth4: ethernet@4{ eth4: ethernet@4{
compatible = "hisilicon,hns-nic-v1"; compatible = "hisilicon,hns-nic-v1";
ae-name = "dsaf0"; ae-handle = <&dsaf0>;
port-id = <4>; port-id = <4>;
local-mac-address = [00 00 00 01 00 5c]; local-mac-address = [00 00 00 01 00 5c];
status = "disabled"; status = "disabled";
@ -166,7 +165,7 @@ soc0: soc@000000000 {
}; };
eth5: ethernet@5{ eth5: ethernet@5{
compatible = "hisilicon,hns-nic-v1"; compatible = "hisilicon,hns-nic-v1";
ae-name = "dsaf0"; ae-handle = <&dsaf0>;
port-id = <5>; port-id = <5>;
local-mac-address = [00 00 00 01 00 5d]; local-mac-address = [00 00 00 01 00 5d];
status = "disabled"; status = "disabled";
@ -174,7 +173,7 @@ soc0: soc@000000000 {
}; };
eth6: ethernet@6{ eth6: ethernet@6{
compatible = "hisilicon,hns-nic-v1"; compatible = "hisilicon,hns-nic-v1";
ae-name = "dsaf0"; ae-handle = <&dsaf0>;
port-id = <6>; port-id = <6>;
local-mac-address = [00 00 00 01 00 5e]; local-mac-address = [00 00 00 01 00 5e];
status = "disabled"; status = "disabled";
@ -182,7 +181,7 @@ soc0: soc@000000000 {
}; };
eth7: ethernet@7{ eth7: ethernet@7{
compatible = "hisilicon,hns-nic-v1"; compatible = "hisilicon,hns-nic-v1";
ae-name = "dsaf0"; ae-handle = <&dsaf0>;
port-id = <7>; port-id = <7>;
local-mac-address = [00 00 00 01 00 5f]; local-mac-address = [00 00 00 01 00 5f];
status = "disabled"; status = "disabled";

View File

@ -1532,7 +1532,7 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
/* no PVID with ranges, otherwise it's a bug */ /* no PVID with ranges, otherwise it's a bug */
if (pvid) if (pvid)
err = _mv88e6xxx_port_pvid_set(ds, port, vid); err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
unlock: unlock:
mutex_unlock(&ps->smi_mutex); mutex_unlock(&ps->smi_mutex);
@ -2163,7 +2163,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
* database, and allow every port to egress frames on all other ports. * database, and allow every port to egress frames on all other ports.
*/ */
reg = BIT(ps->num_ports) - 1; /* all ports */ reg = BIT(ps->num_ports) - 1; /* all ports */
ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg & ~port); reg &= ~BIT(port); /* except itself */
ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg);
if (ret) if (ret)
goto abort; goto abort;

View File

@ -628,6 +628,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
int ret; int ret;
ring = pdata->rx_ring; ring = pdata->rx_ring;
irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
IRQF_SHARED, ring->irq_name, ring); IRQF_SHARED, ring->irq_name, ring);
if (ret) if (ret)
@ -635,6 +636,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
if (pdata->cq_cnt) { if (pdata->cq_cnt) {
ring = pdata->tx_ring->cp_ring; ring = pdata->tx_ring->cp_ring;
irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
IRQF_SHARED, ring->irq_name, ring); IRQF_SHARED, ring->irq_name, ring);
if (ret) { if (ret) {
@ -649,15 +651,19 @@ static int xgene_enet_register_irq(struct net_device *ndev)
static void xgene_enet_free_irq(struct net_device *ndev) static void xgene_enet_free_irq(struct net_device *ndev)
{ {
struct xgene_enet_pdata *pdata; struct xgene_enet_pdata *pdata;
struct xgene_enet_desc_ring *ring;
struct device *dev; struct device *dev;
pdata = netdev_priv(ndev); pdata = netdev_priv(ndev);
dev = ndev_to_dev(ndev); dev = ndev_to_dev(ndev);
devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring); ring = pdata->rx_ring;
irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
devm_free_irq(dev, ring->irq, ring);
if (pdata->cq_cnt) { if (pdata->cq_cnt) {
devm_free_irq(dev, pdata->tx_ring->cp_ring->irq, ring = pdata->tx_ring->cp_ring;
pdata->tx_ring->cp_ring); irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
devm_free_irq(dev, ring->irq, ring);
} }
} }

View File

@ -25,6 +25,7 @@
#include <linux/acpi.h> #include <linux/acpi.h>
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/efi.h> #include <linux/efi.h>
#include <linux/irq.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/of_platform.h> #include <linux/of_platform.h>
#include <linux/of_net.h> #include <linux/of_net.h>

View File

@ -302,7 +302,7 @@ static int nb8800_poll(struct napi_struct *napi, int budget)
nb8800_tx_done(dev); nb8800_tx_done(dev);
again: again:
while (work < budget) { do {
struct nb8800_rx_buf *rxb; struct nb8800_rx_buf *rxb;
unsigned int len; unsigned int len;
@ -330,7 +330,7 @@ again:
rxd->report = 0; rxd->report = 0;
last = next; last = next;
work++; work++;
} } while (work < budget);
if (work) { if (work) {
priv->rx_descs[last].desc.config |= DESC_EOC; priv->rx_descs[last].desc.config |= DESC_EOC;

View File

@ -151,8 +151,11 @@ config BNX2X_VXLAN
config BGMAC config BGMAC
tristate "BCMA bus GBit core support" tristate "BCMA bus GBit core support"
depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X) depends on BCMA && BCMA_HOST_SOC
depends on HAS_DMA
depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
select PHYLIB select PHYLIB
select FIXED_PHY
---help--- ---help---
This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus. This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
They can be found on BCM47xx SoCs and provide gigabit ethernet. They can be found on BCM47xx SoCs and provide gigabit ethernet.

View File

@ -1490,10 +1490,11 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
last = tx_buf->nr_frags; last = tx_buf->nr_frags;
j += 2; j += 2;
for (k = 0; k < last; k++, j = NEXT_TX(j)) { for (k = 0; k < last; k++, j++) {
int ring_idx = j & bp->tx_ring_mask;
skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
tx_buf = &txr->tx_buf_ring[j]; tx_buf = &txr->tx_buf_ring[ring_idx];
dma_unmap_page( dma_unmap_page(
&pdev->dev, &pdev->dev,
dma_unmap_addr(tx_buf, mapping), dma_unmap_addr(tx_buf, mapping),
@ -3406,7 +3407,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
u16 error_code; u16 error_code;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
req.ring_type = ring_type; req.ring_type = ring_type;
req.ring_id = cpu_to_le16(ring->fw_ring_id); req.ring_id = cpu_to_le16(ring->fw_ring_id);
@ -4819,8 +4820,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts);
stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
} }

View File

@ -401,7 +401,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
* Ethernet MAC ISRs * Ethernet MAC ISRs
*/ */
if (priv->internal_phy) if (priv->internal_phy)
priv->mii_bus->irq[phydev->mdio.addr] = PHY_IGNORE_INTERRUPT; priv->phydev->irq = PHY_IGNORE_INTERRUPT;
return 0; return 0;
} }

View File

@ -12016,7 +12016,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
int ret; int ret;
u32 offset, len, b_offset, odd_len; u32 offset, len, b_offset, odd_len;
u8 *buf; u8 *buf;
__be32 start, end; __be32 start = 0, end;
if (tg3_flag(tp, NO_NVRAM) || if (tg3_flag(tp, NO_NVRAM) ||
eeprom->magic != TG3_EEPROM_MAGIC) eeprom->magic != TG3_EEPROM_MAGIC)

View File

@ -2823,7 +2823,7 @@ static int macb_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node; struct device_node *np = pdev->dev.of_node;
struct device_node *phy_node; struct device_node *phy_node;
const struct macb_config *macb_config = NULL; const struct macb_config *macb_config = NULL;
struct clk *pclk, *hclk, *tx_clk; struct clk *pclk, *hclk = NULL, *tx_clk = NULL;
unsigned int queue_mask, num_queues; unsigned int queue_mask, num_queues;
struct macb_platform_data *pdata; struct macb_platform_data *pdata;
bool native_io; bool native_io;

View File

@ -1526,7 +1526,6 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
struct timespec64 *ts) struct timespec64 *ts)
{ {
u64 ns; u64 ns;
u32 remainder;
unsigned long flags; unsigned long flags;
struct lio *lio = container_of(ptp, struct lio, ptp_info); struct lio *lio = container_of(ptp, struct lio, ptp_info);
struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
@ -1536,8 +1535,7 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
ns += lio->ptp_adjust; ns += lio->ptp_adjust;
spin_unlock_irqrestore(&lio->ptp_lock, flags); spin_unlock_irqrestore(&lio->ptp_lock, flags);
ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); *ts = ns_to_timespec64(ns);
ts->tv_nsec = remainder;
return 0; return 0;
} }

View File

@ -18,6 +18,7 @@ if NET_VENDOR_EZCHIP
config EZCHIP_NPS_MANAGEMENT_ENET config EZCHIP_NPS_MANAGEMENT_ENET
tristate "EZchip NPS management enet support" tristate "EZchip NPS management enet support"
depends on OF_IRQ && OF_NET depends on OF_IRQ && OF_NET
depends on HAS_IOMEM
---help--- ---help---
Simple LAN device for debug or management purposes. Simple LAN device for debug or management purposes.
Device supports interrupts for RX and TX(completion). Device supports interrupts for RX and TX(completion).

View File

@ -4,6 +4,9 @@
obj-$(CONFIG_FEC) += fec.o obj-$(CONFIG_FEC) += fec.o
fec-objs :=fec_main.o fec_ptp.o fec-objs :=fec_main.o fec_ptp.o
CFLAGS_fec_main.o := -D__CHECK_ENDIAN__
CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o

View File

@ -19,8 +19,7 @@
#include <linux/timecounter.h> #include <linux/timecounter.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
/* /*
* Just figures, Motorola would have to change the offsets for * Just figures, Motorola would have to change the offsets for
* registers in the same peripheral device on different models * registers in the same peripheral device on different models
@ -190,28 +189,45 @@
/* /*
* Define the buffer descriptor structure. * Define the buffer descriptor structure.
*
* Evidently, ARM SoCs have the FEC block generated in a
* little endian mode so adjust endianness accordingly.
*/ */
#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) #if defined(CONFIG_ARM)
#define fec32_to_cpu le32_to_cpu
#define fec16_to_cpu le16_to_cpu
#define cpu_to_fec32 cpu_to_le32
#define cpu_to_fec16 cpu_to_le16
#define __fec32 __le32
#define __fec16 __le16
struct bufdesc { struct bufdesc {
unsigned short cbd_datlen; /* Data length */ __fec16 cbd_datlen; /* Data length */
unsigned short cbd_sc; /* Control and status info */ __fec16 cbd_sc; /* Control and status info */
unsigned long cbd_bufaddr; /* Buffer address */ __fec32 cbd_bufaddr; /* Buffer address */
}; };
#else #else
#define fec32_to_cpu be32_to_cpu
#define fec16_to_cpu be16_to_cpu
#define cpu_to_fec32 cpu_to_be32
#define cpu_to_fec16 cpu_to_be16
#define __fec32 __be32
#define __fec16 __be16
struct bufdesc { struct bufdesc {
unsigned short cbd_sc; /* Control and status info */ __fec16 cbd_sc; /* Control and status info */
unsigned short cbd_datlen; /* Data length */ __fec16 cbd_datlen; /* Data length */
unsigned long cbd_bufaddr; /* Buffer address */ __fec32 cbd_bufaddr; /* Buffer address */
}; };
#endif #endif
struct bufdesc_ex { struct bufdesc_ex {
struct bufdesc desc; struct bufdesc desc;
unsigned long cbd_esc; __fec32 cbd_esc;
unsigned long cbd_prot; __fec32 cbd_prot;
unsigned long cbd_bdu; __fec32 cbd_bdu;
unsigned long ts; __fec32 ts;
unsigned short res0[4]; __fec16 res0[4];
}; };
/* /*

View File

@ -332,11 +332,13 @@ static void fec_dump(struct net_device *ndev)
bdp = txq->tx_bd_base; bdp = txq->tx_bd_base;
do { do {
pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n", pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
index, index,
bdp == txq->cur_tx ? 'S' : ' ', bdp == txq->cur_tx ? 'S' : ' ',
bdp == txq->dirty_tx ? 'H' : ' ', bdp == txq->dirty_tx ? 'H' : ' ',
bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen, fec16_to_cpu(bdp->cbd_sc),
fec32_to_cpu(bdp->cbd_bufaddr),
fec16_to_cpu(bdp->cbd_datlen),
txq->tx_skbuff[index]); txq->tx_skbuff[index]);
bdp = fec_enet_get_nextdesc(bdp, fep, 0); bdp = fec_enet_get_nextdesc(bdp, fep, 0);
index++; index++;
@ -389,7 +391,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
bdp = fec_enet_get_nextdesc(bdp, fep, queue); bdp = fec_enet_get_nextdesc(bdp, fep, queue);
ebdp = (struct bufdesc_ex *)bdp; ebdp = (struct bufdesc_ex *)bdp;
status = bdp->cbd_sc; status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS; status &= ~BD_ENET_TX_STATS;
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
frag_len = skb_shinfo(skb)->frags[frag].size; frag_len = skb_shinfo(skb)->frags[frag].size;
@ -411,7 +413,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
if (skb->ip_summed == CHECKSUM_PARTIAL) if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0; ebdp->cbd_bdu = 0;
ebdp->cbd_esc = estatus; ebdp->cbd_esc = cpu_to_fec32(estatus);
} }
bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
@ -435,9 +437,9 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
goto dma_mapping_error; goto dma_mapping_error;
} }
bdp->cbd_bufaddr = addr; bdp->cbd_bufaddr = cpu_to_fec32(addr);
bdp->cbd_datlen = frag_len; bdp->cbd_datlen = cpu_to_fec16(frag_len);
bdp->cbd_sc = status; bdp->cbd_sc = cpu_to_fec16(status);
} }
return bdp; return bdp;
@ -445,8 +447,8 @@ dma_mapping_error:
bdp = txq->cur_tx; bdp = txq->cur_tx;
for (i = 0; i < frag; i++) { for (i = 0; i < frag; i++) {
bdp = fec_enet_get_nextdesc(bdp, fep, queue); bdp = fec_enet_get_nextdesc(bdp, fep, queue);
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
bdp->cbd_datlen, DMA_TO_DEVICE); fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
} }
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
@ -483,7 +485,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
/* Fill in a Tx ring entry */ /* Fill in a Tx ring entry */
bdp = txq->cur_tx; bdp = txq->cur_tx;
last_bdp = bdp; last_bdp = bdp;
status = bdp->cbd_sc; status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS; status &= ~BD_ENET_TX_STATS;
/* Set buffer length and buffer pointer */ /* Set buffer length and buffer pointer */
@ -539,21 +541,21 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0; ebdp->cbd_bdu = 0;
ebdp->cbd_esc = estatus; ebdp->cbd_esc = cpu_to_fec32(estatus);
} }
index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
/* Save skb pointer */ /* Save skb pointer */
txq->tx_skbuff[index] = skb; txq->tx_skbuff[index] = skb;
bdp->cbd_datlen = buflen; bdp->cbd_datlen = cpu_to_fec16(buflen);
bdp->cbd_bufaddr = addr; bdp->cbd_bufaddr = cpu_to_fec32(addr);
/* Send it on its way. Tell FEC it's ready, interrupt when done, /* Send it on its way. Tell FEC it's ready, interrupt when done,
* it's the last BD of the frame, and to put the CRC on the end. * it's the last BD of the frame, and to put the CRC on the end.
*/ */
status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
bdp->cbd_sc = status; bdp->cbd_sc = cpu_to_fec16(status);
/* If this was the last BD in the ring, start at the beginning again. */ /* If this was the last BD in the ring, start at the beginning again. */
bdp = fec_enet_get_nextdesc(last_bdp, fep, queue); bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
@ -585,7 +587,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
unsigned int estatus = 0; unsigned int estatus = 0;
dma_addr_t addr; dma_addr_t addr;
status = bdp->cbd_sc; status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS; status &= ~BD_ENET_TX_STATS;
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
@ -607,8 +609,8 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
bdp->cbd_datlen = size; bdp->cbd_datlen = cpu_to_fec16(size);
bdp->cbd_bufaddr = addr; bdp->cbd_bufaddr = cpu_to_fec32(addr);
if (fep->bufdesc_ex) { if (fep->bufdesc_ex) {
if (fep->quirks & FEC_QUIRK_HAS_AVB) if (fep->quirks & FEC_QUIRK_HAS_AVB)
@ -616,7 +618,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL) if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0; ebdp->cbd_bdu = 0;
ebdp->cbd_esc = estatus; ebdp->cbd_esc = cpu_to_fec32(estatus);
} }
/* Handle the last BD specially */ /* Handle the last BD specially */
@ -625,10 +627,10 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
if (is_last) { if (is_last) {
status |= BD_ENET_TX_INTR; status |= BD_ENET_TX_INTR;
if (fep->bufdesc_ex) if (fep->bufdesc_ex)
ebdp->cbd_esc |= BD_ENET_TX_INT; ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
} }
bdp->cbd_sc = status; bdp->cbd_sc = cpu_to_fec16(status);
return 0; return 0;
} }
@ -647,7 +649,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
unsigned short status; unsigned short status;
unsigned int estatus = 0; unsigned int estatus = 0;
status = bdp->cbd_sc; status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS; status &= ~BD_ENET_TX_STATS;
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
@ -671,8 +673,8 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
} }
} }
bdp->cbd_bufaddr = dmabuf; bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
bdp->cbd_datlen = hdr_len; bdp->cbd_datlen = cpu_to_fec16(hdr_len);
if (fep->bufdesc_ex) { if (fep->bufdesc_ex) {
if (fep->quirks & FEC_QUIRK_HAS_AVB) if (fep->quirks & FEC_QUIRK_HAS_AVB)
@ -680,10 +682,10 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
if (skb->ip_summed == CHECKSUM_PARTIAL) if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0; ebdp->cbd_bdu = 0;
ebdp->cbd_esc = estatus; ebdp->cbd_esc = cpu_to_fec32(estatus);
} }
bdp->cbd_sc = status; bdp->cbd_sc = cpu_to_fec16(status);
return 0; return 0;
} }
@ -823,15 +825,15 @@ static void fec_enet_bd_init(struct net_device *dev)
/* Initialize the BD for every fragment in the page. */ /* Initialize the BD for every fragment in the page. */
if (bdp->cbd_bufaddr) if (bdp->cbd_bufaddr)
bdp->cbd_sc = BD_ENET_RX_EMPTY; bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
else else
bdp->cbd_sc = 0; bdp->cbd_sc = cpu_to_fec16(0);
bdp = fec_enet_get_nextdesc(bdp, fep, q); bdp = fec_enet_get_nextdesc(bdp, fep, q);
} }
/* Set the last buffer to wrap */ /* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep, q); bdp = fec_enet_get_prevdesc(bdp, fep, q);
bdp->cbd_sc |= BD_SC_WRAP; bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
rxq->cur_rx = rxq->rx_bd_base; rxq->cur_rx = rxq->rx_bd_base;
} }
@ -844,18 +846,18 @@ static void fec_enet_bd_init(struct net_device *dev)
for (i = 0; i < txq->tx_ring_size; i++) { for (i = 0; i < txq->tx_ring_size; i++) {
/* Initialize the BD for every fragment in the page. */ /* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0; bdp->cbd_sc = cpu_to_fec16(0);
if (txq->tx_skbuff[i]) { if (txq->tx_skbuff[i]) {
dev_kfree_skb_any(txq->tx_skbuff[i]); dev_kfree_skb_any(txq->tx_skbuff[i]);
txq->tx_skbuff[i] = NULL; txq->tx_skbuff[i] = NULL;
} }
bdp->cbd_bufaddr = 0; bdp->cbd_bufaddr = cpu_to_fec32(0);
bdp = fec_enet_get_nextdesc(bdp, fep, q); bdp = fec_enet_get_nextdesc(bdp, fep, q);
} }
/* Set the last buffer to wrap */ /* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep, q); bdp = fec_enet_get_prevdesc(bdp, fep, q);
bdp->cbd_sc |= BD_SC_WRAP; bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
txq->dirty_tx = bdp; txq->dirty_tx = bdp;
} }
} }
@ -947,8 +949,10 @@ fec_restart(struct net_device *ndev)
*/ */
if (fep->quirks & FEC_QUIRK_ENET_MAC) { if (fep->quirks & FEC_QUIRK_ENET_MAC) {
memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); writel((__force u32)cpu_to_be32(temp_mac[0]),
writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); fep->hwp + FEC_ADDR_LOW);
writel((__force u32)cpu_to_be32(temp_mac[1]),
fep->hwp + FEC_ADDR_HIGH);
} }
/* Clear any outstanding interrupt. */ /* Clear any outstanding interrupt. */
@ -1222,7 +1226,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
while (bdp != READ_ONCE(txq->cur_tx)) { while (bdp != READ_ONCE(txq->cur_tx)) {
/* Order the load of cur_tx and cbd_sc */ /* Order the load of cur_tx and cbd_sc */
rmb(); rmb();
status = READ_ONCE(bdp->cbd_sc); status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
if (status & BD_ENET_TX_READY) if (status & BD_ENET_TX_READY)
break; break;
@ -1230,10 +1234,12 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
skb = txq->tx_skbuff[index]; skb = txq->tx_skbuff[index];
txq->tx_skbuff[index] = NULL; txq->tx_skbuff[index] = NULL;
if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, dma_unmap_single(&fep->pdev->dev,
bdp->cbd_datlen, DMA_TO_DEVICE); fec32_to_cpu(bdp->cbd_bufaddr),
bdp->cbd_bufaddr = 0; fec16_to_cpu(bdp->cbd_datlen),
DMA_TO_DEVICE);
bdp->cbd_bufaddr = cpu_to_fec32(0);
if (!skb) { if (!skb) {
bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
continue; continue;
@ -1264,7 +1270,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
struct skb_shared_hwtstamps shhwtstamps; struct skb_shared_hwtstamps shhwtstamps;
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps); fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
skb_tstamp_tx(skb, &shhwtstamps); skb_tstamp_tx(skb, &shhwtstamps);
} }
@ -1324,10 +1330,8 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
if (off) if (off)
skb_reserve(skb, fep->rx_align + 1 - off); skb_reserve(skb, fep->rx_align + 1 - off);
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
FEC_ENET_RX_FRSIZE - fep->rx_align, if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
DMA_FROM_DEVICE);
if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
if (net_ratelimit()) if (net_ratelimit())
netdev_err(ndev, "Rx DMA memory map failed\n"); netdev_err(ndev, "Rx DMA memory map failed\n");
return -ENOMEM; return -ENOMEM;
@ -1349,7 +1353,8 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
if (!new_skb) if (!new_skb)
return false; return false;
dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, dma_sync_single_for_cpu(&fep->pdev->dev,
fec32_to_cpu(bdp->cbd_bufaddr),
FEC_ENET_RX_FRSIZE - fep->rx_align, FEC_ENET_RX_FRSIZE - fep->rx_align,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
if (!swap) if (!swap)
@ -1396,7 +1401,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
*/ */
bdp = rxq->cur_rx; bdp = rxq->cur_rx;
while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
if (pkt_received >= budget) if (pkt_received >= budget)
break; break;
@ -1438,7 +1443,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
/* Process the incoming frame. */ /* Process the incoming frame. */
ndev->stats.rx_packets++; ndev->stats.rx_packets++;
pkt_len = bdp->cbd_datlen; pkt_len = fec16_to_cpu(bdp->cbd_datlen);
ndev->stats.rx_bytes += pkt_len; ndev->stats.rx_bytes += pkt_len;
index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
@ -1456,7 +1461,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
ndev->stats.rx_dropped++; ndev->stats.rx_dropped++;
goto rx_processing_done; goto rx_processing_done;
} }
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, dma_unmap_single(&fep->pdev->dev,
fec32_to_cpu(bdp->cbd_bufaddr),
FEC_ENET_RX_FRSIZE - fep->rx_align, FEC_ENET_RX_FRSIZE - fep->rx_align,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
} }
@ -1475,7 +1481,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
/* If this is a VLAN packet remove the VLAN Tag */ /* If this is a VLAN packet remove the VLAN Tag */
vlan_packet_rcvd = false; vlan_packet_rcvd = false;
if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { fep->bufdesc_ex &&
(ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
/* Push and remove the vlan tag */ /* Push and remove the vlan tag */
struct vlan_hdr *vlan_header = struct vlan_hdr *vlan_header =
(struct vlan_hdr *) (data + ETH_HLEN); (struct vlan_hdr *) (data + ETH_HLEN);
@ -1491,12 +1498,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
/* Get receive timestamp from the skb */ /* Get receive timestamp from the skb */
if (fep->hwts_rx_en && fep->bufdesc_ex) if (fep->hwts_rx_en && fep->bufdesc_ex)
fec_enet_hwtstamp(fep, ebdp->ts, fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
skb_hwtstamps(skb)); skb_hwtstamps(skb));
if (fep->bufdesc_ex && if (fep->bufdesc_ex &&
(fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) { if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
/* don't check it */ /* don't check it */
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
} else { } else {
@ -1513,7 +1520,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
napi_gro_receive(&fep->napi, skb); napi_gro_receive(&fep->napi, skb);
if (is_copybreak) { if (is_copybreak) {
dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, dma_sync_single_for_device(&fep->pdev->dev,
fec32_to_cpu(bdp->cbd_bufaddr),
FEC_ENET_RX_FRSIZE - fep->rx_align, FEC_ENET_RX_FRSIZE - fep->rx_align,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
} else { } else {
@ -1527,12 +1535,12 @@ rx_processing_done:
/* Mark the buffer empty */ /* Mark the buffer empty */
status |= BD_ENET_RX_EMPTY; status |= BD_ENET_RX_EMPTY;
bdp->cbd_sc = status; bdp->cbd_sc = cpu_to_fec16(status);
if (fep->bufdesc_ex) { if (fep->bufdesc_ex) {
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
ebdp->cbd_esc = BD_ENET_RX_INT; ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
ebdp->cbd_prot = 0; ebdp->cbd_prot = 0;
ebdp->cbd_bdu = 0; ebdp->cbd_bdu = 0;
} }
@ -2145,8 +2153,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
/* List of registers that can be safety be read to dump them with ethtool */ /* List of registers that can be safety be read to dump them with ethtool */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
static u32 fec_enet_register_offset[] = { static u32 fec_enet_register_offset[] = {
FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
@ -2662,7 +2669,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
rxq->rx_skbuff[i] = NULL; rxq->rx_skbuff[i] = NULL;
if (skb) { if (skb) {
dma_unmap_single(&fep->pdev->dev, dma_unmap_single(&fep->pdev->dev,
bdp->cbd_bufaddr, fec32_to_cpu(bdp->cbd_bufaddr),
FEC_ENET_RX_FRSIZE - fep->rx_align, FEC_ENET_RX_FRSIZE - fep->rx_align,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
dev_kfree_skb(skb); dev_kfree_skb(skb);
@ -2777,11 +2784,11 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
} }
rxq->rx_skbuff[i] = skb; rxq->rx_skbuff[i] = skb;
bdp->cbd_sc = BD_ENET_RX_EMPTY; bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
if (fep->bufdesc_ex) { if (fep->bufdesc_ex) {
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
ebdp->cbd_esc = BD_ENET_RX_INT; ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
} }
bdp = fec_enet_get_nextdesc(bdp, fep, queue); bdp = fec_enet_get_nextdesc(bdp, fep, queue);
@ -2789,7 +2796,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */ /* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, fep, queue); bdp = fec_enet_get_prevdesc(bdp, fep, queue);
bdp->cbd_sc |= BD_SC_WRAP; bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
return 0; return 0;
err_alloc: err_alloc:
@ -2812,12 +2819,12 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
if (!txq->tx_bounce[i]) if (!txq->tx_bounce[i])
goto err_alloc; goto err_alloc;
bdp->cbd_sc = 0; bdp->cbd_sc = cpu_to_fec16(0);
bdp->cbd_bufaddr = 0; bdp->cbd_bufaddr = cpu_to_fec32(0);
if (fep->bufdesc_ex) { if (fep->bufdesc_ex) {
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
ebdp->cbd_esc = BD_ENET_TX_INT; ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
} }
bdp = fec_enet_get_nextdesc(bdp, fep, queue); bdp = fec_enet_get_nextdesc(bdp, fep, queue);
@ -2825,7 +2832,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
/* Set the last buffer to wrap. */ /* Set the last buffer to wrap. */
bdp = fec_enet_get_prevdesc(bdp, fep, queue); bdp = fec_enet_get_prevdesc(bdp, fep, queue);
bdp->cbd_sc |= BD_SC_WRAP; bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
return 0; return 0;

View File

@ -552,7 +552,7 @@ static void tx_restart(struct net_device *dev)
cbd_t __iomem *prev_bd; cbd_t __iomem *prev_bd;
cbd_t __iomem *last_tx_bd; cbd_t __iomem *last_tx_bd;
last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t)); last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1);
/* get the current bd held in TBPTR and scan back from this point */ /* get the current bd held in TBPTR and scan back from this point */
recheck_bd = curr_tbptr = (cbd_t __iomem *) recheck_bd = curr_tbptr = (cbd_t __iomem *)

View File

@ -95,21 +95,17 @@ static struct hnae_buf_ops hnae_bops = {
static int __ae_match(struct device *dev, const void *data) static int __ae_match(struct device *dev, const void *data)
{ {
struct hnae_ae_dev *hdev = cls_to_ae_dev(dev); struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
const char *ae_id = data;
if (!strncmp(ae_id, hdev->name, AE_NAME_SIZE)) return hdev->dev->of_node == data;
return 1;
return 0;
} }
static struct hnae_ae_dev *find_ae(const char *ae_id) static struct hnae_ae_dev *find_ae(const struct device_node *ae_node)
{ {
struct device *dev; struct device *dev;
WARN_ON(!ae_id); WARN_ON(!ae_node);
dev = class_find_device(hnae_class, NULL, ae_id, __ae_match); dev = class_find_device(hnae_class, NULL, ae_node, __ae_match);
return dev ? cls_to_ae_dev(dev) : NULL; return dev ? cls_to_ae_dev(dev) : NULL;
} }
@ -316,7 +312,8 @@ EXPORT_SYMBOL(hnae_reinit_handle);
* return handle ptr or ERR_PTR * return handle ptr or ERR_PTR
*/ */
struct hnae_handle *hnae_get_handle(struct device *owner_dev, struct hnae_handle *hnae_get_handle(struct device *owner_dev,
const char *ae_id, u32 port_id, const struct device_node *ae_node,
u32 port_id,
struct hnae_buf_ops *bops) struct hnae_buf_ops *bops)
{ {
struct hnae_ae_dev *dev; struct hnae_ae_dev *dev;
@ -324,7 +321,7 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev,
int i, j; int i, j;
int ret; int ret;
dev = find_ae(ae_id); dev = find_ae(ae_node);
if (!dev) if (!dev)
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);

View File

@ -524,8 +524,11 @@ struct hnae_handle {
#define ring_to_dev(ring) ((ring)->q->dev->dev) #define ring_to_dev(ring) ((ring)->q->dev->dev)
struct hnae_handle *hnae_get_handle(struct device *owner_dev, const char *ae_id, struct hnae_handle *hnae_get_handle(struct device *owner_dev,
u32 port_id, struct hnae_buf_ops *bops); const struct device_node *ae_node,
u32 port_id,
struct hnae_buf_ops *bops);
void hnae_put_handle(struct hnae_handle *handle); void hnae_put_handle(struct hnae_handle *handle);
int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner); int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner);
void hnae_ae_unregister(struct hnae_ae_dev *dev); void hnae_ae_unregister(struct hnae_ae_dev *dev);

View File

@ -847,6 +847,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev) int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
{ {
struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev; struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev;
static atomic_t id = ATOMIC_INIT(-1);
switch (dsaf_dev->dsaf_ver) { switch (dsaf_dev->dsaf_ver) {
case AE_VERSION_1: case AE_VERSION_1:
@ -858,6 +859,9 @@ int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
default: default:
break; break;
} }
snprintf(ae_dev->name, AE_NAME_SIZE, "%s%d", DSAF_DEVICE_NAME,
(int)atomic_inc_return(&id));
ae_dev->ops = &hns_dsaf_ops; ae_dev->ops = &hns_dsaf_ops;
ae_dev->dev = dsaf_dev->dev; ae_dev->dev = dsaf_dev->dev;

View File

@ -35,7 +35,7 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
int ret, i; int ret, i;
u32 desc_num; u32 desc_num;
u32 buf_size; u32 buf_size;
const char *name, *mode_str; const char *mode_str;
struct device_node *np = dsaf_dev->dev->of_node; struct device_node *np = dsaf_dev->dev->of_node;
if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1")) if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
@ -43,14 +43,6 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
else else
dsaf_dev->dsaf_ver = AE_VERSION_2; dsaf_dev->dsaf_ver = AE_VERSION_2;
ret = of_property_read_string(np, "dsa_name", &name);
if (ret) {
dev_err(dsaf_dev->dev, "get dsaf name fail, ret=%d!\n", ret);
return ret;
}
strncpy(dsaf_dev->ae_dev.name, name, AE_NAME_SIZE);
dsaf_dev->ae_dev.name[AE_NAME_SIZE - 1] = '\0';
ret = of_property_read_string(np, "mode", &mode_str); ret = of_property_read_string(np, "mode", &mode_str);
if (ret) { if (ret) {
dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret); dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret);

View File

@ -18,6 +18,7 @@ struct hns_mac_cb;
#define DSAF_DRV_NAME "hns_dsaf" #define DSAF_DRV_NAME "hns_dsaf"
#define DSAF_MOD_VERSION "v1.0" #define DSAF_MOD_VERSION "v1.0"
#define DSAF_DEVICE_NAME "dsaf"
#define HNS_DSAF_DEBUG_NW_REG_OFFSET 0x100000 #define HNS_DSAF_DEBUG_NW_REG_OFFSET 0x100000

View File

@ -1802,7 +1802,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev)
int ret; int ret;
h = hnae_get_handle(&priv->netdev->dev, h = hnae_get_handle(&priv->netdev->dev,
priv->ae_name, priv->port_id, NULL); priv->ae_node, priv->port_id, NULL);
if (IS_ERR_OR_NULL(h)) { if (IS_ERR_OR_NULL(h)) {
ret = PTR_ERR(h); ret = PTR_ERR(h);
dev_dbg(priv->dev, "has not handle, register notifier!\n"); dev_dbg(priv->dev, "has not handle, register notifier!\n");
@ -1880,13 +1880,16 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
else else
priv->enet_ver = AE_VERSION_2; priv->enet_ver = AE_VERSION_2;
ret = of_property_read_string(node, "ae-name", &priv->ae_name); priv->ae_node = (void *)of_parse_phandle(node, "ae-handle", 0);
if (ret) if (IS_ERR_OR_NULL(priv->ae_node)) {
goto out_read_string_fail; ret = PTR_ERR(priv->ae_node);
dev_err(dev, "not find ae-handle\n");
goto out_read_prop_fail;
}
ret = of_property_read_u32(node, "port-id", &priv->port_id); ret = of_property_read_u32(node, "port-id", &priv->port_id);
if (ret) if (ret)
goto out_read_string_fail; goto out_read_prop_fail;
hns_init_mac_addr(ndev); hns_init_mac_addr(ndev);
@ -1945,7 +1948,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
out_notify_fail: out_notify_fail:
(void)cancel_work_sync(&priv->service_task); (void)cancel_work_sync(&priv->service_task);
out_read_string_fail: out_read_prop_fail:
free_netdev(ndev); free_netdev(ndev);
return ret; return ret;
} }

View File

@ -51,7 +51,7 @@ struct hns_nic_ops {
}; };
struct hns_nic_priv { struct hns_nic_priv {
const char *ae_name; const struct device_node *ae_node;
u32 enet_ver; u32 enet_ver;
u32 port_id; u32 port_id;
int phy_mode; int phy_mode;

View File

@ -194,7 +194,6 @@ static const char *hp100_isa_tbl[] = {
}; };
#endif #endif
#ifdef CONFIG_EISA
static struct eisa_device_id hp100_eisa_tbl[] = { static struct eisa_device_id hp100_eisa_tbl[] = {
{ "HWPF180" }, /* HP J2577 rev A */ { "HWPF180" }, /* HP J2577 rev A */
{ "HWP1920" }, /* HP 27248B */ { "HWP1920" }, /* HP 27248B */
@ -205,9 +204,7 @@ static struct eisa_device_id hp100_eisa_tbl[] = {
{ "" } /* Mandatory final entry ! */ { "" } /* Mandatory final entry ! */
}; };
MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl); MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
#endif
#ifdef CONFIG_PCI
static const struct pci_device_id hp100_pci_tbl[] = { static const struct pci_device_id hp100_pci_tbl[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
@ -219,7 +216,6 @@ static const struct pci_device_id hp100_pci_tbl[] = {
{} /* Terminating entry */ {} /* Terminating entry */
}; };
MODULE_DEVICE_TABLE(pci, hp100_pci_tbl); MODULE_DEVICE_TABLE(pci, hp100_pci_tbl);
#endif
static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO; static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO;
static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX; static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX;
@ -2842,7 +2838,6 @@ static void cleanup_dev(struct net_device *d)
free_netdev(d); free_netdev(d);
} }
#ifdef CONFIG_EISA
static int hp100_eisa_probe(struct device *gendev) static int hp100_eisa_probe(struct device *gendev)
{ {
struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
@ -2884,9 +2879,7 @@ static struct eisa_driver hp100_eisa_driver = {
.remove = hp100_eisa_remove, .remove = hp100_eisa_remove,
} }
}; };
#endif
#ifdef CONFIG_PCI
static int hp100_pci_probe(struct pci_dev *pdev, static int hp100_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent) const struct pci_device_id *ent)
{ {
@ -2955,7 +2948,6 @@ static struct pci_driver hp100_pci_driver = {
.probe = hp100_pci_probe, .probe = hp100_pci_probe,
.remove = hp100_pci_remove, .remove = hp100_pci_remove,
}; };
#endif
/* /*
* module section * module section
@ -3032,23 +3024,17 @@ static int __init hp100_module_init(void)
err = hp100_isa_init(); err = hp100_isa_init();
if (err && err != -ENODEV) if (err && err != -ENODEV)
goto out; goto out;
#ifdef CONFIG_EISA
err = eisa_driver_register(&hp100_eisa_driver); err = eisa_driver_register(&hp100_eisa_driver);
if (err && err != -ENODEV) if (err && err != -ENODEV)
goto out2; goto out2;
#endif
#ifdef CONFIG_PCI
err = pci_register_driver(&hp100_pci_driver); err = pci_register_driver(&hp100_pci_driver);
if (err && err != -ENODEV) if (err && err != -ENODEV)
goto out3; goto out3;
#endif
out: out:
return err; return err;
out3: out3:
#ifdef CONFIG_EISA
eisa_driver_unregister (&hp100_eisa_driver); eisa_driver_unregister (&hp100_eisa_driver);
out2: out2:
#endif
hp100_isa_cleanup(); hp100_isa_cleanup();
goto out; goto out;
} }
@ -3057,12 +3043,8 @@ static int __init hp100_module_init(void)
static void __exit hp100_module_exit(void) static void __exit hp100_module_exit(void)
{ {
hp100_isa_cleanup(); hp100_isa_cleanup();
#ifdef CONFIG_EISA
eisa_driver_unregister (&hp100_eisa_driver); eisa_driver_unregister (&hp100_eisa_driver);
#endif
#ifdef CONFIG_PCI
pci_unregister_driver (&hp100_pci_driver); pci_unregister_driver (&hp100_pci_driver);
#endif
} }
module_init(hp100_module_init) module_init(hp100_module_init)

View File

@ -7117,9 +7117,7 @@ static void i40e_service_task(struct work_struct *work)
i40e_watchdog_subtask(pf); i40e_watchdog_subtask(pf);
i40e_fdir_reinit_subtask(pf); i40e_fdir_reinit_subtask(pf);
i40e_sync_filters_subtask(pf); i40e_sync_filters_subtask(pf);
#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
i40e_sync_udp_filters_subtask(pf); i40e_sync_udp_filters_subtask(pf);
#endif
i40e_clean_adminq_subtask(pf); i40e_clean_adminq_subtask(pf);
i40e_service_event_complete(pf); i40e_service_event_complete(pf);
@ -8515,6 +8513,8 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
} }
#endif #endif
#if IS_ENABLED(CONFIG_VXLAN)
/** /**
* i40e_add_vxlan_port - Get notifications about VXLAN ports that come up * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
* @netdev: This physical port's netdev * @netdev: This physical port's netdev
@ -8524,7 +8524,6 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
static void i40e_add_vxlan_port(struct net_device *netdev, static void i40e_add_vxlan_port(struct net_device *netdev,
sa_family_t sa_family, __be16 port) sa_family_t sa_family, __be16 port)
{ {
#if IS_ENABLED(CONFIG_VXLAN)
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
@ -8557,7 +8556,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
pf->pending_udp_bitmap |= BIT_ULL(next_idx); pf->pending_udp_bitmap |= BIT_ULL(next_idx);
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
#endif
} }
/** /**
@ -8569,7 +8567,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
static void i40e_del_vxlan_port(struct net_device *netdev, static void i40e_del_vxlan_port(struct net_device *netdev,
sa_family_t sa_family, __be16 port) sa_family_t sa_family, __be16 port)
{ {
#if IS_ENABLED(CONFIG_VXLAN)
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
@ -8592,9 +8589,10 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
ntohs(port)); ntohs(port));
} }
#endif
} }
#endif
#if IS_ENABLED(CONFIG_GENEVE)
/** /**
* i40e_add_geneve_port - Get notifications about GENEVE ports that come up * i40e_add_geneve_port - Get notifications about GENEVE ports that come up
* @netdev: This physical port's netdev * @netdev: This physical port's netdev
@ -8604,7 +8602,6 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
static void i40e_add_geneve_port(struct net_device *netdev, static void i40e_add_geneve_port(struct net_device *netdev,
sa_family_t sa_family, __be16 port) sa_family_t sa_family, __be16 port)
{ {
#if IS_ENABLED(CONFIG_GENEVE)
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
@ -8639,7 +8636,6 @@ static void i40e_add_geneve_port(struct net_device *netdev,
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port)); dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port));
#endif
} }
/** /**
@ -8651,7 +8647,6 @@ static void i40e_add_geneve_port(struct net_device *netdev,
static void i40e_del_geneve_port(struct net_device *netdev, static void i40e_del_geneve_port(struct net_device *netdev,
sa_family_t sa_family, __be16 port) sa_family_t sa_family, __be16 port)
{ {
#if IS_ENABLED(CONFIG_GENEVE)
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
@ -8677,8 +8672,8 @@ static void i40e_del_geneve_port(struct net_device *netdev,
netdev_warn(netdev, "geneve port %d was not found, not deleting\n", netdev_warn(netdev, "geneve port %d was not found, not deleting\n",
ntohs(port)); ntohs(port));
} }
#endif
} }
#endif
static int i40e_get_phys_port_id(struct net_device *netdev, static int i40e_get_phys_port_id(struct net_device *netdev,
struct netdev_phys_item_id *ppid) struct netdev_phys_item_id *ppid)

View File

@ -2313,8 +2313,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
struct iphdr *this_ip_hdr; struct iphdr *this_ip_hdr;
u32 network_hdr_len; u32 network_hdr_len;
u8 l4_hdr = 0; u8 l4_hdr = 0;
struct udphdr *oudph; struct udphdr *oudph = NULL;
struct iphdr *oiph; struct iphdr *oiph = NULL;
u32 l4_tunnel = 0; u32 l4_tunnel = 0;
if (skb->encapsulation) { if (skb->encapsulation) {

View File

@ -762,10 +762,10 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
if (length <= 8 && (uintptr_t)data & 0x7) { if (length <= 8 && (uintptr_t)data & 0x7) {
/* Copy unaligned small data fragment to TSO header data area */ /* Copy unaligned small data fragment to TSO header data area */
memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE, memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE,
data, length); data, length);
desc->buf_ptr = txq->tso_hdrs_dma desc->buf_ptr = txq->tso_hdrs_dma
+ txq->tx_curr_desc * TSO_HEADER_SIZE; + tx_index * TSO_HEADER_SIZE;
} else { } else {
/* Alignment is okay, map buffer and hand off to hardware */ /* Alignment is okay, map buffer and hand off to hardware */
txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;

View File

@ -11,28 +11,28 @@
* warranty of any kind, whether express or implied. * warranty of any kind, whether express or implied.
*/ */
#include <linux/kernel.h> #include <linux/clk.h>
#include <linux/netdevice.h> #include <linux/cpu.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/platform_device.h> #include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h> #include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h> #include <linux/mbus.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/interrupt.h> #include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/io.h>
#include <net/tso.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h> #include <linux/of_irq.h>
#include <linux/of_mdio.h> #include <linux/of_mdio.h>
#include <linux/of_net.h> #include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h> #include <linux/phy.h>
#include <linux/clk.h> #include <linux/platform_device.h>
#include <linux/cpu.h> #include <linux/skbuff.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
/* Registers */ /* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
@ -373,6 +373,8 @@ struct mvneta_port {
/* Core clock */ /* Core clock */
struct clk *clk; struct clk *clk;
/* AXI clock */
struct clk *clk_bus;
u8 mcast_count[256]; u8 mcast_count[256];
u16 tx_ring_size; u16 tx_ring_size;
u16 rx_ring_size; u16 rx_ring_size;
@ -3242,26 +3244,25 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
const struct mvneta_statistic *s; const struct mvneta_statistic *s;
void __iomem *base = pp->base; void __iomem *base = pp->base;
u32 high, low, val; u32 high, low, val;
u64 val64;
int i; int i;
for (i = 0, s = mvneta_statistics; for (i = 0, s = mvneta_statistics;
s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
s++, i++) { s++, i++) {
val = 0;
switch (s->type) { switch (s->type) {
case T_REG_32: case T_REG_32:
val = readl_relaxed(base + s->offset); val = readl_relaxed(base + s->offset);
pp->ethtool_stats[i] += val;
break; break;
case T_REG_64: case T_REG_64:
/* Docs say to read low 32-bit then high */ /* Docs say to read low 32-bit then high */
low = readl_relaxed(base + s->offset); low = readl_relaxed(base + s->offset);
high = readl_relaxed(base + s->offset + 4); high = readl_relaxed(base + s->offset + 4);
val = (u64)high << 32 | low; val64 = (u64)high << 32 | low;
pp->ethtool_stats[i] += val64;
break; break;
} }
pp->ethtool_stats[i] += val;
} }
} }
@ -3605,7 +3606,9 @@ static int mvneta_probe(struct platform_device *pdev)
pp->indir[0] = rxq_def; pp->indir[0] = rxq_def;
pp->clk = devm_clk_get(&pdev->dev, NULL); pp->clk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(pp->clk))
pp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pp->clk)) { if (IS_ERR(pp->clk)) {
err = PTR_ERR(pp->clk); err = PTR_ERR(pp->clk);
goto err_put_phy_node; goto err_put_phy_node;
@ -3613,6 +3616,10 @@ static int mvneta_probe(struct platform_device *pdev)
clk_prepare_enable(pp->clk); clk_prepare_enable(pp->clk);
pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
if (!IS_ERR(pp->clk_bus))
clk_prepare_enable(pp->clk_bus);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pp->base = devm_ioremap_resource(&pdev->dev, res); pp->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pp->base)) { if (IS_ERR(pp->base)) {
@ -3724,6 +3731,7 @@ err_free_stats:
err_free_ports: err_free_ports:
free_percpu(pp->ports); free_percpu(pp->ports);
err_clk: err_clk:
clk_disable_unprepare(pp->clk_bus);
clk_disable_unprepare(pp->clk); clk_disable_unprepare(pp->clk);
err_put_phy_node: err_put_phy_node:
of_node_put(phy_node); of_node_put(phy_node);
@ -3741,6 +3749,7 @@ static int mvneta_remove(struct platform_device *pdev)
struct mvneta_port *pp = netdev_priv(dev); struct mvneta_port *pp = netdev_priv(dev);
unregister_netdev(dev); unregister_netdev(dev);
clk_disable_unprepare(pp->clk_bus);
clk_disable_unprepare(pp->clk); clk_disable_unprepare(pp->clk);
free_percpu(pp->ports); free_percpu(pp->ports);
free_percpu(pp->stats); free_percpu(pp->stats);

View File

@ -1044,6 +1044,92 @@ static inline void mlxsw_reg_sftr_pack(char *payload,
mlxsw_reg_sftr_port_mask_set(payload, port, 1); mlxsw_reg_sftr_port_mask_set(payload, port, 1);
} }
/* SFDF - Switch Filtering DB Flush
* --------------------------------
* The switch filtering DB flush register is used to flush the FDB.
* Note that FDB notifications are flushed as well.
*/
#define MLXSW_REG_SFDF_ID 0x2013
#define MLXSW_REG_SFDF_LEN 0x14
static const struct mlxsw_reg_info mlxsw_reg_sfdf = {
.id = MLXSW_REG_SFDF_ID,
.len = MLXSW_REG_SFDF_LEN,
};
/* reg_sfdf_swid
* Switch partition ID.
* Access: Index
*/
MLXSW_ITEM32(reg, sfdf, swid, 0x00, 24, 8);
enum mlxsw_reg_sfdf_flush_type {
MLXSW_REG_SFDF_FLUSH_PER_SWID,
MLXSW_REG_SFDF_FLUSH_PER_FID,
MLXSW_REG_SFDF_FLUSH_PER_PORT,
MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID,
MLXSW_REG_SFDF_FLUSH_PER_LAG,
MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID,
};
/* reg_sfdf_flush_type
* Flush type.
* 0 - All SWID dynamic entries are flushed.
* 1 - All FID dynamic entries are flushed.
* 2 - All dynamic entries pointing to port are flushed.
* 3 - All FID dynamic entries pointing to port are flushed.
* 4 - All dynamic entries pointing to LAG are flushed.
* 5 - All FID dynamic entries pointing to LAG are flushed.
* Access: RW
*/
MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4);
/* reg_sfdf_flush_static
* Static.
* 0 - Flush only dynamic entries.
* 1 - Flush both dynamic and static entries.
* Access: RW
*/
MLXSW_ITEM32(reg, sfdf, flush_static, 0x04, 24, 1);
static inline void mlxsw_reg_sfdf_pack(char *payload,
enum mlxsw_reg_sfdf_flush_type type)
{
MLXSW_REG_ZERO(sfdf, payload);
mlxsw_reg_sfdf_flush_type_set(payload, type);
mlxsw_reg_sfdf_flush_static_set(payload, true);
}
/* reg_sfdf_fid
* FID to flush.
* Access: RW
*/
MLXSW_ITEM32(reg, sfdf, fid, 0x0C, 0, 16);
/* reg_sfdf_system_port
* Port to flush.
* Access: RW
*/
MLXSW_ITEM32(reg, sfdf, system_port, 0x0C, 0, 16);
/* reg_sfdf_port_fid_system_port
* Port to flush, pointed to by FID.
* Access: RW
*/
MLXSW_ITEM32(reg, sfdf, port_fid_system_port, 0x08, 0, 16);
/* reg_sfdf_lag_id
* LAG ID to flush.
* Access: RW
*/
MLXSW_ITEM32(reg, sfdf, lag_id, 0x0C, 0, 10);
/* reg_sfdf_lag_fid_lag_id
* LAG ID to flush, pointed to by FID.
* Access: RW
*/
MLXSW_ITEM32(reg, sfdf, lag_fid_lag_id, 0x08, 0, 10);
/* SLDR - Switch LAG Descriptor Register /* SLDR - Switch LAG Descriptor Register
* ----------------------------------------- * -----------------------------------------
* The switch LAG descriptor register is populated by LAG descriptors. * The switch LAG descriptor register is populated by LAG descriptors.
@ -1701,20 +1787,20 @@ MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
* Module number. * Module number.
* Access: RW * Access: RW
*/ */
MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false); MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
/* reg_pmlp_tx_lane /* reg_pmlp_tx_lane
* Tx Lane. When rxtx field is cleared, this field is used for Rx as well. * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
* Access: RW * Access: RW
*/ */
MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false); MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 0x00, false);
/* reg_pmlp_rx_lane /* reg_pmlp_rx_lane
* Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
* equal to Tx lane. * equal to Tx lane.
* Access: RW * Access: RW
*/ */
MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false); MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 0x00, false);
static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port) static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
{ {
@ -3121,6 +3207,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "SFGC"; return "SFGC";
case MLXSW_REG_SFTR_ID: case MLXSW_REG_SFTR_ID:
return "SFTR"; return "SFTR";
case MLXSW_REG_SFDF_ID:
return "SFDF";
case MLXSW_REG_SLDR_ID: case MLXSW_REG_SLDR_ID:
return "SLDR"; return "SLDR";
case MLXSW_REG_SLCR_ID: case MLXSW_REG_SLCR_ID:

View File

@ -1979,6 +1979,115 @@ static struct mlxsw_driver mlxsw_sp_driver = {
.profile = &mlxsw_sp_config_profile, .profile = &mlxsw_sp_config_profile,
}; };
static int
mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char sfdf_pl[MLXSW_REG_SFDF_LEN];
mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
static int
mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char sfdf_pl[MLXSW_REG_SFDF_LEN];
mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
mlxsw_sp_port->local_port);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
static int
mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char sfdf_pl[MLXSW_REG_SFDF_LEN];
mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
static int
mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char sfdf_pl[MLXSW_REG_SFDF_LEN];
mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
static int
__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
{
int err, last_err = 0;
u16 vid;
for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
if (err)
last_err = err;
}
return last_err;
}
static int
__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
{
int err, last_err = 0;
u16 vid;
for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
if (err)
last_err = err;
}
return last_err;
}
static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
if (!list_empty(&mlxsw_sp_port->vports_list))
if (mlxsw_sp_port->lagged)
return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
else
return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
else
if (mlxsw_sp_port->lagged)
return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
else
return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
}
static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
{
u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
u16 fid = mlxsw_sp_vfid_to_fid(vfid);
if (mlxsw_sp_vport->lagged)
return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
fid);
else
return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
}
static bool mlxsw_sp_port_dev_check(const struct net_device *dev) static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{ {
return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
@ -2006,10 +2115,14 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
return 0; return 0;
} }
static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
bool flush_fdb)
{ {
struct net_device *dev = mlxsw_sp_port->dev; struct net_device *dev = mlxsw_sp_port->dev;
if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
mlxsw_sp_port->learning = 0; mlxsw_sp_port->learning = 0;
mlxsw_sp_port->learning_sync = 0; mlxsw_sp_port->learning_sync = 0;
mlxsw_sp_port->uc_flood = 0; mlxsw_sp_port->uc_flood = 0;
@ -2200,10 +2313,15 @@ err_col_port_enable:
return err; return err;
} }
static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *br_dev,
bool flush_fdb);
static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev) struct net_device *lag_dev)
{ {
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port *mlxsw_sp_vport;
struct mlxsw_sp_upper *lag; struct mlxsw_sp_upper *lag;
u16 lag_id = mlxsw_sp_port->lag_id; u16 lag_id = mlxsw_sp_port->lag_id;
int err; int err;
@ -2220,7 +2338,32 @@ static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
if (err) if (err)
return err; return err;
/* In case we leave a LAG device that has bridges built on top,
* then their teardown sequence is never issued and we need to
* invoke the necessary cleanup routines ourselves.
*/
list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
vport.list) {
struct net_device *br_dev;
if (!mlxsw_sp_vport->bridged)
continue;
br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
}
if (mlxsw_sp_port->bridged) {
mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
if (lag->ref_count == 1)
mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
}
if (lag->ref_count == 1) { if (lag->ref_count == 1) {
if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
if (err) if (err)
return err; return err;
@ -2272,9 +2415,6 @@ static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled); return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
} }
static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *br_dev);
static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *vlan_dev) struct net_device *vlan_dev)
{ {
@ -2312,7 +2452,7 @@ static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *br_dev; struct net_device *br_dev;
br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev); mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
} }
mlxsw_sp_vport->dev = mlxsw_sp_port->dev; mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
@ -2374,7 +2514,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
} }
mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev); mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
} else { } else {
err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port); err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
true);
mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev); mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
if (err) { if (err) {
netdev_err(dev, "Failed to leave bridge\n"); netdev_err(dev, "Failed to leave bridge\n");
@ -2541,7 +2682,8 @@ static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
} }
static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *br_dev) struct net_device *br_dev,
bool flush_fdb)
{ {
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
@ -2604,6 +2746,9 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
goto err_vport_flood_set; goto err_vport_flood_set;
} }
if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
netdev_err(dev, "Failed to flush FDB\n");
/* Switch between the vFIDs and destroy the old one if needed. */ /* Switch between the vFIDs and destroy the old one if needed. */
new_vfid->nr_vports++; new_vfid->nr_vports++;
mlxsw_sp_vport->vport.vfid = new_vfid; mlxsw_sp_vport->vport.vfid = new_vfid;
@ -2777,7 +2922,7 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
if (!mlxsw_sp_vport) if (!mlxsw_sp_vport)
return NOTIFY_DONE; return NOTIFY_DONE;
err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
upper_dev); upper_dev, true);
if (err) { if (err) {
netdev_err(dev, "Failed to leave bridge\n"); netdev_err(dev, "Failed to leave bridge\n");
return NOTIFY_BAD; return NOTIFY_BAD;

View File

@ -120,7 +120,6 @@ struct mlxsw_sp {
} fdb_notify; } fdb_notify;
#define MLXSW_SP_DEFAULT_AGEING_TIME 300 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
u32 ageing_time; u32 ageing_time;
struct mutex fdb_lock; /* Make sure FDB sessions are atomic. */
struct mlxsw_sp_upper master_bridge; struct mlxsw_sp_upper master_bridge;
struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX]; struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
}; };
@ -254,5 +253,6 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
__be16 __always_unused proto, u16 vid); __be16 __always_unused proto, u16 vid);
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
bool set, bool only_uc); bool set, bool only_uc);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
#endif #endif

View File

@ -45,6 +45,7 @@
#include <linux/if_bridge.h> #include <linux/if_bridge.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/jiffies.h> #include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h> #include <net/switchdev.h>
#include "spectrum.h" #include "spectrum.h"
@ -124,14 +125,14 @@ static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
int err; int err;
switch (state) { switch (state) {
case BR_STATE_DISABLED: /* fall-through */
case BR_STATE_FORWARDING: case BR_STATE_FORWARDING:
spms_state = MLXSW_REG_SPMS_STATE_FORWARDING; spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
break; break;
case BR_STATE_LISTENING: /* fall-through */
case BR_STATE_LEARNING: case BR_STATE_LEARNING:
spms_state = MLXSW_REG_SPMS_STATE_LEARNING; spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
break; break;
case BR_STATE_LISTENING: /* fall-through */
case BR_STATE_DISABLED: /* fall-through */
case BR_STATE_BLOCKING: case BR_STATE_BLOCKING:
spms_state = MLXSW_REG_SPMS_STATE_DISCARDING; spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
break; break;
@ -936,6 +937,14 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
vlan->vid_begin, vlan->vid_end, false); vlan->vid_begin, vlan->vid_end, false);
} }
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
u16 vid;
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
}
static int static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_fdb *fdb) const struct switchdev_obj_port_fdb *fdb)
@ -1040,10 +1049,12 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_obj_port_fdb *fdb, struct switchdev_obj_port_fdb *fdb,
switchdev_obj_dump_cb_t *cb) switchdev_obj_dump_cb_t *cb,
struct net_device *orig_dev)
{ {
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u16 vport_vid = 0, vport_fid = 0; struct mlxsw_sp_port *tmp;
u16 vport_fid = 0;
char *sfd_pl; char *sfd_pl;
char mac[ETH_ALEN]; char mac[ETH_ALEN];
u16 fid; u16 fid;
@ -1058,13 +1069,11 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
if (!sfd_pl) if (!sfd_pl)
return -ENOMEM; return -ENOMEM;
mutex_lock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
u16 tmp; u16 tmp;
tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
vport_fid = mlxsw_sp_vfid_to_fid(tmp); vport_fid = mlxsw_sp_vfid_to_fid(tmp);
vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
} }
mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0); mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
@ -1088,12 +1097,13 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid, mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
&local_port); &local_port);
if (local_port == mlxsw_sp_port->local_port) { if (local_port == mlxsw_sp_port->local_port) {
if (vport_fid && vport_fid != fid) if (vport_fid && vport_fid == fid)
continue; fdb->vid = 0;
else if (vport_fid) else if (!vport_fid &&
fdb->vid = vport_vid; !mlxsw_sp_fid_is_vfid(fid))
else
fdb->vid = fid; fdb->vid = fid;
else
continue;
ether_addr_copy(fdb->addr, mac); ether_addr_copy(fdb->addr, mac);
fdb->ndm_state = NUD_REACHABLE; fdb->ndm_state = NUD_REACHABLE;
err = cb(&fdb->obj); err = cb(&fdb->obj);
@ -1104,14 +1114,22 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG: case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i, mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
mac, &fid, &lag_id); mac, &fid, &lag_id);
if (mlxsw_sp_port == tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) { if (tmp && tmp->local_port ==
if (vport_fid && vport_fid != fid) mlxsw_sp_port->local_port) {
/* LAG records can only point to LAG
* devices or VLAN devices on top.
*/
if (!netif_is_lag_master(orig_dev) &&
!is_vlan_dev(orig_dev))
continue; continue;
else if (vport_fid) if (vport_fid && vport_fid == fid)
fdb->vid = vport_vid; fdb->vid = 0;
else else if (!vport_fid &&
!mlxsw_sp_fid_is_vfid(fid))
fdb->vid = fid; fdb->vid = fid;
else
continue;
ether_addr_copy(fdb->addr, mac); ether_addr_copy(fdb->addr, mac);
fdb->ndm_state = NUD_REACHABLE; fdb->ndm_state = NUD_REACHABLE;
err = cb(&fdb->obj); err = cb(&fdb->obj);
@ -1124,7 +1142,6 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT); } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
out: out:
mutex_unlock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
kfree(sfd_pl); kfree(sfd_pl);
return stored_err ? stored_err : err; return stored_err ? stored_err : err;
} }
@ -1176,7 +1193,8 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev,
break; break;
case SWITCHDEV_OBJ_ID_PORT_FDB: case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port, err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj), cb); SWITCHDEV_OBJ_PORT_FDB(obj), cb,
obj->orig_dev);
break; break;
default: default:
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
@ -1194,14 +1212,14 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
.switchdev_port_obj_dump = mlxsw_sp_port_obj_dump, .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump,
}; };
static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync, static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
bool adding, char *mac, u16 vid, char *mac, u16 vid,
struct net_device *dev) struct net_device *dev)
{ {
struct switchdev_notifier_fdb_info info; struct switchdev_notifier_fdb_info info;
unsigned long notifier_type; unsigned long notifier_type;
if (learning && learning_sync) { if (learning_sync) {
info.addr = mac; info.addr = mac;
info.vid = vid; info.vid = vid;
notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL; notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
@ -1237,7 +1255,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
goto just_remove; goto just_remove;
} }
vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); vid = 0;
/* Override the physical port with the vPort. */ /* Override the physical port with the vPort. */
mlxsw_sp_port = mlxsw_sp_vport; mlxsw_sp_port = mlxsw_sp_vport;
} else { } else {
@ -1257,8 +1275,7 @@ do_fdb_op:
if (!do_notification) if (!do_notification)
return; return;
mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning, mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
mlxsw_sp_port->learning_sync,
adding, mac, vid, mlxsw_sp_port->dev); adding, mac, vid, mlxsw_sp_port->dev);
return; return;
@ -1273,6 +1290,7 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
bool adding) bool adding)
{ {
struct mlxsw_sp_port *mlxsw_sp_port; struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *dev;
char mac[ETH_ALEN]; char mac[ETH_ALEN];
u16 lag_vid = 0; u16 lag_vid = 0;
u16 lag_id; u16 lag_id;
@ -1298,11 +1316,13 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
goto just_remove; goto just_remove;
} }
vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
lag_vid = vid; dev = mlxsw_sp_vport->dev;
vid = 0;
/* Override the physical port with the vPort. */ /* Override the physical port with the vPort. */
mlxsw_sp_port = mlxsw_sp_vport; mlxsw_sp_port = mlxsw_sp_vport;
} else { } else {
dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
vid = fid; vid = fid;
} }
@ -1319,10 +1339,8 @@ do_fdb_op:
if (!do_notification) if (!do_notification)
return; return;
mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning, mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
mlxsw_sp_port->learning_sync, vid, dev);
adding, mac, vid,
mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev);
return; return;
just_remove: just_remove:
@ -1374,7 +1392,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work); mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
mutex_lock(&mlxsw_sp->fdb_lock); rtnl_lock();
do { do {
mlxsw_reg_sfn_pack(sfn_pl); mlxsw_reg_sfn_pack(sfn_pl);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
@ -1387,7 +1405,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
} while (num_rec); } while (num_rec);
mutex_unlock(&mlxsw_sp->fdb_lock); rtnl_unlock();
kfree(sfn_pl); kfree(sfn_pl);
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
@ -1402,7 +1420,6 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n"); dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
return err; return err;
} }
mutex_init(&mlxsw_sp->fdb_lock);
INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work); INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL; mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);

View File

@ -28,6 +28,16 @@
#include "moxart_ether.h" #include "moxart_ether.h"
static inline void moxart_desc_write(u32 data, u32 *desc)
{
*desc = cpu_to_le32(data);
}
static inline u32 moxart_desc_read(u32 *desc)
{
return le32_to_cpu(*desc);
}
static inline void moxart_emac_write(struct net_device *ndev, static inline void moxart_emac_write(struct net_device *ndev,
unsigned int reg, unsigned long value) unsigned int reg, unsigned long value)
{ {
@ -112,7 +122,7 @@ static void moxart_mac_enable(struct net_device *ndev)
static void moxart_mac_setup_desc_ring(struct net_device *ndev) static void moxart_mac_setup_desc_ring(struct net_device *ndev)
{ {
struct moxart_mac_priv_t *priv = netdev_priv(ndev); struct moxart_mac_priv_t *priv = netdev_priv(ndev);
void __iomem *desc; void *desc;
int i; int i;
for (i = 0; i < TX_DESC_NUM; i++) { for (i = 0; i < TX_DESC_NUM; i++) {
@ -121,7 +131,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i; priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i;
} }
writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1); moxart_desc_write(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);
priv->tx_head = 0; priv->tx_head = 0;
priv->tx_tail = 0; priv->tx_tail = 0;
@ -129,8 +139,8 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
for (i = 0; i < RX_DESC_NUM; i++) { for (i = 0; i < RX_DESC_NUM; i++) {
desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE; desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE;
memset(desc, 0, RX_REG_DESC_SIZE); memset(desc, 0, RX_REG_DESC_SIZE);
writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK, moxart_desc_write(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
desc + RX_REG_OFFSET_DESC1); desc + RX_REG_OFFSET_DESC1);
priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i; priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
@ -141,12 +151,12 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i])) if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
netdev_err(ndev, "DMA mapping error\n"); netdev_err(ndev, "DMA mapping error\n");
writel(priv->rx_mapping[i], moxart_desc_write(priv->rx_mapping[i],
desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS); desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS);
writel(priv->rx_buf[i], moxart_desc_write((uintptr_t)priv->rx_buf[i],
desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT); desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT);
} }
writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1); moxart_desc_write(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);
priv->rx_head = 0; priv->rx_head = 0;
@ -201,14 +211,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
napi); napi);
struct net_device *ndev = priv->ndev; struct net_device *ndev = priv->ndev;
struct sk_buff *skb; struct sk_buff *skb;
void __iomem *desc; void *desc;
unsigned int desc0, len; unsigned int desc0, len;
int rx_head = priv->rx_head; int rx_head = priv->rx_head;
int rx = 0; int rx = 0;
while (rx < budget) { while (rx < budget) {
desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head); desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
desc0 = readl(desc + RX_REG_OFFSET_DESC0); desc0 = moxart_desc_read(desc + RX_REG_OFFSET_DESC0);
rmb(); /* ensure desc0 is up to date */
if (desc0 & RX_DESC0_DMA_OWN) if (desc0 & RX_DESC0_DMA_OWN)
break; break;
@ -250,7 +261,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
priv->stats.multicast++; priv->stats.multicast++;
rx_next: rx_next:
writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); wmb(); /* prevent setting ownership back too early */
moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
rx_head = RX_NEXT(rx_head); rx_head = RX_NEXT(rx_head);
priv->rx_head = rx_head; priv->rx_head = rx_head;
@ -310,7 +322,7 @@ static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{ {
struct moxart_mac_priv_t *priv = netdev_priv(ndev); struct moxart_mac_priv_t *priv = netdev_priv(ndev);
void __iomem *desc; void *desc;
unsigned int len; unsigned int len;
unsigned int tx_head = priv->tx_head; unsigned int tx_head = priv->tx_head;
u32 txdes1; u32 txdes1;
@ -319,11 +331,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head); desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
spin_lock_irq(&priv->txlock); spin_lock_irq(&priv->txlock);
if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) { if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
net_dbg_ratelimited("no TX space for packet\n"); net_dbg_ratelimited("no TX space for packet\n");
priv->stats.tx_dropped++; priv->stats.tx_dropped++;
goto out_unlock; goto out_unlock;
} }
rmb(); /* ensure data is only read that had TX_DESC0_DMA_OWN cleared */
len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len; len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
@ -337,9 +350,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
priv->tx_len[tx_head] = len; priv->tx_len[tx_head] = len;
priv->tx_skb[tx_head] = skb; priv->tx_skb[tx_head] = skb;
writel(priv->tx_mapping[tx_head], moxart_desc_write(priv->tx_mapping[tx_head],
desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS); desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS);
writel(skb->data, moxart_desc_write((uintptr_t)skb->data,
desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT); desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT);
if (skb->len < ETH_ZLEN) { if (skb->len < ETH_ZLEN) {
@ -354,8 +367,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK); txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
if (tx_head == TX_DESC_NUM_MASK) if (tx_head == TX_DESC_NUM_MASK)
txdes1 |= TX_DESC1_END; txdes1 |= TX_DESC1_END;
writel(txdes1, desc + TX_REG_OFFSET_DESC1); moxart_desc_write(txdes1, desc + TX_REG_OFFSET_DESC1);
writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0); wmb(); /* flush descriptor before transferring ownership */
moxart_desc_write(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
/* start to send packet */ /* start to send packet */
writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND); writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND);

View File

@ -300,7 +300,7 @@ struct moxart_mac_priv_t {
dma_addr_t rx_base; dma_addr_t rx_base;
dma_addr_t rx_mapping[RX_DESC_NUM]; dma_addr_t rx_mapping[RX_DESC_NUM];
void __iomem *rx_desc_base; void *rx_desc_base;
unsigned char *rx_buf_base; unsigned char *rx_buf_base;
unsigned char *rx_buf[RX_DESC_NUM]; unsigned char *rx_buf[RX_DESC_NUM];
unsigned int rx_head; unsigned int rx_head;
@ -308,7 +308,7 @@ struct moxart_mac_priv_t {
dma_addr_t tx_base; dma_addr_t tx_base;
dma_addr_t tx_mapping[TX_DESC_NUM]; dma_addr_t tx_mapping[TX_DESC_NUM];
void __iomem *tx_desc_base; void *tx_desc_base;
unsigned char *tx_buf_base; unsigned char *tx_buf_base;
unsigned char *tx_buf[RX_DESC_NUM]; unsigned char *tx_buf[RX_DESC_NUM];
unsigned int tx_head; unsigned int tx_head;

View File

@ -2223,8 +2223,6 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
return IRQ_NONE; return IRQ_NONE;
} }
#ifdef CONFIG_PCI_MSI
static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id) static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
{ {
struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
@ -2442,16 +2440,13 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
if (vdev->config.intr_type == MSI_X) if (vdev->config.intr_type == MSI_X)
pci_disable_msix(vdev->pdev); pci_disable_msix(vdev->pdev);
} }
#endif
static void vxge_rem_isr(struct vxgedev *vdev) static void vxge_rem_isr(struct vxgedev *vdev)
{ {
#ifdef CONFIG_PCI_MSI if (IS_ENABLED(CONFIG_PCI_MSI) &&
if (vdev->config.intr_type == MSI_X) { vdev->config.intr_type == MSI_X) {
vxge_rem_msix_isr(vdev); vxge_rem_msix_isr(vdev);
} else } else if (vdev->config.intr_type == INTA) {
#endif
if (vdev->config.intr_type == INTA) {
synchronize_irq(vdev->pdev->irq); synchronize_irq(vdev->pdev->irq);
free_irq(vdev->pdev->irq, vdev); free_irq(vdev->pdev->irq, vdev);
} }
@ -2460,11 +2455,10 @@ static void vxge_rem_isr(struct vxgedev *vdev)
static int vxge_add_isr(struct vxgedev *vdev) static int vxge_add_isr(struct vxgedev *vdev)
{ {
int ret = 0; int ret = 0;
#ifdef CONFIG_PCI_MSI
int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0; int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
int pci_fun = PCI_FUNC(vdev->pdev->devfn); int pci_fun = PCI_FUNC(vdev->pdev->devfn);
if (vdev->config.intr_type == MSI_X) if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X)
ret = vxge_enable_msix(vdev); ret = vxge_enable_msix(vdev);
if (ret) { if (ret) {
@ -2475,7 +2469,7 @@ static int vxge_add_isr(struct vxgedev *vdev)
vdev->config.intr_type = INTA; vdev->config.intr_type = INTA;
} }
if (vdev->config.intr_type == MSI_X) { if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) {
for (intr_idx = 0; for (intr_idx = 0;
intr_idx < (vdev->no_of_vpath * intr_idx < (vdev->no_of_vpath *
VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) { VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
@ -2576,9 +2570,8 @@ static int vxge_add_isr(struct vxgedev *vdev)
vdev->vxge_entries[intr_cnt].in_use = 1; vdev->vxge_entries[intr_cnt].in_use = 1;
vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0]; vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
} }
INTA_MODE:
#endif
INTA_MODE:
if (vdev->config.intr_type == INTA) { if (vdev->config.intr_type == INTA) {
snprintf(vdev->desc[0], VXGE_INTR_STRLEN, snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
"%s:vxge:INTA", vdev->ndev->name); "%s:vxge:INTA", vdev->ndev->name);
@ -3889,12 +3882,12 @@ static void vxge_device_config_init(struct vxge_hw_device_config *device_config,
if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT) if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT; max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
#ifndef CONFIG_PCI_MSI if (!IS_ENABLED(CONFIG_PCI_MSI)) {
vxge_debug_init(VXGE_ERR, vxge_debug_init(VXGE_ERR,
"%s: This Kernel does not support " "%s: This Kernel does not support "
"MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME); "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
*intr_type = INTA; *intr_type = INTA;
#endif }
/* Configure whether MSI-X or IRQL. */ /* Configure whether MSI-X or IRQL. */
switch (*intr_type) { switch (*intr_type) {

View File

@ -3531,12 +3531,14 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
info.addr = lw->addr; info.addr = lw->addr;
info.vid = lw->vid; info.vid = lw->vid;
rtnl_lock();
if (learned && removing) if (learned && removing)
call_switchdev_notifiers(SWITCHDEV_FDB_DEL, call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
lw->rocker_port->dev, &info.info); lw->rocker_port->dev, &info.info);
else if (learned && !removing) else if (learned && !removing)
call_switchdev_notifiers(SWITCHDEV_FDB_ADD, call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
lw->rocker_port->dev, &info.info); lw->rocker_port->dev, &info.info);
rtnl_unlock();
rocker_port_kfree(lw->trans, work); rocker_port_kfree(lw->trans, work);
} }

View File

@ -389,17 +389,27 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
if (vio_version_after_eq(&port->vio, 1, 8)) { if (vio_version_after_eq(&port->vio, 1, 8)) {
struct vio_net_dext *dext = vio_net_ext(desc); struct vio_net_dext *dext = vio_net_ext(desc);
skb_reset_network_header(skb);
if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
if (skb->protocol == ETH_P_IP) { if (skb->protocol == ETH_P_IP) {
struct iphdr *iph = (struct iphdr *)skb->data; struct iphdr *iph = ip_hdr(skb);
iph->check = 0; iph->check = 0;
ip_send_check(iph); ip_send_check(iph);
} }
} }
if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
skb->ip_summed == CHECKSUM_NONE) skb->ip_summed == CHECKSUM_NONE) {
vnet_fullcsum(skb); if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
int ihl = iph->ihl * 4;
skb_reset_transport_header(skb);
skb_set_transport_header(skb, ihl);
vnet_fullcsum(skb);
}
}
if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
skb->ip_summed = CHECKSUM_PARTIAL; skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_level = 0; skb->csum_level = 0;

View File

@ -82,7 +82,7 @@ struct cpdma_desc {
struct cpdma_desc_pool { struct cpdma_desc_pool {
phys_addr_t phys; phys_addr_t phys;
u32 hw_addr; dma_addr_t hw_addr;
void __iomem *iomap; /* ioremap map */ void __iomem *iomap; /* ioremap map */
void *cpumap; /* dma_alloc map */ void *cpumap; /* dma_alloc map */
int desc_size, mem_size; int desc_size, mem_size;
@ -152,7 +152,7 @@ struct cpdma_chan {
* abstract out these details * abstract out these details
*/ */
static struct cpdma_desc_pool * static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr, cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
int size, int align) int size, int align)
{ {
int bitmap_size; int bitmap_size;
@ -176,13 +176,13 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
if (phys) { if (phys) {
pool->phys = phys; pool->phys = phys;
pool->iomap = ioremap(phys, size); pool->iomap = ioremap(phys, size); /* should be memremap? */
pool->hw_addr = hw_addr; pool->hw_addr = hw_addr;
} else { } else {
pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys, pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
GFP_KERNEL); GFP_KERNEL);
pool->iomap = pool->cpumap; pool->iomap = (void __iomem __force *)pool->cpumap;
pool->hw_addr = pool->phys; pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
} }
if (pool->iomap) if (pool->iomap)

View File

@ -533,8 +533,8 @@ static int dfx_register(struct device *bdev)
const char *print_name = dev_name(bdev); const char *print_name = dev_name(bdev);
struct net_device *dev; struct net_device *dev;
DFX_board_t *bp; /* board pointer */ DFX_board_t *bp; /* board pointer */
resource_size_t bar_start[3]; /* pointers to ports */ resource_size_t bar_start[3] = {0}; /* pointers to ports */
resource_size_t bar_len[3]; /* resource length */ resource_size_t bar_len[3] = {0}; /* resource length */
int alloc_size; /* total buffer size used */ int alloc_size; /* total buffer size used */
struct resource *region; struct resource *region;
int err = 0; int err = 0;
@ -3697,8 +3697,8 @@ static void dfx_unregister(struct device *bdev)
int dfx_bus_pci = dev_is_pci(bdev); int dfx_bus_pci = dev_is_pci(bdev);
int dfx_bus_tc = DFX_BUS_TC(bdev); int dfx_bus_tc = DFX_BUS_TC(bdev);
int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
resource_size_t bar_start[3]; /* pointers to ports */ resource_size_t bar_start[3] = {0}; /* pointers to ports */
resource_size_t bar_len[3]; /* resource lengths */ resource_size_t bar_len[3] = {0}; /* resource lengths */
int alloc_size; /* total buffer size used */ int alloc_size; /* total buffer size used */
unregister_netdev(dev); unregister_netdev(dev);

View File

@ -980,9 +980,9 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
opts = ip_tunnel_info_opts(info); opts = ip_tunnel_info_opts(info);
if (key->tun_flags & TUNNEL_CSUM) if (key->tun_flags & TUNNEL_CSUM)
flags |= GENEVE_F_UDP_CSUM; flags &= ~GENEVE_F_UDP_ZERO_CSUM6_TX;
else else
flags &= ~GENEVE_F_UDP_CSUM; flags |= GENEVE_F_UDP_ZERO_CSUM6_TX;
err = geneve6_build_skb(dst, skb, key->tun_flags, vni, err = geneve6_build_skb(dst, skb, key->tun_flags, vni,
info->options_len, opts, info->options_len, opts,

View File

@ -624,6 +624,7 @@ struct nvsp_message {
#define RNDIS_PKT_ALIGN_DEFAULT 8 #define RNDIS_PKT_ALIGN_DEFAULT 8
struct multi_send_data { struct multi_send_data {
struct sk_buff *skb; /* skb containing the pkt */
struct hv_netvsc_packet *pkt; /* netvsc pkt pending */ struct hv_netvsc_packet *pkt; /* netvsc pkt pending */
u32 count; /* counter of batched packets */ u32 count; /* counter of batched packets */
}; };

View File

@ -841,6 +841,18 @@ static inline int netvsc_send_pkt(
return ret; return ret;
} }
/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
struct sk_buff **msd_skb,
struct multi_send_data *msdp)
{
*msd_skb = msdp->skb;
*msd_send = msdp->pkt;
msdp->skb = NULL;
msdp->pkt = NULL;
msdp->count = 0;
}
int netvsc_send(struct hv_device *device, int netvsc_send(struct hv_device *device,
struct hv_netvsc_packet *packet, struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg, struct rndis_message *rndis_msg,
@ -855,6 +867,7 @@ int netvsc_send(struct hv_device *device,
unsigned int section_index = NETVSC_INVALID_INDEX; unsigned int section_index = NETVSC_INVALID_INDEX;
struct multi_send_data *msdp; struct multi_send_data *msdp;
struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL; struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
struct sk_buff *msd_skb = NULL;
bool try_batch; bool try_batch;
bool xmit_more = (skb != NULL) ? skb->xmit_more : false; bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
@ -897,10 +910,8 @@ int netvsc_send(struct hv_device *device,
net_device->send_section_size) { net_device->send_section_size) {
section_index = netvsc_get_next_send_section(net_device); section_index = netvsc_get_next_send_section(net_device);
if (section_index != NETVSC_INVALID_INDEX) { if (section_index != NETVSC_INVALID_INDEX) {
msd_send = msdp->pkt; move_pkt_msd(&msd_send, &msd_skb, msdp);
msdp->pkt = NULL; msd_len = 0;
msdp->count = 0;
msd_len = 0;
} }
} }
@ -919,31 +930,31 @@ int netvsc_send(struct hv_device *device,
packet->total_data_buflen += msd_len; packet->total_data_buflen += msd_len;
} }
if (msdp->pkt) if (msdp->skb)
dev_kfree_skb_any(skb); dev_kfree_skb_any(msdp->skb);
if (xmit_more && !packet->cp_partial) { if (xmit_more && !packet->cp_partial) {
msdp->skb = skb;
msdp->pkt = packet; msdp->pkt = packet;
msdp->count++; msdp->count++;
} else { } else {
cur_send = packet; cur_send = packet;
msdp->skb = NULL;
msdp->pkt = NULL; msdp->pkt = NULL;
msdp->count = 0; msdp->count = 0;
} }
} else { } else {
msd_send = msdp->pkt; move_pkt_msd(&msd_send, &msd_skb, msdp);
msdp->pkt = NULL;
msdp->count = 0;
cur_send = packet; cur_send = packet;
} }
if (msd_send) { if (msd_send) {
m_ret = netvsc_send_pkt(msd_send, net_device, pb, skb); m_ret = netvsc_send_pkt(msd_send, net_device, NULL, msd_skb);
if (m_ret != 0) { if (m_ret != 0) {
netvsc_free_send_slot(net_device, netvsc_free_send_slot(net_device,
msd_send->send_buf_index); msd_send->send_buf_index);
dev_kfree_skb_any(skb); dev_kfree_skb_any(msd_skb);
} }
} }

View File

@ -196,65 +196,6 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
return ppi; return ppi;
} }
union sub_key {
u64 k;
struct {
u8 pad[3];
u8 kb;
u32 ka;
};
};
/* Toeplitz hash function
* data: network byte order
* return: host byte order
*/
static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
{
union sub_key subk;
int k_next = 4;
u8 dt;
int i, j;
u32 ret = 0;
subk.k = 0;
subk.ka = ntohl(*(u32 *)key);
for (i = 0; i < dlen; i++) {
subk.kb = key[k_next];
k_next = (k_next + 1) % klen;
dt = ((u8 *)data)[i];
for (j = 0; j < 8; j++) {
if (dt & 0x80)
ret ^= subk.ka;
dt <<= 1;
subk.k <<= 1;
}
}
return ret;
}
static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
{
struct flow_keys flow;
int data_len;
if (!skb_flow_dissect_flow_keys(skb, &flow, 0) ||
!(flow.basic.n_proto == htons(ETH_P_IP) ||
flow.basic.n_proto == htons(ETH_P_IPV6)))
return false;
if (flow.basic.ip_proto == IPPROTO_TCP)
data_len = 12;
else
data_len = 8;
*hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);
return true;
}
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback) void *accel_priv, select_queue_fallback_t fallback)
{ {
@ -267,11 +208,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1) if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
return 0; return 0;
if (netvsc_set_hash(&hash, skb)) { hash = skb_get_hash(skb);
q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] % q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
ndev->real_num_tx_queues; ndev->real_num_tx_queues;
skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
}
if (!nvsc_dev->chn_table[q_idx]) if (!nvsc_dev->chn_table[q_idx])
q_idx = 0; q_idx = 0;

View File

@ -82,9 +82,6 @@ struct bfin_sir_self {
#define DRIVER_NAME "bfin_sir" #define DRIVER_NAME "bfin_sir"
#define port_membase(port) (((struct bfin_sir_port *)(port))->membase)
#define get_lsr_cache(port) (((struct bfin_sir_port *)(port))->lsr)
#define put_lsr_cache(port, v) (((struct bfin_sir_port *)(port))->lsr = (v))
#include <asm/bfin_serial.h> #include <asm/bfin_serial.h>
static const unsigned short per[][4] = { static const unsigned short per[][4] = {

View File

@ -1323,6 +1323,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
list_add_tail_rcu(&vlan->list, &port->vlans); list_add_tail_rcu(&vlan->list, &port->vlans);
netif_stacked_transfer_operstate(lowerdev, dev); netif_stacked_transfer_operstate(lowerdev, dev);
linkwatch_fire_event(dev);
return 0; return 0;
@ -1522,6 +1523,7 @@ static int macvlan_device_event(struct notifier_block *unused,
port = macvlan_port_get_rtnl(dev); port = macvlan_port_get_rtnl(dev);
switch (event) { switch (event) {
case NETDEV_UP:
case NETDEV_CHANGE: case NETDEV_CHANGE:
list_for_each_entry(vlan, &port->vlans, list) list_for_each_entry(vlan, &port->vlans, list)
netif_stacked_transfer_operstate(vlan->lowerdev, netif_stacked_transfer_operstate(vlan->lowerdev,

View File

@ -186,6 +186,7 @@ config MDIO_GPIO
config MDIO_OCTEON config MDIO_OCTEON
tristate "Support for MDIO buses on Octeon and ThunderX SOCs" tristate "Support for MDIO buses on Octeon and ThunderX SOCs"
depends on 64BIT depends on 64BIT
depends on HAS_IOMEM
help help
This module provides a driver for the Octeon and ThunderX MDIO This module provides a driver for the Octeon and ThunderX MDIO

View File

@ -846,6 +846,11 @@ static void decode_rxts(struct dp83640_private *dp83640,
struct skb_shared_hwtstamps *shhwtstamps = NULL; struct skb_shared_hwtstamps *shhwtstamps = NULL;
struct sk_buff *skb; struct sk_buff *skb;
unsigned long flags; unsigned long flags;
u8 overflow;
overflow = (phy_rxts->ns_hi >> 14) & 0x3;
if (overflow)
pr_debug("rx timestamp queue overflow, count %d\n", overflow);
spin_lock_irqsave(&dp83640->rx_lock, flags); spin_lock_irqsave(&dp83640->rx_lock, flags);
@ -888,6 +893,7 @@ static void decode_txts(struct dp83640_private *dp83640,
struct skb_shared_hwtstamps shhwtstamps; struct skb_shared_hwtstamps shhwtstamps;
struct sk_buff *skb; struct sk_buff *skb;
u64 ns; u64 ns;
u8 overflow;
/* We must already have the skb that triggered this. */ /* We must already have the skb that triggered this. */
@ -897,6 +903,17 @@ static void decode_txts(struct dp83640_private *dp83640,
pr_debug("have timestamp but tx_queue empty\n"); pr_debug("have timestamp but tx_queue empty\n");
return; return;
} }
overflow = (phy_txts->ns_hi >> 14) & 0x3;
if (overflow) {
pr_debug("tx timestamp queue overflow, count %d\n", overflow);
while (skb) {
skb_complete_tx_timestamp(skb, NULL);
skb = skb_dequeue(&dp83640->tx_queue);
}
return;
}
ns = phy2txts(phy_txts); ns = phy2txts(phy_txts);
memset(&shhwtstamps, 0, sizeof(shhwtstamps)); memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(ns); shhwtstamps.hwtstamp = ns_to_ktime(ns);

View File

@ -692,25 +692,29 @@ void phy_change(struct work_struct *work)
struct phy_device *phydev = struct phy_device *phydev =
container_of(work, struct phy_device, phy_queue); container_of(work, struct phy_device, phy_queue);
if (phydev->drv->did_interrupt && if (phy_interrupt_is_valid(phydev)) {
!phydev->drv->did_interrupt(phydev)) if (phydev->drv->did_interrupt &&
goto ignore; !phydev->drv->did_interrupt(phydev))
goto ignore;
if (phy_disable_interrupts(phydev)) if (phy_disable_interrupts(phydev))
goto phy_err; goto phy_err;
}
mutex_lock(&phydev->lock); mutex_lock(&phydev->lock);
if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
phydev->state = PHY_CHANGELINK; phydev->state = PHY_CHANGELINK;
mutex_unlock(&phydev->lock); mutex_unlock(&phydev->lock);
atomic_dec(&phydev->irq_disable); if (phy_interrupt_is_valid(phydev)) {
enable_irq(phydev->irq); atomic_dec(&phydev->irq_disable);
enable_irq(phydev->irq);
/* Reenable interrupts */ /* Reenable interrupts */
if (PHY_HALTED != phydev->state && if (PHY_HALTED != phydev->state &&
phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED)) phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
goto irq_enable_err; goto irq_enable_err;
}
/* reschedule state queue work to run as soon as possible */ /* reschedule state queue work to run as soon as possible */
cancel_delayed_work_sync(&phydev->state_queue); cancel_delayed_work_sync(&phydev->state_queue);
@ -905,10 +909,10 @@ void phy_state_machine(struct work_struct *work)
phydev->adjust_link(phydev->attached_dev); phydev->adjust_link(phydev->attached_dev);
break; break;
case PHY_RUNNING: case PHY_RUNNING:
/* Only register a CHANGE if we are polling or ignoring /* Only register a CHANGE if we are polling and link changed
* interrupts and link changed since latest checking. * since latest checking.
*/ */
if (!phy_interrupt_is_valid(phydev)) { if (phydev->irq == PHY_POLL) {
old_link = phydev->link; old_link = phydev->link;
err = phy_read_status(phydev); err = phy_read_status(phydev);
if (err) if (err)
@ -1000,15 +1004,21 @@ void phy_state_machine(struct work_struct *work)
phy_state_to_str(old_state), phy_state_to_str(old_state),
phy_state_to_str(phydev->state)); phy_state_to_str(phydev->state));
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, /* Only re-schedule a PHY state machine change if we are polling the
PHY_STATE_TIME * HZ); * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving
* between states from phy_mac_interrupt()
*/
if (phydev->irq == PHY_POLL)
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
PHY_STATE_TIME * HZ);
} }
void phy_mac_interrupt(struct phy_device *phydev, int new_link) void phy_mac_interrupt(struct phy_device *phydev, int new_link)
{ {
cancel_work_sync(&phydev->phy_queue);
phydev->link = new_link; phydev->link = new_link;
schedule_work(&phydev->phy_queue);
/* Trigger a state machine change */
queue_work(system_power_efficient_wq, &phydev->phy_queue);
} }
EXPORT_SYMBOL(phy_mac_interrupt); EXPORT_SYMBOL(phy_mac_interrupt);

View File

@ -24,6 +24,10 @@
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/smscphy.h> #include <linux/smscphy.h>
struct smsc_phy_priv {
bool energy_enable;
};
static int smsc_phy_config_intr(struct phy_device *phydev) static int smsc_phy_config_intr(struct phy_device *phydev)
{ {
int rc = phy_write (phydev, MII_LAN83C185_IM, int rc = phy_write (phydev, MII_LAN83C185_IM,
@ -43,19 +47,14 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
static int smsc_phy_config_init(struct phy_device *phydev) static int smsc_phy_config_init(struct phy_device *phydev)
{ {
int __maybe_unused len; struct smsc_phy_priv *priv = phydev->priv;
struct device *dev __maybe_unused = &phydev->mdio.dev;
struct device_node *of_node __maybe_unused = dev->of_node;
int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
int enable_energy = 1;
if (rc < 0) if (rc < 0)
return rc; return rc;
if (of_find_property(of_node, "smsc,disable-energy-detect", &len)) if (priv->energy_enable) {
enable_energy = 0;
if (enable_energy) {
/* Enable energy detect mode for this SMSC Transceivers */ /* Enable energy detect mode for this SMSC Transceivers */
rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
rc | MII_LAN83C185_EDPWRDOWN); rc | MII_LAN83C185_EDPWRDOWN);
@ -110,10 +109,13 @@ static int lan911x_config_init(struct phy_device *phydev)
*/ */
static int lan87xx_read_status(struct phy_device *phydev) static int lan87xx_read_status(struct phy_device *phydev)
{ {
int err = genphy_read_status(phydev); struct smsc_phy_priv *priv = phydev->priv;
int i;
int err = genphy_read_status(phydev);
if (!phydev->link && priv->energy_enable) {
int i;
if (!phydev->link) {
/* Disable EDPD to wake up PHY */ /* Disable EDPD to wake up PHY */
int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0) if (rc < 0)
@ -149,6 +151,26 @@ static int lan87xx_read_status(struct phy_device *phydev)
return err; return err;
} }
static int smsc_phy_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct device_node *of_node = dev->of_node;
struct smsc_phy_priv *priv;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->energy_enable = true;
if (of_property_read_bool(of_node, "smsc,disable-energy-detect"))
priv->energy_enable = false;
phydev->priv = priv;
return 0;
}
static struct phy_driver smsc_phy_driver[] = { static struct phy_driver smsc_phy_driver[] = {
{ {
.phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */ .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
@ -159,6 +181,8 @@ static struct phy_driver smsc_phy_driver[] = {
| SUPPORTED_Asym_Pause), | SUPPORTED_Asym_Pause),
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
.probe = smsc_phy_probe,
/* basic functions */ /* basic functions */
.config_aneg = genphy_config_aneg, .config_aneg = genphy_config_aneg,
.read_status = genphy_read_status, .read_status = genphy_read_status,
@ -180,6 +204,8 @@ static struct phy_driver smsc_phy_driver[] = {
| SUPPORTED_Asym_Pause), | SUPPORTED_Asym_Pause),
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
.probe = smsc_phy_probe,
/* basic functions */ /* basic functions */
.config_aneg = genphy_config_aneg, .config_aneg = genphy_config_aneg,
.read_status = genphy_read_status, .read_status = genphy_read_status,
@ -201,6 +227,8 @@ static struct phy_driver smsc_phy_driver[] = {
| SUPPORTED_Asym_Pause), | SUPPORTED_Asym_Pause),
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
.probe = smsc_phy_probe,
/* basic functions */ /* basic functions */
.config_aneg = genphy_config_aneg, .config_aneg = genphy_config_aneg,
.read_status = lan87xx_read_status, .read_status = lan87xx_read_status,
@ -222,6 +250,8 @@ static struct phy_driver smsc_phy_driver[] = {
| SUPPORTED_Asym_Pause), | SUPPORTED_Asym_Pause),
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
.probe = smsc_phy_probe,
/* basic functions */ /* basic functions */
.config_aneg = genphy_config_aneg, .config_aneg = genphy_config_aneg,
.read_status = genphy_read_status, .read_status = genphy_read_status,
@ -242,6 +272,8 @@ static struct phy_driver smsc_phy_driver[] = {
| SUPPORTED_Asym_Pause), | SUPPORTED_Asym_Pause),
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
.probe = smsc_phy_probe,
/* basic functions */ /* basic functions */
.config_aneg = genphy_config_aneg, .config_aneg = genphy_config_aneg,
.read_status = lan87xx_read_status, .read_status = lan87xx_read_status,
@ -263,6 +295,8 @@ static struct phy_driver smsc_phy_driver[] = {
| SUPPORTED_Asym_Pause), | SUPPORTED_Asym_Pause),
.flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
.probe = smsc_phy_probe,
/* basic functions */ /* basic functions */
.config_aneg = genphy_config_aneg, .config_aneg = genphy_config_aneg,
.read_status = lan87xx_read_status, .read_status = lan87xx_read_status,

View File

@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
return i < MAX_CALLID; return i < MAX_CALLID;
} }
static int add_chan(struct pppox_sock *sock) static int add_chan(struct pppox_sock *sock,
struct pptp_addr *sa)
{ {
static int call_id; static int call_id;
spin_lock(&chan_lock); spin_lock(&chan_lock);
if (!sock->proto.pptp.src_addr.call_id) { if (!sa->call_id) {
call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1); call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
if (call_id == MAX_CALLID) { if (call_id == MAX_CALLID) {
call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1); call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
if (call_id == MAX_CALLID) if (call_id == MAX_CALLID)
goto out_err; goto out_err;
} }
sock->proto.pptp.src_addr.call_id = call_id; sa->call_id = call_id;
} else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap)) } else if (test_bit(sa->call_id, callid_bitmap)) {
goto out_err; goto out_err;
}
set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap); sock->proto.pptp.src_addr = *sa;
rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock); set_bit(sa->call_id, callid_bitmap);
rcu_assign_pointer(callid_sock[sa->call_id], sock);
spin_unlock(&chan_lock); spin_unlock(&chan_lock);
return 0; return 0;
@ -416,7 +419,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
struct sock *sk = sock->sk; struct sock *sk = sock->sk;
struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr; struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
struct pppox_sock *po = pppox_sk(sk); struct pppox_sock *po = pppox_sk(sk);
struct pptp_opt *opt = &po->proto.pptp;
int error = 0; int error = 0;
if (sockaddr_len < sizeof(struct sockaddr_pppox)) if (sockaddr_len < sizeof(struct sockaddr_pppox))
@ -424,10 +426,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk); lock_sock(sk);
opt->src_addr = sp->sa_addr.pptp; if (sk->sk_state & PPPOX_DEAD) {
if (add_chan(po)) error = -EALREADY;
error = -EBUSY; goto out;
}
if (sk->sk_state & PPPOX_BOUND) {
error = -EBUSY;
goto out;
}
if (add_chan(po, &sp->sa_addr.pptp))
error = -EBUSY;
else
sk->sk_state |= PPPOX_BOUND;
out:
release_sock(sk); release_sock(sk);
return error; return error;
} }
@ -498,7 +512,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
} }
opt->dst_addr = sp->sa_addr.pptp; opt->dst_addr = sp->sa_addr.pptp;
sk->sk_state = PPPOX_CONNECTED; sk->sk_state |= PPPOX_CONNECTED;
end: end:
release_sock(sk); release_sock(sk);

View File

@ -36,7 +36,7 @@
#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices" #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME "lan78xx" #define DRIVER_NAME "lan78xx"
#define DRIVER_VERSION "1.0.1" #define DRIVER_VERSION "1.0.2"
#define TX_TIMEOUT_JIFFIES (5 * HZ) #define TX_TIMEOUT_JIFFIES (5 * HZ)
#define THROTTLE_JIFFIES (HZ / 8) #define THROTTLE_JIFFIES (HZ / 8)
@ -462,32 +462,53 @@ static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data) u32 length, u8 *data)
{ {
u32 val; u32 val;
u32 saved;
int i, ret; int i, ret;
int retval;
ret = lan78xx_eeprom_confirm_not_busy(dev); /* depends on chip, some EEPROM pins are muxed with LED function.
if (ret) * disable & restore LED function to access EEPROM.
return ret; */
ret = lan78xx_read_reg(dev, HW_CFG, &val);
saved = val;
if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) {
val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
ret = lan78xx_write_reg(dev, HW_CFG, val);
}
retval = lan78xx_eeprom_confirm_not_busy(dev);
if (retval)
return retval;
for (i = 0; i < length; i++) { for (i = 0; i < length; i++) {
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_; val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
val |= (offset & E2P_CMD_EPC_ADDR_MASK_); val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
ret = lan78xx_write_reg(dev, E2P_CMD, val); ret = lan78xx_write_reg(dev, E2P_CMD, val);
if (unlikely(ret < 0)) if (unlikely(ret < 0)) {
return -EIO; retval = -EIO;
goto exit;
}
ret = lan78xx_wait_eeprom(dev); retval = lan78xx_wait_eeprom(dev);
if (ret < 0) if (retval < 0)
return ret; goto exit;
ret = lan78xx_read_reg(dev, E2P_DATA, &val); ret = lan78xx_read_reg(dev, E2P_DATA, &val);
if (unlikely(ret < 0)) if (unlikely(ret < 0)) {
return -EIO; retval = -EIO;
goto exit;
}
data[i] = val & 0xFF; data[i] = val & 0xFF;
offset++; offset++;
} }
return 0; retval = 0;
exit:
if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000)
ret = lan78xx_write_reg(dev, HW_CFG, saved);
return retval;
} }
static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset, static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
@ -509,44 +530,67 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data) u32 length, u8 *data)
{ {
u32 val; u32 val;
u32 saved;
int i, ret; int i, ret;
int retval;
ret = lan78xx_eeprom_confirm_not_busy(dev); /* depends on chip, some EEPROM pins are muxed with LED function.
if (ret) * disable & restore LED function to access EEPROM.
return ret; */
ret = lan78xx_read_reg(dev, HW_CFG, &val);
saved = val;
if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) {
val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
ret = lan78xx_write_reg(dev, HW_CFG, val);
}
retval = lan78xx_eeprom_confirm_not_busy(dev);
if (retval)
goto exit;
/* Issue write/erase enable command */ /* Issue write/erase enable command */
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_; val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
ret = lan78xx_write_reg(dev, E2P_CMD, val); ret = lan78xx_write_reg(dev, E2P_CMD, val);
if (unlikely(ret < 0)) if (unlikely(ret < 0)) {
return -EIO; retval = -EIO;
goto exit;
}
ret = lan78xx_wait_eeprom(dev); retval = lan78xx_wait_eeprom(dev);
if (ret < 0) if (retval < 0)
return ret; goto exit;
for (i = 0; i < length; i++) { for (i = 0; i < length; i++) {
/* Fill data register */ /* Fill data register */
val = data[i]; val = data[i];
ret = lan78xx_write_reg(dev, E2P_DATA, val); ret = lan78xx_write_reg(dev, E2P_DATA, val);
if (ret < 0) if (ret < 0) {
return ret; retval = -EIO;
goto exit;
}
/* Send "write" command */ /* Send "write" command */
val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_; val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
val |= (offset & E2P_CMD_EPC_ADDR_MASK_); val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
ret = lan78xx_write_reg(dev, E2P_CMD, val); ret = lan78xx_write_reg(dev, E2P_CMD, val);
if (ret < 0) if (ret < 0) {
return ret; retval = -EIO;
goto exit;
}
ret = lan78xx_wait_eeprom(dev); retval = lan78xx_wait_eeprom(dev);
if (ret < 0) if (retval < 0)
return ret; goto exit;
offset++; offset++;
} }
return 0; retval = 0;
exit:
if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000)
ret = lan78xx_write_reg(dev, HW_CFG, saved);
return retval;
} }
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset, static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
@ -904,7 +948,6 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
if (!phydev->link && dev->link_on) { if (!phydev->link && dev->link_on) {
dev->link_on = false; dev->link_on = false;
netif_carrier_off(dev->net);
/* reset MAC */ /* reset MAC */
ret = lan78xx_read_reg(dev, MAC_CR, &buf); ret = lan78xx_read_reg(dev, MAC_CR, &buf);
@ -914,6 +957,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
ret = lan78xx_write_reg(dev, MAC_CR, buf); ret = lan78xx_write_reg(dev, MAC_CR, buf);
if (unlikely(ret < 0)) if (unlikely(ret < 0))
return -EIO; return -EIO;
phy_mac_interrupt(phydev, 0);
} else if (phydev->link && !dev->link_on) { } else if (phydev->link && !dev->link_on) {
dev->link_on = true; dev->link_on = true;
@ -953,7 +998,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv); ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv); ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
netif_carrier_on(dev->net); phy_mac_interrupt(phydev, 1);
} }
return ret; return ret;
@ -1495,7 +1540,6 @@ done:
static int lan78xx_mdio_init(struct lan78xx_net *dev) static int lan78xx_mdio_init(struct lan78xx_net *dev)
{ {
int ret; int ret;
int i;
dev->mdiobus = mdiobus_alloc(); dev->mdiobus = mdiobus_alloc();
if (!dev->mdiobus) { if (!dev->mdiobus) {
@ -1511,10 +1555,6 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev)
snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
dev->udev->bus->busnum, dev->udev->devnum); dev->udev->bus->busnum, dev->udev->devnum);
/* handle our own interrupt */
for (i = 0; i < PHY_MAX_ADDR; i++)
dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT;
switch (dev->devid & ID_REV_CHIP_ID_MASK_) { switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
case 0x78000000: case 0x78000000:
case 0x78500000: case 0x78500000:
@ -1558,6 +1598,16 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
return -EIO; return -EIO;
} }
/* Enable PHY interrupts.
* We handle our own interrupt
*/
ret = phy_read(phydev, LAN88XX_INT_STS);
ret = phy_write(phydev, LAN88XX_INT_MASK,
LAN88XX_INT_MASK_MDINTPIN_EN_ |
LAN88XX_INT_MASK_LINK_CHANGE_);
phydev->irq = PHY_IGNORE_INTERRUPT;
ret = phy_connect_direct(dev->net, phydev, ret = phy_connect_direct(dev->net, phydev,
lan78xx_link_status_change, lan78xx_link_status_change,
PHY_INTERFACE_MODE_GMII); PHY_INTERFACE_MODE_GMII);
@ -1580,14 +1630,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
SUPPORTED_Pause | SUPPORTED_Asym_Pause); SUPPORTED_Pause | SUPPORTED_Asym_Pause);
genphy_config_aneg(phydev); genphy_config_aneg(phydev);
/* Workaround to enable PHY interrupt.
* phy_start_interrupts() is API for requesting and enabling
* PHY interrupt. However, USB-to-Ethernet device can't use
* request_irq() called in phy_start_interrupts().
* Set PHY to PHY_HALTED and call phy_start()
* to make a call to phy_enable_interrupts()
*/
phy_stop(phydev);
phy_start(phydev); phy_start(phydev);
netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
@ -2221,7 +2263,9 @@ netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
if (skb2) { if (skb2) {
skb_queue_tail(&dev->txq_pend, skb2); skb_queue_tail(&dev->txq_pend, skb2);
if (skb_queue_len(&dev->txq_pend) > 10) /* throttle TX patch at slower than SUPER SPEED USB */
if ((dev->udev->speed < USB_SPEED_SUPER) &&
(skb_queue_len(&dev->txq_pend) > 10))
netif_stop_queue(net); netif_stop_queue(net);
} else { } else {
netif_dbg(dev, tx_err, dev->net, netif_dbg(dev, tx_err, dev->net,

View File

@ -73,7 +73,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
static int vxlan_net_id; static int vxlan_net_id;
static struct rtnl_link_ops vxlan_link_ops; static struct rtnl_link_ops vxlan_link_ops;
static const u8 all_zeros_mac[ETH_ALEN]; static const u8 all_zeros_mac[ETH_ALEN + 2];
static int vxlan_sock_add(struct vxlan_dev *vxlan); static int vxlan_sock_add(struct vxlan_dev *vxlan);
@ -1985,11 +1985,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
vxlan->cfg.port_max, true); vxlan->cfg.port_max, true);
if (info) { if (info) {
if (info->key.tun_flags & TUNNEL_CSUM)
flags |= VXLAN_F_UDP_CSUM;
else
flags &= ~VXLAN_F_UDP_CSUM;
ttl = info->key.ttl; ttl = info->key.ttl;
tos = info->key.tos; tos = info->key.tos;
@ -2004,8 +1999,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto drop; goto drop;
sk = vxlan->vn4_sock->sock->sk; sk = vxlan->vn4_sock->sock->sk;
if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)) if (info) {
df = htons(IP_DF); if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
df = htons(IP_DF);
if (info->key.tun_flags & TUNNEL_CSUM)
flags |= VXLAN_F_UDP_CSUM;
else
flags &= ~VXLAN_F_UDP_CSUM;
}
memset(&fl4, 0, sizeof(fl4)); memset(&fl4, 0, sizeof(fl4));
fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0; fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
@ -2101,6 +2103,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
return; return;
} }
if (info) {
if (info->key.tun_flags & TUNNEL_CSUM)
flags &= ~VXLAN_F_UDP_ZERO_CSUM6_TX;
else
flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
}
ttl = ttl ? : ip6_dst_hoplimit(ndst); ttl = ttl ? : ip6_dst_hoplimit(ndst);
err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr, err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
0, ttl, src_port, dst_port, htonl(vni << 8), md, 0, ttl, src_port, dst_port, htonl(vni << 8), md,

View File

@ -150,18 +150,18 @@ int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size)
return -EIO; return -EIO;
} }
if (magic == AR5416_EEPROM_MAGIC) { *swap_needed = false;
*swap_needed = false; if (swab16(magic) == AR5416_EEPROM_MAGIC) {
} else if (swab16(magic) == AR5416_EEPROM_MAGIC) {
if (ah->ah_flags & AH_NO_EEP_SWAP) { if (ah->ah_flags & AH_NO_EEP_SWAP) {
ath_info(common, ath_info(common,
"Ignoring endianness difference in EEPROM magic bytes.\n"); "Ignoring endianness difference in EEPROM magic bytes.\n");
*swap_needed = false;
} else { } else {
*swap_needed = true; *swap_needed = true;
} }
} else { } else if (magic != AR5416_EEPROM_MAGIC) {
if (ath9k_hw_use_flash(ah))
return 0;
ath_err(common, ath_err(common,
"Invalid EEPROM Magic (0x%04x).\n", magic); "Invalid EEPROM Magic (0x%04x).\n", magic);
return -EINVAL; return -EINVAL;

View File

@ -879,11 +879,24 @@ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
return 0; return 0;
} }
static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev) void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
{ {
struct sdio_func *func;
struct mmc_host *host;
uint max_blocks;
uint nents; uint nents;
int err; int err;
func = sdiodev->func[2];
host = func->card->host;
sdiodev->sg_support = host->max_segs > 1;
max_blocks = min_t(uint, host->max_blk_count, 511u);
sdiodev->max_request_size = min_t(uint, host->max_req_size,
max_blocks * func->cur_blksize);
sdiodev->max_segment_count = min_t(uint, host->max_segs,
SG_MAX_SINGLE_ALLOC);
sdiodev->max_segment_size = host->max_seg_size;
if (!sdiodev->sg_support) if (!sdiodev->sg_support)
return; return;
@ -1021,9 +1034,6 @@ static void brcmf_sdiod_host_fixup(struct mmc_host *host)
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev) static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{ {
struct sdio_func *func;
struct mmc_host *host;
uint max_blocks;
int ret = 0; int ret = 0;
sdiodev->num_funcs = 2; sdiodev->num_funcs = 2;
@ -1054,26 +1064,6 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
goto out; goto out;
} }
/*
* determine host related variables after brcmf_sdiod_probe()
* as func->cur_blksize is properly set and F2 init has been
* completed successfully.
*/
func = sdiodev->func[2];
host = func->card->host;
sdiodev->sg_support = host->max_segs > 1;
max_blocks = min_t(uint, host->max_blk_count, 511u);
sdiodev->max_request_size = min_t(uint, host->max_req_size,
max_blocks * func->cur_blksize);
sdiodev->max_segment_count = min_t(uint, host->max_segs,
SG_MAX_SINGLE_ALLOC);
sdiodev->max_segment_size = host->max_seg_size;
/* allocate scatter-gather table. sg support
* will be disabled upon allocation failure.
*/
brcmf_sdiod_sgtable_alloc(sdiodev);
ret = brcmf_sdiod_freezer_attach(sdiodev); ret = brcmf_sdiod_freezer_attach(sdiodev);
if (ret) if (ret)
goto out; goto out;
@ -1084,7 +1074,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
ret = -ENODEV; ret = -ENODEV;
goto out; goto out;
} }
brcmf_sdiod_host_fixup(host); brcmf_sdiod_host_fixup(sdiodev->func[2]->card->host);
out: out:
if (ret) if (ret)
brcmf_sdiod_remove(sdiodev); brcmf_sdiod_remove(sdiodev);

View File

@ -17,6 +17,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/module.h>
#include <brcmu_wifi.h> #include <brcmu_wifi.h>
#include <brcmu_utils.h> #include <brcmu_utils.h>
#include "core.h" #include "core.h"

View File

@ -4114,6 +4114,11 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
goto fail; goto fail;
} }
/* allocate scatter-gather table. sg support
* will be disabled upon allocation failure.
*/
brcmf_sdiod_sgtable_alloc(bus->sdiodev);
/* Query the F2 block size, set roundup accordingly */ /* Query the F2 block size, set roundup accordingly */
bus->blocksize = bus->sdiodev->func[2]->cur_blksize; bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
bus->roundup = min(max_roundup, bus->blocksize); bus->roundup = min(max_roundup, bus->blocksize);

View File

@ -342,6 +342,7 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
/* Issue an abort to the specified function */ /* Issue an abort to the specified function */
int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn); int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev, void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
enum brcmf_sdiod_state state); enum brcmf_sdiod_state state);
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP

View File

@ -74,16 +74,19 @@
#define IWL7260_UCODE_API_MAX 17 #define IWL7260_UCODE_API_MAX 17
#define IWL7265_UCODE_API_MAX 17 #define IWL7265_UCODE_API_MAX 17
#define IWL7265D_UCODE_API_MAX 20 #define IWL7265D_UCODE_API_MAX 20
#define IWL3168_UCODE_API_MAX 20
/* Oldest version we won't warn about */ /* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 13 #define IWL7260_UCODE_API_OK 13
#define IWL7265_UCODE_API_OK 13 #define IWL7265_UCODE_API_OK 13
#define IWL7265D_UCODE_API_OK 13 #define IWL7265D_UCODE_API_OK 13
#define IWL3168_UCODE_API_OK 20
/* Lowest firmware API version supported */ /* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 13 #define IWL7260_UCODE_API_MIN 13
#define IWL7265_UCODE_API_MIN 13 #define IWL7265_UCODE_API_MIN 13
#define IWL7265D_UCODE_API_MIN 13 #define IWL7265D_UCODE_API_MIN 13
#define IWL3168_UCODE_API_MIN 20
/* NVM versions */ /* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d #define IWL7260_NVM_VERSION 0x0a1d
@ -92,6 +95,8 @@
#define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL3165_NVM_VERSION 0x709 #define IWL3165_NVM_VERSION 0x709
#define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL3168_NVM_VERSION 0xd01
#define IWL3168_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL7265_NVM_VERSION 0x0a1d #define IWL7265_NVM_VERSION 0x0a1d
#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL7265D_NVM_VERSION 0x0c11 #define IWL7265D_NVM_VERSION 0x0c11
@ -109,6 +114,9 @@
#define IWL3160_FW_PRE "iwlwifi-3160-" #define IWL3160_FW_PRE "iwlwifi-3160-"
#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
#define IWL3168_FW_PRE "iwlwifi-3168-"
#define IWL3168_MODULE_FIRMWARE(api) IWL3168_FW_PRE __stringify(api) ".ucode"
#define IWL7265_FW_PRE "iwlwifi-7265-" #define IWL7265_FW_PRE "iwlwifi-7265-"
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
@ -180,6 +188,12 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.ucode_api_ok = IWL7265_UCODE_API_OK, \ .ucode_api_ok = IWL7265_UCODE_API_OK, \
.ucode_api_min = IWL7265_UCODE_API_MIN .ucode_api_min = IWL7265_UCODE_API_MIN
#define IWL_DEVICE_3008 \
IWL_DEVICE_7000_COMMON, \
.ucode_api_max = IWL3168_UCODE_API_MAX, \
.ucode_api_ok = IWL3168_UCODE_API_OK, \
.ucode_api_min = IWL3168_UCODE_API_MIN
#define IWL_DEVICE_7005D \ #define IWL_DEVICE_7005D \
IWL_DEVICE_7000_COMMON, \ IWL_DEVICE_7000_COMMON, \
.ucode_api_max = IWL7265D_UCODE_API_MAX, \ .ucode_api_max = IWL7265D_UCODE_API_MAX, \
@ -299,11 +313,11 @@ const struct iwl_cfg iwl3165_2ac_cfg = {
const struct iwl_cfg iwl3168_2ac_cfg = { const struct iwl_cfg iwl3168_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 3168", .name = "Intel(R) Dual Band Wireless AC 3168",
.fw_name_pre = IWL7265D_FW_PRE, .fw_name_pre = IWL3168_FW_PRE,
IWL_DEVICE_7000, IWL_DEVICE_3008,
.ht_params = &iwl7000_ht_params, .ht_params = &iwl7000_ht_params,
.nvm_ver = IWL3165_NVM_VERSION, .nvm_ver = IWL3168_NVM_VERSION,
.nvm_calib_ver = IWL3165_TX_POWER_VERSION, .nvm_calib_ver = IWL3168_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN, .dccm_len = IWL7265_DCCM_LEN,
}; };
@ -376,5 +390,6 @@ const struct iwl_cfg iwl7265d_n_cfg = {
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_OK));
MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK));
MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK));

View File

@ -510,6 +510,9 @@ struct iwl_mvm_tx_resp {
* @scd_ssn: the index of the last contiguously sent packet * @scd_ssn: the index of the last contiguously sent packet
* @txed: number of Txed frames in this batch * @txed: number of Txed frames in this batch
* @txed_2_done: number of Acked frames in this batch * @txed_2_done: number of Acked frames in this batch
* @reduced_txp: power reduced according to TPC. This is the actual value and
* not a copy from the LQ command. Thus, if not the first rate was used
* for Tx-ing then this value will be set to 0 by FW.
*/ */
struct iwl_mvm_ba_notif { struct iwl_mvm_ba_notif {
__le32 sta_addr_lo32; __le32 sta_addr_lo32;
@ -524,7 +527,8 @@ struct iwl_mvm_ba_notif {
__le16 scd_ssn; __le16 scd_ssn;
u8 txed; u8 txed;
u8 txed_2_done; u8 txed_2_done;
__le16 reserved1; u8 reduced_txp;
u8 reserved1;
} __packed; } __packed;
/* /*

View File

@ -2,6 +2,7 @@
* *
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as * under the terms of version 2 of the GNU General Public License as
@ -724,14 +725,28 @@ static int _rs_collect_tx_data(struct iwl_mvm *mvm,
return 0; return 0;
} }
static int rs_collect_tx_data(struct iwl_mvm *mvm, static int rs_collect_tpc_data(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta, struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl, struct iwl_scale_tbl_info *tbl,
int scale_index, int attempts, int successes, int scale_index, int attempts, int successes,
u8 reduced_txp) u8 reduced_txp)
{
struct iwl_rate_scale_data *window = NULL;
if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
return -EINVAL;
window = &tbl->tpc_win[reduced_txp];
return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
window);
}
static int rs_collect_tlc_data(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl,
int scale_index, int attempts, int successes)
{ {
struct iwl_rate_scale_data *window = NULL; struct iwl_rate_scale_data *window = NULL;
int ret;
if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
return -EINVAL; return -EINVAL;
@ -745,16 +760,6 @@ static int rs_collect_tx_data(struct iwl_mvm *mvm,
/* Select window for current tx bit rate */ /* Select window for current tx bit rate */
window = &(tbl->win[scale_index]); window = &(tbl->win[scale_index]);
ret = _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
window);
if (ret)
return ret;
if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
return -EINVAL;
window = &tbl->tpc_win[reduced_txp];
return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes, return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
window); window);
} }
@ -1301,17 +1306,30 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
* first index into rate scale table. * first index into rate scale table.
*/ */
if (info->flags & IEEE80211_TX_STAT_AMPDU) { if (info->flags & IEEE80211_TX_STAT_AMPDU) {
/* ampdu_ack_len = 0 marks no BA was received. In this case rs_collect_tpc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
* treat it as a single frame loss as we don't want the success info->status.ampdu_len,
* ratio to dip too quickly because a BA wasn't received info->status.ampdu_ack_len,
reduced_txp);
/* ampdu_ack_len = 0 marks no BA was received. For TLC, treat
* it as a single frame loss as we don't want the success ratio
* to dip too quickly because a BA wasn't received.
* For TPC, there's no need for this optimisation since we want
* to recover very quickly from a bad power reduction and,
* therefore we'd like the success ratio to get an immediate hit
* when failing to get a BA, so we'd switch back to a lower or
* zero power reduction. When FW transmits agg with a rate
* different from the initial rate, it will not use reduced txp
* and will send BA notification twice (one empty with reduced
* txp equal to the value from LQ and one with reduced txp 0).
* We need to update counters for each txp level accordingly.
*/ */
if (info->status.ampdu_ack_len == 0) if (info->status.ampdu_ack_len == 0)
info->status.ampdu_len = 1; info->status.ampdu_len = 1;
rs_collect_tx_data(mvm, lq_sta, curr_tbl, lq_rate.index, rs_collect_tlc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
info->status.ampdu_len, info->status.ampdu_len,
info->status.ampdu_ack_len, info->status.ampdu_ack_len);
reduced_txp);
/* Update success/fail counts if not searching for new mode */ /* Update success/fail counts if not searching for new mode */
if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
@ -1344,9 +1362,13 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
else else
continue; continue;
rs_collect_tx_data(mvm, lq_sta, tmp_tbl, lq_rate.index, rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
1, i < retries ? 0 : legacy_success, lq_rate.index, 1,
reduced_txp); i < retries ? 0 : legacy_success,
reduced_txp);
rs_collect_tlc_data(mvm, lq_sta, tmp_tbl,
lq_rate.index, 1,
i < retries ? 0 : legacy_success);
} }
/* Update success/fail counts if not searching for new mode */ /* Update success/fail counts if not searching for new mode */

View File

@ -1029,7 +1029,6 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvmsta->tid_data[tid].rate_n_flags = mvmsta->tid_data[tid].rate_n_flags =
le32_to_cpu(tx_resp->initial_rate); le32_to_cpu(tx_resp->initial_rate);
mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
mvmsta->tid_data[tid].tx_time = mvmsta->tid_data[tid].tx_time =
le16_to_cpu(tx_resp->wireless_media_time); le16_to_cpu(tx_resp->wireless_media_time);
} }
@ -1060,7 +1059,7 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
/* TODO: not accounted if the whole A-MPDU failed */ /* TODO: not accounted if the whole A-MPDU failed */
info->status.tx_time = tid_data->tx_time; info->status.tx_time = tid_data->tx_time;
info->status.status_driver_data[0] = info->status.status_driver_data[0] =
(void *)(uintptr_t)tid_data->reduced_tpc; (void *)(uintptr_t)ba_notif->reduced_txp;
info->status.status_driver_data[1] = info->status.status_driver_data[1] =
(void *)(uintptr_t)tid_data->rate_n_flags; (void *)(uintptr_t)tid_data->rate_n_flags;
} }
@ -1133,6 +1132,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
scd_flow, ba_resp_scd_ssn, ba_notif->txed, scd_flow, ba_resp_scd_ssn, ba_notif->txed,
ba_notif->txed_2_done); ba_notif->txed_2_done);
IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
ba_notif->reduced_txp);
tid_data->next_reclaimed = ba_resp_scd_ssn; tid_data->next_reclaimed = ba_resp_scd_ssn;
iwl_mvm_check_ratid_empty(mvm, sta, tid); iwl_mvm_check_ratid_empty(mvm, sta, tid);

View File

@ -378,7 +378,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
/* 3168 Series */ /* 3168 Series */
{IWL_PCI_DEVICE(0x24FB, 0x2010, iwl3168_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FB, 0x2050, iwl3168_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FB, 0x2150, iwl3168_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)},
/* 7265 Series */ /* 7265 Series */
@ -475,6 +478,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
/* 9000 Series */ /* 9000 Series */
{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},

View File

@ -991,7 +991,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
goto nla_put_failure; goto nla_put_failure;
} }
if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, ETH_ALEN, hdr->addr2)) if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
ETH_ALEN, data->addresses[1].addr))
goto nla_put_failure; goto nla_put_failure;
/* We get the skb->data */ /* We get the skb->data */
@ -2736,7 +2737,7 @@ static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
spin_lock_bh(&hwsim_radio_lock); spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry(data, &hwsim_radios, list) { list_for_each_entry(data, &hwsim_radios, list) {
if (mac80211_hwsim_addr_match(data, addr)) { if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) {
_found = true; _found = true;
break; break;
} }

View File

@ -273,8 +273,10 @@ static void rt2400pci_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL)); !(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL, rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
!(filter_flags & FIF_CONTROL)); !(filter_flags & FIF_CONTROL));
rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1); rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
!test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
rt2x00_set_field32(&reg, RXCSR0_DROP_TODS, rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
!test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
!rt2x00dev->intf_ap_count); !rt2x00dev->intf_ap_count);
rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1); rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg); rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);

View File

@ -274,8 +274,10 @@ static void rt2500pci_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL)); !(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL, rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
!(filter_flags & FIF_CONTROL)); !(filter_flags & FIF_CONTROL));
rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1); rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
!test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
rt2x00_set_field32(&reg, RXCSR0_DROP_TODS, rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
!test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
!rt2x00dev->intf_ap_count); !rt2x00dev->intf_ap_count);
rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1); rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST, rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST,

View File

@ -437,8 +437,10 @@ static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL)); !(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL, rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL,
!(filter_flags & FIF_CONTROL)); !(filter_flags & FIF_CONTROL));
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME, 1); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME,
!test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS, rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS,
!test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
!rt2x00dev->intf_ap_count); !rt2x00dev->intf_ap_count);
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1);
rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST, rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST,

View File

@ -1490,7 +1490,8 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_FCSFAIL)); !(filter_flags & FIF_FCSFAIL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR, rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
!(filter_flags & FIF_PLCPFAIL)); !(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME, 1); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
!test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST, rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,

View File

@ -669,6 +669,7 @@ enum rt2x00_state_flags {
CONFIG_POWERSAVING, CONFIG_POWERSAVING,
CONFIG_HT_DISABLED, CONFIG_HT_DISABLED,
CONFIG_QOS_DISABLED, CONFIG_QOS_DISABLED,
CONFIG_MONITORING,
/* /*
* Mark we currently are sequentially reading TX_STA_FIFO register * Mark we currently are sequentially reading TX_STA_FIFO register

View File

@ -277,6 +277,11 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
else else
clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
if (conf->flags & IEEE80211_CONF_MONITOR)
set_bit(CONFIG_MONITORING, &rt2x00dev->flags);
else
clear_bit(CONFIG_MONITORING, &rt2x00dev->flags);
rt2x00dev->curr_band = conf->chandef.chan->band; rt2x00dev->curr_band = conf->chandef.chan->band;
rt2x00dev->curr_freq = conf->chandef.chan->center_freq; rt2x00dev->curr_freq = conf->chandef.chan->center_freq;
rt2x00dev->tx_power = conf->power_level; rt2x00dev->tx_power = conf->power_level;

View File

@ -385,11 +385,6 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
*total_flags |= FIF_PSPOLL; *total_flags |= FIF_PSPOLL;
} }
/*
* Check if there is any work left for us.
*/
if (rt2x00dev->packet_filter == *total_flags)
return;
rt2x00dev->packet_filter = *total_flags; rt2x00dev->packet_filter = *total_flags;
rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags); rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags);

View File

@ -530,8 +530,10 @@ static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL)); !(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL, rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
!(filter_flags & (FIF_CONTROL | FIF_PSPOLL))); !(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
!test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS, rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
!test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
!rt2x00dev->intf_ap_count); !rt2x00dev->intf_ap_count);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST, rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,

View File

@ -480,8 +480,10 @@ static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_PLCPFAIL)); !(filter_flags & FIF_PLCPFAIL));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL, rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
!(filter_flags & (FIF_CONTROL | FIF_PSPOLL))); !(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
!test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS, rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
!test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
!rt2x00dev->intf_ap_count); !rt2x00dev->intf_ap_count);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST, rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,

View File

@ -351,7 +351,6 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
case COUNTRY_CODE_SPAIN: case COUNTRY_CODE_SPAIN:
case COUNTRY_CODE_FRANCE: case COUNTRY_CODE_FRANCE:
case COUNTRY_CODE_ISRAEL: case COUNTRY_CODE_ISRAEL:
case COUNTRY_CODE_WORLD_WIDE_13:
return &rtl_regdom_12_13; return &rtl_regdom_12_13;
case COUNTRY_CODE_MKK: case COUNTRY_CODE_MKK:
case COUNTRY_CODE_MKK1: case COUNTRY_CODE_MKK1:
@ -360,6 +359,7 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
return &rtl_regdom_14_60_64; return &rtl_regdom_14_60_64;
case COUNTRY_CODE_GLOBAL_DOMAIN: case COUNTRY_CODE_GLOBAL_DOMAIN:
return &rtl_regdom_14; return &rtl_regdom_14;
case COUNTRY_CODE_WORLD_WIDE_13:
case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL: case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL:
return &rtl_regdom_12_13_5g_all; return &rtl_regdom_12_13_5g_all;
default: default:

View File

@ -364,6 +364,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
RING_IDX cons, prod; RING_IDX cons, prod;
unsigned short id; unsigned short id;
struct sk_buff *skb; struct sk_buff *skb;
bool more_to_do;
BUG_ON(!netif_carrier_ok(queue->info->netdev)); BUG_ON(!netif_carrier_ok(queue->info->netdev));
@ -398,18 +399,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
queue->tx.rsp_cons = prod; queue->tx.rsp_cons = prod;
/* RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
* Set a new event, then check for race with update of tx_cons. } while (more_to_do);
* Note that it is essential to schedule a callback, no matter
* how few buffers are pending. Even if there is space in the
* transmit ring, higher layers may be blocked because too much
* data is outstanding: in such cases notification from Xen is
* likely to be the only kick that we'll get.
*/
queue->tx.sring->rsp_event =
prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
mb(); /* update shared area */
} while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));
xennet_maybe_wake_tx(queue); xennet_maybe_wake_tx(queue);
} }

View File

@ -143,11 +143,31 @@ int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
} }
EXPORT_SYMBOL(of_mdio_parse_addr); EXPORT_SYMBOL(of_mdio_parse_addr);
/* The following is a list of PHY compatible strings which appear in
* some DTBs. The compatible string is never matched against a PHY
* driver, so is pointless. We only expect devices which are not PHYs
* to have a compatible string, so they can be matched to an MDIO
* driver. Encourage users to upgrade their DT blobs to remove these.
*/
static const struct of_device_id whitelist_phys[] = {
{ .compatible = "brcm,40nm-ephy" },
{ .compatible = "marvell,88E1111", },
{ .compatible = "marvell,88e1116", },
{ .compatible = "marvell,88e1118", },
{ .compatible = "marvell,88e1149r", },
{ .compatible = "marvell,88e1310", },
{ .compatible = "marvell,88E1510", },
{ .compatible = "marvell,88E1514", },
{ .compatible = "moxa,moxart-rtl8201cp", },
{}
};
/* /*
* Return true if the child node is for a phy. It must either: * Return true if the child node is for a phy. It must either:
* o Compatible string of "ethernet-phy-idX.X" * o Compatible string of "ethernet-phy-idX.X"
* o Compatible string of "ethernet-phy-ieee802.3-c45" * o Compatible string of "ethernet-phy-ieee802.3-c45"
* o Compatible string of "ethernet-phy-ieee802.3-c22" * o Compatible string of "ethernet-phy-ieee802.3-c22"
* o In the white list above (and issue a warning)
* o No compatibility string * o No compatibility string
* *
* A device which is not a phy is expected to have a compatible string * A device which is not a phy is expected to have a compatible string
@ -166,6 +186,13 @@ static bool of_mdiobus_child_is_phy(struct device_node *child)
if (of_device_is_compatible(child, "ethernet-phy-ieee802.3-c22")) if (of_device_is_compatible(child, "ethernet-phy-ieee802.3-c22"))
return true; return true;
if (of_match_node(whitelist_phys, child)) {
pr_warn(FW_WARN
"%s: Whitelisted compatible string. Please remove\n",
child->full_name);
return true;
}
if (!of_find_property(child, "compatible", NULL)) if (!of_find_property(child, "compatible", NULL))
return true; return true;
@ -256,11 +283,19 @@ static int of_phy_match(struct device *dev, void *phy_np)
struct phy_device *of_phy_find_device(struct device_node *phy_np) struct phy_device *of_phy_find_device(struct device_node *phy_np)
{ {
struct device *d; struct device *d;
struct mdio_device *mdiodev;
if (!phy_np) if (!phy_np)
return NULL; return NULL;
d = bus_find_device(&mdio_bus_type, NULL, phy_np, of_phy_match); d = bus_find_device(&mdio_bus_type, NULL, phy_np, of_phy_match);
return d ? to_phy_device(d) : NULL; if (d) {
mdiodev = to_mdio_device(d);
if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
return to_phy_device(d);
}
return NULL;
} }
EXPORT_SYMBOL(of_phy_find_device); EXPORT_SYMBOL(of_phy_find_device);

View File

@ -178,7 +178,6 @@ static int ptp_ixp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{ {
u64 ns; u64 ns;
u32 remainder;
unsigned long flags; unsigned long flags;
struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps); struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
struct ixp46x_ts_regs *regs = ixp_clock->regs; struct ixp46x_ts_regs *regs = ixp_clock->regs;
@ -189,8 +188,7 @@ static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
spin_unlock_irqrestore(&register_lock, flags); spin_unlock_irqrestore(&register_lock, flags);
ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); *ts = ns_to_timespec64(ns);
ts->tv_nsec = remainder;
return 0; return 0;
} }
@ -202,8 +200,7 @@ static int ptp_ixp_settime(struct ptp_clock_info *ptp,
struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps); struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
struct ixp46x_ts_regs *regs = ixp_clock->regs; struct ixp46x_ts_regs *regs = ixp_clock->regs;
ns = ts->tv_sec * 1000000000ULL; ns = timespec64_to_ns(ts);
ns += ts->tv_nsec;
spin_lock_irqsave(&register_lock, flags); spin_lock_irqsave(&register_lock, flags);

View File

@ -613,9 +613,10 @@ out:
return err; return err;
} }
static int ssb_bus_register(struct ssb_bus *bus, static int __maybe_unused
ssb_invariants_func_t get_invariants, ssb_bus_register(struct ssb_bus *bus,
unsigned long baseaddr) ssb_invariants_func_t get_invariants,
unsigned long baseaddr)
{ {
int err; int err;

View File

@ -1261,6 +1261,9 @@ COMPATIBLE_IOCTL(HCIUNBLOCKADDR)
COMPATIBLE_IOCTL(HCIINQUIRY) COMPATIBLE_IOCTL(HCIINQUIRY)
COMPATIBLE_IOCTL(HCIUARTSETPROTO) COMPATIBLE_IOCTL(HCIUARTSETPROTO)
COMPATIBLE_IOCTL(HCIUARTGETPROTO) COMPATIBLE_IOCTL(HCIUARTGETPROTO)
COMPATIBLE_IOCTL(HCIUARTGETDEVICE)
COMPATIBLE_IOCTL(HCIUARTSETFLAGS)
COMPATIBLE_IOCTL(HCIUARTGETFLAGS)
COMPATIBLE_IOCTL(RFCOMMCREATEDEV) COMPATIBLE_IOCTL(RFCOMMCREATEDEV)
COMPATIBLE_IOCTL(RFCOMMRELEASEDEV) COMPATIBLE_IOCTL(RFCOMMRELEASEDEV)
COMPATIBLE_IOCTL(RFCOMMGETDEVLIST) COMPATIBLE_IOCTL(RFCOMMGETDEVLIST)

View File

@ -512,7 +512,6 @@ static inline void napi_enable(struct napi_struct *n)
clear_bit(NAPI_STATE_NPSVC, &n->state); clear_bit(NAPI_STATE_NPSVC, &n->state);
} }
#ifdef CONFIG_SMP
/** /**
* napi_synchronize - wait until NAPI is not running * napi_synchronize - wait until NAPI is not running
* @n: napi context * @n: napi context
@ -523,12 +522,12 @@ static inline void napi_enable(struct napi_struct *n)
*/ */
static inline void napi_synchronize(const struct napi_struct *n) static inline void napi_synchronize(const struct napi_struct *n)
{ {
while (test_bit(NAPI_STATE_SCHED, &n->state)) if (IS_ENABLED(CONFIG_SMP))
msleep(1); while (test_bit(NAPI_STATE_SCHED, &n->state))
msleep(1);
else
barrier();
} }
#else
# define napi_synchronize(n) barrier()
#endif
enum netdev_queue_state_t { enum netdev_queue_state_t {
__QUEUE_STATE_DRV_XOFF, __QUEUE_STATE_DRV_XOFF,

View File

@ -252,6 +252,12 @@ struct l2cap_conn_rsp {
#define L2CAP_PSM_3DSP 0x0021 #define L2CAP_PSM_3DSP 0x0021
#define L2CAP_PSM_IPSP 0x0023 /* 6LoWPAN */ #define L2CAP_PSM_IPSP 0x0023 /* 6LoWPAN */
#define L2CAP_PSM_DYN_START 0x1001
#define L2CAP_PSM_DYN_END 0xffff
#define L2CAP_PSM_AUTO_END 0x10ff
#define L2CAP_PSM_LE_DYN_START 0x0080
#define L2CAP_PSM_LE_DYN_END 0x00ff
/* channel identifier */ /* channel identifier */
#define L2CAP_CID_SIGNALING 0x0001 #define L2CAP_CID_SIGNALING 0x0001
#define L2CAP_CID_CONN_LESS 0x0002 #define L2CAP_CID_CONN_LESS 0x0002

View File

@ -44,6 +44,24 @@ static inline bool skb_valid_dst(const struct sk_buff *skb)
return dst && !(dst->flags & DST_METADATA); return dst && !(dst->flags & DST_METADATA);
} }
static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
const struct sk_buff *skb_b)
{
const struct metadata_dst *a, *b;
if (!(skb_a->_skb_refdst | skb_b->_skb_refdst))
return 0;
a = (const struct metadata_dst *) skb_dst(skb_a);
b = (const struct metadata_dst *) skb_dst(skb_b);
if (!a != !b || a->u.tun_info.options_len != b->u.tun_info.options_len)
return 1;
return memcmp(&a->u.tun_info, &b->u.tun_info,
sizeof(a->u.tun_info) + a->u.tun_info.options_len);
}
struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags); struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags); struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);

View File

@ -64,8 +64,16 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
void ip6_route_input(struct sk_buff *skb); void ip6_route_input(struct sk_buff *skb);
struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
struct flowi6 *fl6); struct flowi6 *fl6, int flags);
static inline struct dst_entry *ip6_route_output(struct net *net,
const struct sock *sk,
struct flowi6 *fl6)
{
return ip6_route_output_flags(net, sk, fl6, 0);
}
struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
int flags); int flags);

View File

@ -79,12 +79,10 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *proto); const struct nf_conntrack_l4proto *proto);
#ifdef CONFIG_LOCKDEP #define CONNTRACK_LOCKS 1024
# define CONNTRACK_LOCKS 8
#else
# define CONNTRACK_LOCKS 1024
#endif
extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS]; extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
void nf_conntrack_lock(spinlock_t *lock);
extern spinlock_t nf_conntrack_expect_lock; extern spinlock_t nf_conntrack_expect_lock;

View File

@ -756,7 +756,6 @@ struct sctp_transport {
/* Reference counting. */ /* Reference counting. */
atomic_t refcnt; atomic_t refcnt;
__u32 dead:1,
/* RTO-Pending : A flag used to track if one of the DATA /* RTO-Pending : A flag used to track if one of the DATA
* chunks sent to this address is currently being * chunks sent to this address is currently being
* used to compute a RTT. If this flag is 0, * used to compute a RTT. If this flag is 0,
@ -766,7 +765,7 @@ struct sctp_transport {
* calculation completes (i.e. the DATA chunk * calculation completes (i.e. the DATA chunk
* is SACK'd) clear this flag. * is SACK'd) clear this flag.
*/ */
rto_pending:1, __u32 rto_pending:1,
/* /*
* hb_sent : a flag that signals that we have a pending * hb_sent : a flag that signals that we have a pending
@ -955,7 +954,7 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk); void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
void sctp_transport_free(struct sctp_transport *); void sctp_transport_free(struct sctp_transport *);
void sctp_transport_reset_timers(struct sctp_transport *); void sctp_transport_reset_timers(struct sctp_transport *);
void sctp_transport_hold(struct sctp_transport *); int sctp_transport_hold(struct sctp_transport *);
void sctp_transport_put(struct sctp_transport *); void sctp_transport_put(struct sctp_transport *);
void sctp_transport_update_rto(struct sctp_transport *, __u32); void sctp_transport_update_rto(struct sctp_transport *, __u32);
void sctp_transport_raise_cwnd(struct sctp_transport *, __u32, __u32); void sctp_transport_raise_cwnd(struct sctp_transport *, __u32, __u32);

View File

@ -1035,18 +1035,6 @@ struct proto {
struct list_head node; struct list_head node;
#ifdef SOCK_REFCNT_DEBUG #ifdef SOCK_REFCNT_DEBUG
atomic_t socks; atomic_t socks;
#endif
#ifdef CONFIG_MEMCG_KMEM
/*
* cgroup specific init/deinit functions. Called once for all
* protocols that implement it, from cgroups populate function.
* This function has to setup any files the protocol want to
* appear in the kmem cgroup filesystem.
*/
int (*init_cgroup)(struct mem_cgroup *memcg,
struct cgroup_subsys *ss);
void (*destroy_cgroup)(struct mem_cgroup *memcg);
struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
#endif #endif
int (*diag_destroy)(struct sock *sk, int err); int (*diag_destroy)(struct sock *sk, int err);
}; };

View File

@ -16,7 +16,7 @@ struct sock_reuseport {
}; };
extern int reuseport_alloc(struct sock *sk); extern int reuseport_alloc(struct sock *sk);
extern int reuseport_add_sock(struct sock *sk, const struct sock *sk2); extern int reuseport_add_sock(struct sock *sk, struct sock *sk2);
extern void reuseport_detach_sock(struct sock *sk); extern void reuseport_detach_sock(struct sock *sk);
extern struct sock *reuseport_select_sock(struct sock *sk, extern struct sock *reuseport_select_sock(struct sock *sk,
u32 hash, u32 hash,

View File

@ -216,7 +216,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
/* TCP thin-stream limits */ /* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */ #define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */
/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */ /* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND 10 #define TCP_INIT_CWND 10
/* Bit Flags for sysctl_tcp_fastopen */ /* Bit Flags for sysctl_tcp_fastopen */

View File

@ -307,6 +307,9 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
/* check that it's our buffer */ /* check that it's our buffer */
if (lowpan_is_ipv6(*skb_network_header(skb))) { if (lowpan_is_ipv6(*skb_network_header(skb))) {
/* Pull off the 1-byte of 6lowpan header. */
skb_pull(skb, 1);
/* Copy the packet so that the IPv6 header is /* Copy the packet so that the IPv6 header is
* properly aligned. * properly aligned.
*/ */
@ -317,6 +320,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
local_skb->protocol = htons(ETH_P_IPV6); local_skb->protocol = htons(ETH_P_IPV6);
local_skb->pkt_type = PACKET_HOST; local_skb->pkt_type = PACKET_HOST;
local_skb->dev = dev;
skb_set_transport_header(local_skb, sizeof(struct ipv6hdr)); skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
@ -335,6 +339,8 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
if (!local_skb) if (!local_skb)
goto drop; goto drop;
local_skb->dev = dev;
ret = iphc_decompress(local_skb, dev, chan); ret = iphc_decompress(local_skb, dev, chan);
if (ret < 0) { if (ret < 0) {
kfree_skb(local_skb); kfree_skb(local_skb);
@ -343,7 +349,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
local_skb->protocol = htons(ETH_P_IPV6); local_skb->protocol = htons(ETH_P_IPV6);
local_skb->pkt_type = PACKET_HOST; local_skb->pkt_type = PACKET_HOST;
local_skb->dev = dev;
if (give_skb_to_upper(local_skb, dev) if (give_skb_to_upper(local_skb, dev)
!= NET_RX_SUCCESS) { != NET_RX_SUCCESS) {

View File

@ -688,21 +688,29 @@ static u8 update_white_list(struct hci_request *req)
* command to remove it from the controller. * command to remove it from the controller.
*/ */
list_for_each_entry(b, &hdev->le_white_list, list) { list_for_each_entry(b, &hdev->le_white_list, list) {
struct hci_cp_le_del_from_white_list cp; /* If the device is neither in pend_le_conns nor
* pend_le_reports then remove it from the whitelist.
*/
if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
&b->bdaddr, b->bdaddr_type) &&
!hci_pend_le_action_lookup(&hdev->pend_le_reports,
&b->bdaddr, b->bdaddr_type)) {
struct hci_cp_le_del_from_white_list cp;
if (hci_pend_le_action_lookup(&hdev->pend_le_conns, cp.bdaddr_type = b->bdaddr_type;
&b->bdaddr, b->bdaddr_type) || bacpy(&cp.bdaddr, &b->bdaddr);
hci_pend_le_action_lookup(&hdev->pend_le_reports,
&b->bdaddr, b->bdaddr_type)) { hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
white_list_entries++; sizeof(cp), &cp);
continue; continue;
} }
cp.bdaddr_type = b->bdaddr_type; if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
bacpy(&cp.bdaddr, &b->bdaddr); /* White list can not be used with RPAs */
return 0x00;
}
hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, white_list_entries++;
sizeof(cp), &cp);
} }
/* Since all no longer valid white list entries have been /* Since all no longer valid white list entries have been

View File

@ -197,10 +197,20 @@ int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
chan->sport = psm; chan->sport = psm;
err = 0; err = 0;
} else { } else {
u16 p; u16 p, start, end, incr;
if (chan->src_type == BDADDR_BREDR) {
start = L2CAP_PSM_DYN_START;
end = L2CAP_PSM_AUTO_END;
incr = 2;
} else {
start = L2CAP_PSM_LE_DYN_START;
end = L2CAP_PSM_LE_DYN_END;
incr = 1;
}
err = -EINVAL; err = -EINVAL;
for (p = 0x1001; p < 0x1100; p += 2) for (p = start; p <= end; p += incr)
if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) { if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
chan->psm = cpu_to_le16(p); chan->psm = cpu_to_le16(p);
chan->sport = cpu_to_le16(p); chan->sport = cpu_to_le16(p);

View File

@ -58,7 +58,7 @@ static int l2cap_validate_bredr_psm(u16 psm)
return -EINVAL; return -EINVAL;
/* Restrict usage of well-known PSMs */ /* Restrict usage of well-known PSMs */
if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) if (psm < L2CAP_PSM_DYN_START && !capable(CAP_NET_BIND_SERVICE))
return -EACCES; return -EACCES;
return 0; return 0;
@ -67,11 +67,11 @@ static int l2cap_validate_bredr_psm(u16 psm)
static int l2cap_validate_le_psm(u16 psm) static int l2cap_validate_le_psm(u16 psm)
{ {
/* Valid LE_PSM ranges are defined only until 0x00ff */ /* Valid LE_PSM ranges are defined only until 0x00ff */
if (psm > 0x00ff) if (psm > L2CAP_PSM_LE_DYN_END)
return -EINVAL; return -EINVAL;
/* Restrict fixed, SIG assigned PSM values to CAP_NET_BIND_SERVICE */ /* Restrict fixed, SIG assigned PSM values to CAP_NET_BIND_SERVICE */
if (psm <= 0x007f && !capable(CAP_NET_BIND_SERVICE)) if (psm < L2CAP_PSM_LE_DYN_START && !capable(CAP_NET_BIND_SERVICE))
return -EACCES; return -EACCES;
return 0; return 0;
@ -125,6 +125,9 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
goto done; goto done;
} }
bacpy(&chan->src, &la.l2_bdaddr);
chan->src_type = la.l2_bdaddr_type;
if (la.l2_cid) if (la.l2_cid)
err = l2cap_add_scid(chan, __le16_to_cpu(la.l2_cid)); err = l2cap_add_scid(chan, __le16_to_cpu(la.l2_cid));
else else
@ -156,9 +159,6 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
break; break;
} }
bacpy(&chan->src, &la.l2_bdaddr);
chan->src_type = la.l2_bdaddr_type;
if (chan->psm && bdaddr_type_is_le(chan->src_type)) if (chan->psm && bdaddr_type_is_le(chan->src_type))
chan->mode = L2CAP_MODE_LE_FLOWCTL; chan->mode = L2CAP_MODE_LE_FLOWCTL;

Some files were not shown because too many files have changed in this diff Show More