diff --git a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt index d3058768b23d..c53e0b08032f 100644 --- a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt +++ b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt @@ -35,7 +35,7 @@ Example: device_type = "dma"; reg = <0x0 0x1f270000 0x0 0x10000>, <0x0 0x1f200000 0x0 0x10000>, - <0x0 0x1b008000 0x0 0x2000>, + <0x0 0x1b000000 0x0 0x400000>, <0x0 0x1054a000 0x0 0x100>; interrupts = <0x0 0x82 0x4>, <0x0 0xb8 0x4>, diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt index f0b4cd72411d..04e6bef3ac3f 100644 --- a/Documentation/devicetree/bindings/net/dsa/dsa.txt +++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt @@ -44,9 +44,10 @@ Note that a port labelled "dsa" will imply checking for the uplink phandle described below. Optionnal property: -- link : Should be a phandle to another switch's DSA port. +- link : Should be a list of phandles to other switches' DSA ports. This property is only used when switches are being - chained/cascaded together. + chained/cascaded together. This port is used as the outgoing port + towards the phandle ports, which can be more than one hop away. - phy-handle : Phandle to a PHY on an external MDIO bus, not the switch internal one. See @@ -58,6 +59,10 @@ Optionnal property: Documentation/devicetree/bindings/net/ethernet.txt for details. +- mii-bus : Should be a phandle to a valid MDIO bus device node. + This mii-bus will be used in preference to the + global dsa,mii-bus defined above, for this switch. + Optional subnodes: - fixed-link : Fixed-link subnode describing a link to a non-MDIO managed entity. See @@ -96,10 +101,11 @@ Example: label = "cpu"; }; - switch0uplink: port@6 { + switch0port6: port@6 { reg = <6>; label = "dsa"; - link = <&switch1uplink>; + link = <&switch1port0 + &switch2port0>; }; }; @@ -107,11 +113,31 @@ Example: #address-cells = <1>; #size-cells = <0>; reg = <17 1>; /* MDIO address 17, switch 1 in tree */ + mii-bus = <&mii_bus1>; - switch1uplink: port@0 { + switch1port0: port@0 { reg = <0>; label = "dsa"; - link = <&switch0uplink>; + link = <&switch0port6>; + }; + switch1port1: port@1 { + reg = <1>; + label = "dsa"; + link = <&switch2port1>; + }; + }; + + switch@2 { + #address-cells = <1>; + #size-cells = <0>; + reg = <18 2>; /* MDIO address 18, switch 2 in tree */ + mii-bus = <&mii_bus1>; + + switch2port0: port@0 { + reg = <0>; + label = "dsa"; + link = <&switch1port1 + &switch0port6>; }; }; }; diff --git a/Documentation/devicetree/bindings/phy/ti-phy.txt b/Documentation/devicetree/bindings/phy/ti-phy.txt index 305e3df3d9b1..9cf9446eaf2e 100644 --- a/Documentation/devicetree/bindings/phy/ti-phy.txt +++ b/Documentation/devicetree/bindings/phy/ti-phy.txt @@ -82,6 +82,9 @@ Optional properties: - id: If there are multiple instance of the same type, in order to differentiate between each instance "id" can be used (e.g., multi-lane PCIe PHY). If "id" is not provided, it is set to default value of '1'. + - syscon-pllreset: Handle to system control region that contains the + CTRL_CORE_SMA_SW_0 register and register offset to the CTRL_CORE_SMA_SW_0 + register that contains the SATA_PLL_SOFT_RESET bit. Only valid for sata_phy. This is usually a subnode of ocp2scp to which it is connected. 
@@ -100,3 +103,16 @@ usb3phy@4a084400 { "sysclk", "refclk"; }; + +sata_phy: phy@4A096000 { + compatible = "ti,phy-pipe3-sata"; + reg = <0x4A096000 0x80>, /* phy_rx */ + <0x4A096400 0x64>, /* phy_tx */ + <0x4A096800 0x40>; /* pll_ctrl */ + reg-names = "phy_rx", "phy_tx", "pll_ctrl"; + ctrl-module = <&omap_control_sata>; + clocks = <&sys_clkin1>, <&sata_ref_clk>; + clock-names = "sysclk", "refclk"; + syscon-pllreset = <&scm_conf 0x3fc>; + #phy-cells = <0>; +}; diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt index c86f2f1ae4f6..1fec1135791d 100644 --- a/Documentation/input/alps.txt +++ b/Documentation/input/alps.txt @@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2 byte 5: 0 z6 z5 z4 z3 z2 z1 z0 Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for -the DualPoint Stick. For non interleaved dualpoint devices the pointingstick -buttons get reported separately in the PSM, PSR and PSL bits. +the DualPoint Stick. The M, R and L bits signal the combined status of both +the pointingstick and touchpad buttons, except for Dell dualpoint devices +where the pointingstick buttons get reported separately in the PSM, PSR +and PSL bits. Dualpoint device -- interleaved packet format --------------------------------------------- diff --git a/Documentation/networking/6lowpan.txt b/Documentation/networking/6lowpan.txt new file mode 100644 index 000000000000..a7dc7e939c7a --- /dev/null +++ b/Documentation/networking/6lowpan.txt @@ -0,0 +1,50 @@ + +Netdev private dataroom for 6lowpan interfaces: + +All 6lowpan-able net devices, meaning all interfaces with ARPHRD_6LOWPAN, +must have "struct lowpan_priv" placed at the beginning of netdev_priv. + +The priv_size of each interface should be calculated by: + + dev->priv_size = LOWPAN_PRIV_SIZE(LL_6LOWPAN_PRIV_DATA); + +Where LL_6LOWPAN_PRIV_DATA is the size of your linklayer 6lowpan private data struct. +To access the LL_6LOWPAN_PRIV_DATA structure you can cast: + + lowpan_priv(dev)->priv; + +to your LL_6LOWPAN_PRIV_DATA structure. + +Before registering the lowpan netdev interface you must run: + + lowpan_netdev_setup(dev, LOWPAN_LLTYPE_FOOBAR); + +where LOWPAN_LLTYPE_FOOBAR is a define for your 6LoWPAN linklayer type of +enum lowpan_lltypes. + +For example, to access the private data you can usually do: + +static inline struct lowpan_priv_foobar * +lowpan_foobar_priv(struct net_device *dev) +{ + return (struct lowpan_priv_foobar *)lowpan_priv(dev)->priv; +} + +switch (dev->type) { +case ARPHRD_6LOWPAN: + lowpan_priv = lowpan_priv(dev); + /* do great stuff which is ARPHRD_6LOWPAN related */ + switch (lowpan_priv->lltype) { + case LOWPAN_LLTYPE_FOOBAR: + /* do 802.15.4 6LoWPAN handling here */ + lowpan_foobar_priv(dev)->bar = foo; + break; + ... + } + break; +... +} + +In case of the generic 6lowpan branch ("net/6lowpan") you can remove the check +on ARPHRD_6LOWPAN, because you can be sure that these functions are called +by ARPHRD_6LOWPAN interfaces. diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 56db1efd7189..46e88ed7f41d 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1181,6 +1181,11 @@ tag - INTEGER Allows you to write a number, which can be used as required. Default value is 0. +xfrm4_gc_thresh - INTEGER + The threshold at which we will start garbage collecting for IPv4 + destination cache entries. At twice this value the system will + refuse new allocations. + Alexey Kuznetsov. 
kuznet@ms2.inr.ac.ru @@ -1617,6 +1622,11 @@ ratelimit - INTEGER otherwise the minimal space between responses in milliseconds. Default: 1000 +xfrm6_gc_thresh - INTEGER + The threshold at which we will start garbage collecting for IPv6 + destination cache entries. At twice this value the system will + refuse new allocations. + IPv6 Update by: Pekka Savola diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt index 9825f32a8634..476df0496686 100644 --- a/Documentation/networking/switchdev.txt +++ b/Documentation/networking/switchdev.txt @@ -367,4 +367,5 @@ driver's rocker_port_ipv4_resolve() for an example. The driver can monitor for updates to arp_tbl using the netevent notifier NETEVENT_NEIGH_UPDATE. The device can be programmed with resolved nexthops -for the routes as arp_tbl updates. +for the routes as arp_tbl updates. The driver implements ndo_neigh_destroy +to be notified when arp_tbl neighbor entries are purged from the port. diff --git a/Documentation/networking/vxlan.txt b/Documentation/networking/vxlan.txt index 6d993510f091..c28f4989c3f0 100644 --- a/Documentation/networking/vxlan.txt +++ b/Documentation/networking/vxlan.txt @@ -1,32 +1,36 @@ Virtual eXtensible Local Area Networking documentation ====================================================== -The VXLAN protocol is a tunnelling protocol that is designed to -solve the problem of limited number of available VLAN's (4096). -With VXLAN identifier is expanded to 24 bits. +The VXLAN protocol is a tunnelling protocol designed to solve the +problem of limited VLAN IDs (4096) in IEEE 802.1Q. With VXLAN the +size of the identifier is expanded to 24 bits (16777216). -It is a draft RFC standard, that is implemented by Cisco Nexus, -Vmware and Brocade. The protocol runs over UDP using a single -destination port (still not standardized by IANA). -This document describes the Linux kernel tunnel device, -there is also an implantation of VXLAN for Openvswitch. +VXLAN is described by IETF RFC 7348, and has been implemented by a +number of vendors. The protocol runs over UDP using a single +destination port. This document describes the Linux kernel tunnel +device; there is also a separate implementation of VXLAN for +Openvswitch. -Unlike most tunnels, a VXLAN is a 1 to N network, not just point -to point. A VXLAN device can either dynamically learn the IP address -of the other end, in a manner similar to a learning bridge, or the -forwarding entries can be configured statically. +Unlike most tunnels, a VXLAN is a 1 to N network, not just point to +point. A VXLAN device can learn the IP address of the other endpoint +either dynamically in a manner similar to a learning bridge, or make +use of statically-configured forwarding entries. -The management of vxlan is done in a similar fashion to it's -too closest neighbors GRE and VLAN. Configuring VXLAN requires -the version of iproute2 that matches the kernel release -where VXLAN was first merged upstream. +The management of vxlan is done in a manner similar to its two closest +neighbors, GRE and VLAN. Configuring VXLAN requires the version of +iproute2 that matches the kernel release where VXLAN was first merged +upstream. 1. Create vxlan device - # ip li add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1 + # ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1 dstport 4789 -This creates a new device (vxlan0). The device uses the -the multicast group 239.1.1.1 over eth1 to handle packets where -no entry is in the forwarding table. 
+This creates a new device named vxlan0. The device uses the multicast +group 239.1.1.1 over eth1 to handle traffic for which there is no +entry in the forwarding table. The destination port number is set to +the IANA-assigned value of 4789. The Linux implementation of VXLAN +pre-dates the IANA's selection of a standard destination port number +and uses the Linux-selected value by default to maintain backwards +compatibility. 2. Delete vxlan device # ip link delete vxlan0 diff --git a/MAINTAINERS b/MAINTAINERS index 98ede02a96f2..7b528b8c73f8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -158,6 +158,7 @@ L: linux-wpan@vger.kernel.org S: Maintained F: net/6lowpan/ F: include/net/6lowpan.h +F: Documentation/networking/6lowpan.txt 6PACK NETWORK DRIVER FOR AX.25 M: Andreas Koensgen @@ -5600,6 +5601,7 @@ F: kernel/irq/ IRQCHIP DRIVERS M: Thomas Gleixner M: Jason Cooper +M: Marc Zyngier L: linux-kernel@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core @@ -5608,11 +5610,14 @@ F: Documentation/devicetree/bindings/interrupt-controller/ F: drivers/irqchip/ IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) -M: Benjamin Herrenschmidt +M: Jiang Liu +M: Marc Zyngier S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core F: Documentation/IRQ-domain.txt F: include/linux/irqdomain.h F: kernel/irq/irqdomain.c +F: kernel/irq/msi.c ISAPNP M: Jaroslav Kysela diff --git a/Makefile b/Makefile index afabc44a349b..35b4c196c171 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 2 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc6 NAME = Hurr durr I'ma sheep # *DOCUMENTATION* @@ -597,6 +597,11 @@ endif # $(dot-config) # Defaults to vmlinux, but the arch makefile usually adds further targets all: vmlinux +# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default +# values of the respective KBUILD_* variables +ARCH_CPPFLAGS := +ARCH_AFLAGS := +ARCH_CFLAGS := include arch/$(SRCARCH)/Makefile KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) @@ -848,10 +853,10 @@ export mod_strip_cmd mod_compress_cmd = true ifdef CONFIG_MODULE_COMPRESS ifdef CONFIG_MODULE_COMPRESS_GZIP - mod_compress_cmd = gzip -n + mod_compress_cmd = gzip -n -f endif # CONFIG_MODULE_COMPRESS_GZIP ifdef CONFIG_MODULE_COMPRESS_XZ - mod_compress_cmd = xz + mod_compress_cmd = xz -f endif # CONFIG_MODULE_COMPRESS_XZ endif # CONFIG_MODULE_COMPRESS export mod_compress_cmd diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 91cf4055acab..bd4670d1b89b 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -313,11 +313,11 @@ config ARC_PAGE_SIZE_8K config ARC_PAGE_SIZE_16K bool "16KB" - depends on ARC_MMU_V3 + depends on ARC_MMU_V3 || ARC_MMU_V4 config ARC_PAGE_SIZE_4K bool "4KB" - depends on ARC_MMU_V3 + depends on ARC_MMU_V3 || ARC_MMU_V4 endchoice @@ -365,6 +365,11 @@ config ARC_HAS_LLSC default y depends on !ARC_CANT_LLSC +config ARC_STAR_9000923308 + bool "Workaround for llock/scond livelock" + default y + depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC + config ARC_HAS_SWAPE bool "Insn: SWAPE (endian-swap)" default y @@ -379,6 +384,10 @@ config ARC_HAS_LL64 dest operands with 2 possible source operands. 
default y +config ARC_HAS_DIV_REM + bool "Insn: div, divu, rem, remu" + default y + config ARC_HAS_RTC bool "Local 64-bit r/o cycle counter" default n diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 46d87310220d..8a27a48304a4 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -36,8 +36,16 @@ cflags-$(atleast_gcc44) += -fsection-anchors cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape +ifdef CONFIG_ISA_ARCV2 + ifndef CONFIG_ARC_HAS_LL64 -cflags-$(CONFIG_ISA_ARCV2) += -mno-ll64 +cflags-y += -mno-ll64 +endif + +ifndef CONFIG_ARC_HAS_DIV_REM +cflags-y += -mno-div-rem +endif + endif cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index 070f58827a5c..c8f57b8449dc 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ -89,11 +89,10 @@ #define ECR_C_BIT_DTLB_LD_MISS 8 #define ECR_C_BIT_DTLB_ST_MISS 9 - /* Auxiliary registers */ #define AUX_IDENTITY 4 #define AUX_INTR_VEC_BASE 0x25 - +#define AUX_NON_VOL 0x5e /* * Floating Pt Registers @@ -240,9 +239,9 @@ struct bcr_extn_xymem { struct bcr_perip { #ifdef CONFIG_CPU_BIG_ENDIAN - unsigned int start:8, pad2:8, sz:8, pad:8; + unsigned int start:8, pad2:8, sz:8, ver:8; #else - unsigned int pad:8, sz:8, pad2:8, start:8; + unsigned int ver:8, sz:8, pad2:8, start:8; #endif }; diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 03484cb4d16d..87d18ae53115 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -23,33 +23,60 @@ #define atomic_set(v, i) (((v)->counter) = (i)) -#ifdef CONFIG_ISA_ARCV2 -#define PREFETCHW " prefetchw [%1] \n" -#else -#define PREFETCHW +#ifdef CONFIG_ARC_STAR_9000923308 + +#define SCOND_FAIL_RETRY_VAR_DEF \ + unsigned int delay = 1, tmp; \ + +#define SCOND_FAIL_RETRY_ASM \ + " bz 4f \n" \ + " ; --- scond fail delay --- \n" \ + " mov %[tmp], %[delay] \n" /* tmp = delay */ \ + "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \ + " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \ + " rol %[delay], %[delay] \n" /* delay *= 2 */ \ + " b 1b \n" /* start over */ \ + "4: ; --- success --- \n" \ + +#define SCOND_FAIL_RETRY_VARS \ + ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \ + +#else /* !CONFIG_ARC_STAR_9000923308 */ + +#define SCOND_FAIL_RETRY_VAR_DEF + +#define SCOND_FAIL_RETRY_ASM \ + " bnz 1b \n" \ + +#define SCOND_FAIL_RETRY_VARS + #endif #define ATOMIC_OP(op, c_op, asm_op) \ static inline void atomic_##op(int i, atomic_t *v) \ { \ - unsigned int temp; \ + unsigned int val; \ + SCOND_FAIL_RETRY_VAR_DEF \ \ __asm__ __volatile__( \ - "1: \n" \ - PREFETCHW \ - " llock %0, [%1] \n" \ - " " #asm_op " %0, %0, %2 \n" \ - " scond %0, [%1] \n" \ - " bnz 1b \n" \ - : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \ - : "r"(&v->counter), "ir"(i) \ + "1: llock %[val], [%[ctr]] \n" \ + " " #asm_op " %[val], %[val], %[i] \n" \ + " scond %[val], [%[ctr]] \n" \ + " \n" \ + SCOND_FAIL_RETRY_ASM \ + \ + : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \ + SCOND_FAIL_RETRY_VARS \ + : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \ + [i] "ir" (i) \ : "cc"); \ } \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ static inline int atomic_##op##_return(int i, atomic_t *v) \ { \ - unsigned int temp; \ + unsigned int val; \ + SCOND_FAIL_RETRY_VAR_DEF \ \ /* \ * Explicit full memory barrier needed before/after as \ @@ -58,19 +85,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ 
smp_mb(); \ \ __asm__ __volatile__( \ - "1: \n" \ - PREFETCHW \ - " llock %0, [%1] \n" \ - " " #asm_op " %0, %0, %2 \n" \ - " scond %0, [%1] \n" \ - " bnz 1b \n" \ - : "=&r"(temp) \ - : "r"(&v->counter), "ir"(i) \ + "1: llock %[val], [%[ctr]] \n" \ + " " #asm_op " %[val], %[val], %[i] \n" \ + " scond %[val], [%[ctr]] \n" \ + " \n" \ + SCOND_FAIL_RETRY_ASM \ + \ + : [val] "=&r" (val) \ + SCOND_FAIL_RETRY_VARS \ + : [ctr] "r" (&v->counter), \ + [i] "ir" (i) \ : "cc"); \ \ smp_mb(); \ \ - return temp; \ + return val; \ } #else /* !CONFIG_ARC_HAS_LLSC */ @@ -150,6 +179,9 @@ ATOMIC_OP(and, &=, and) #undef ATOMIC_OPS #undef ATOMIC_OP_RETURN #undef ATOMIC_OP +#undef SCOND_FAIL_RETRY_VAR_DEF +#undef SCOND_FAIL_RETRY_ASM +#undef SCOND_FAIL_RETRY_VARS /** * __atomic_add_unless - add unless the number is a given value diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h index 91694ec1ce95..69095da1fcfd 100644 --- a/arch/arc/include/asm/ptrace.h +++ b/arch/arc/include/asm/ptrace.h @@ -20,20 +20,20 @@ struct pt_regs { /* Real registers */ - long bta; /* bta_l1, bta_l2, erbta */ + unsigned long bta; /* bta_l1, bta_l2, erbta */ - long lp_start, lp_end, lp_count; + unsigned long lp_start, lp_end, lp_count; - long status32; /* status32_l1, status32_l2, erstatus */ - long ret; /* ilink1, ilink2 or eret */ - long blink; - long fp; - long r26; /* gp */ + unsigned long status32; /* status32_l1, status32_l2, erstatus */ + unsigned long ret; /* ilink1, ilink2 or eret */ + unsigned long blink; + unsigned long fp; + unsigned long r26; /* gp */ - long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; + unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; - long sp; /* user/kernel sp depending on where we came from */ - long orig_r0; + unsigned long sp; /* User/Kernel depending on where we came from */ + unsigned long orig_r0; /* * To distinguish bet excp, syscall, irq @@ -55,13 +55,13 @@ struct pt_regs { unsigned long event; }; - long user_r25; + unsigned long user_r25; }; #else struct pt_regs { - long orig_r0; + unsigned long orig_r0; union { struct { @@ -76,26 +76,26 @@ struct pt_regs { unsigned long event; }; - long bta; /* bta_l1, bta_l2, erbta */ + unsigned long bta; /* bta_l1, bta_l2, erbta */ - long user_r25; + unsigned long user_r25; - long r26; /* gp */ - long fp; - long sp; /* user/kernel sp depending on where we came from */ + unsigned long r26; /* gp */ + unsigned long fp; + unsigned long sp; /* user/kernel sp depending on where we came from */ - long r12; + unsigned long r12; /*------- Below list auto saved by h/w -----------*/ - long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; + unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; - long blink; - long lp_end, lp_start, lp_count; + unsigned long blink; + unsigned long lp_end, lp_start, lp_count; - long ei, ldi, jli; + unsigned long ei, ldi, jli; - long ret; - long status32; + unsigned long ret; + unsigned long status32; }; #endif @@ -103,10 +103,10 @@ struct pt_regs { /* Callee saved registers - need to be saved only when you are scheduled out */ struct callee_regs { - long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13; + unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13; }; -#define instruction_pointer(regs) (unsigned long)((regs)->ret) +#define instruction_pointer(regs) ((regs)->ret) #define profile_pc(regs) instruction_pointer(regs) /* return 1 if user mode or 0 if kernel mode */ @@ -142,7 +142,7 @@ struct callee_regs { static inline long 
regs_return_value(struct pt_regs *regs) { - return regs->r0; + return (long)regs->r0; } #endif /* !__ASSEMBLY__ */ diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h index e1651df6a93d..db8c59d1eaeb 100644 --- a/arch/arc/include/asm/spinlock.h +++ b/arch/arc/include/asm/spinlock.h @@ -18,9 +18,518 @@ #define arch_spin_unlock_wait(x) \ do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) +#ifdef CONFIG_ARC_HAS_LLSC + +/* + * A normal LLOCK/SCOND based system, w/o need for livelock workaround + */ +#ifndef CONFIG_ARC_STAR_9000923308 + static inline void arch_spin_lock(arch_spinlock_t *lock) { - unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; + unsigned int val; + + smp_mb(); + + __asm__ __volatile__( + "1: llock %[val], [%[slock]] \n" + " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */ + " scond %[LOCKED], [%[slock]] \n" /* acquire */ + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [slock] "r" (&(lock->slock)), + [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + unsigned int val, got_it = 0; + + smp_mb(); + + __asm__ __volatile__( + "1: llock %[val], [%[slock]] \n" + " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */ + " scond %[LOCKED], [%[slock]] \n" /* acquire */ + " bnz 1b \n" + " mov %[got_it], 1 \n" + "4: \n" + " \n" + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + : [slock] "r" (&(lock->slock)), + [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + smp_mb(); + + lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__; + + smp_mb(); +} + +/* + * Read-write spinlocks, allowing multiple readers but only one writer. + * Unfair locking as Writers could be starved indefinitely by Reader(s) + */ + +static inline void arch_read_lock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * zero means writer holds the lock exclusively, deny Reader. + * Otherwise grant lock to first/subseq reader + * + * if (rw->counter > 0) { + * rw->counter--; + * ret = 1; + * } + */ + + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */ + " sub %[val], %[val], 1 \n" /* reader lock */ + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [rwlock] "r" (&(rw->counter)), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_read_trylock(arch_rwlock_t *rw) +{ + unsigned int val, got_it = 0; + + smp_mb(); + + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */ + " sub %[val], %[val], 1 \n" /* counter-- */ + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" /* retry if collided with someone */ + " mov %[got_it], 1 \n" + " \n" + "4: ; --- done --- \n" + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + : [rwlock] "r" (&(rw->counter)), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_write_lock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__), + * deny writer. Otherwise if unlocked grant to writer + * Hence the claim that Linux rwlocks are unfair to writers. + * (can be starved for an indefinite time by readers). 
+ * + * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) { + * rw->counter = 0; + * ret = 1; + * } + */ + + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */ + " mov %[val], %[WR_LOCKED] \n" + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [rwlock] "r" (&(rw->counter)), + [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_write_trylock(arch_rwlock_t *rw) +{ + unsigned int val, got_it = 0; + + smp_mb(); + + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */ + " mov %[val], %[WR_LOCKED] \n" + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" /* retry if collided with someone */ + " mov %[got_it], 1 \n" + " \n" + "4: ; --- done --- \n" + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + : [rwlock] "r" (&(rw->counter)), + [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_read_unlock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * rw->counter++; + */ + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " add %[val], %[val], 1 \n" + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [rwlock] "r" (&(rw->counter)) + : "memory", "cc"); + + smp_mb(); +} + +static inline void arch_write_unlock(arch_rwlock_t *rw) +{ + smp_mb(); + + rw->counter = __ARCH_RW_LOCK_UNLOCKED__; + + smp_mb(); +} + +#else /* CONFIG_ARC_STAR_9000923308 */ + +/* + * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping + * coherency transactions in the SCU. The exclusive line state keeps rotating + * among contending cores, leading to a never-ending cycle. So break the cycle + * by deferring the retry of a failed exclusive access (SCOND). The actual delay + * needed is a function of the number of contending cores as well as the unrelated + * coherency traffic from other cores. To keep the code simple, start off with + * a small delay of 1, which suffices in most cases, and in case of contention + * double the delay. Eventually the delay is sufficient such that the coherency + * pipeline is drained, thus a subsequent exclusive access would succeed. 
+ */ + +#define SCOND_FAIL_RETRY_VAR_DEF \ + unsigned int delay, tmp; \ + +#define SCOND_FAIL_RETRY_ASM \ + " ; --- scond fail delay --- \n" \ + " mov %[tmp], %[delay] \n" /* tmp = delay */ \ + "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \ + " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \ + " rol %[delay], %[delay] \n" /* delay *= 2 */ \ + " b 1b \n" /* start over */ \ + " \n" \ + "4: ; --- done --- \n" \ + +#define SCOND_FAIL_RETRY_VARS \ + ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \ + +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + unsigned int val; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[slock]] \n" + " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */ + " scond %[LOCKED], [%[slock]] \n" /* acquire */ + " bz 4f \n" /* done */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val) + SCOND_FAIL_RETRY_VARS + : [slock] "r" (&(lock->slock)), + [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + unsigned int val, got_it = 0; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[slock]] \n" + " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */ + " scond %[LOCKED], [%[slock]] \n" /* acquire */ + " bz.d 4f \n" + " mov.z %[got_it], 1 \n" /* got it */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + SCOND_FAIL_RETRY_VARS + : [slock] "r" (&(lock->slock)), + [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + smp_mb(); + + lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__; + + smp_mb(); +} + +/* + * Read-write spinlocks, allowing multiple readers but only one writer. + * Unfair locking as Writers could be starved indefinitely by Reader(s) + */ + +static inline void arch_read_lock(arch_rwlock_t *rw) +{ + unsigned int val; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + /* + * zero means writer holds the lock exclusively, deny Reader. 
+ * Otherwise grant lock to first/subseq reader + * + * if (rw->counter > 0) { + * rw->counter--; + * ret = 1; + * } + */ + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[rwlock]] \n" + " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */ + " sub %[val], %[val], 1 \n" /* reader lock */ + " scond %[val], [%[rwlock]] \n" + " bz 4f \n" /* done */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val) + SCOND_FAIL_RETRY_VARS + : [rwlock] "r" (&(rw->counter)), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_read_trylock(arch_rwlock_t *rw) +{ + unsigned int val, got_it = 0; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[rwlock]] \n" + " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */ + " sub %[val], %[val], 1 \n" /* counter-- */ + " scond %[val], [%[rwlock]] \n" + " bz.d 4f \n" + " mov.z %[got_it], 1 \n" /* got it */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + SCOND_FAIL_RETRY_VARS + : [rwlock] "r" (&(rw->counter)), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_write_lock(arch_rwlock_t *rw) +{ + unsigned int val; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + /* + * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__), + * deny writer. Otherwise if unlocked grant to writer + * Hence the claim that Linux rwlocks are unfair to writers. + * (can be starved for an indefinite time by readers). + * + * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) { + * rw->counter = 0; + * ret = 1; + * } + */ + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[rwlock]] \n" + " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */ + " mov %[val], %[WR_LOCKED] \n" + " scond %[val], [%[rwlock]] \n" + " bz 4f \n" + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val) + SCOND_FAIL_RETRY_VARS + : [rwlock] "r" (&(rw->counter)), + [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); +} + +/* 1 - lock taken successfully */ +static inline int arch_write_trylock(arch_rwlock_t *rw) +{ + unsigned int val, got_it = 0; + SCOND_FAIL_RETRY_VAR_DEF; + + smp_mb(); + + __asm__ __volatile__( + "0: mov %[delay], 1 \n" + "1: llock %[val], [%[rwlock]] \n" + " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */ + " mov %[val], %[WR_LOCKED] \n" + " scond %[val], [%[rwlock]] \n" + " bz.d 4f \n" + " mov.z %[got_it], 1 \n" /* got it */ + " \n" + SCOND_FAIL_RETRY_ASM + + : [val] "=&r" (val), + [got_it] "+&r" (got_it) + SCOND_FAIL_RETRY_VARS + : [rwlock] "r" (&(rw->counter)), + [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), + [WR_LOCKED] "ir" (0) + : "memory", "cc"); + + smp_mb(); + + return got_it; +} + +static inline void arch_read_unlock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * rw->counter++; + */ + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " add %[val], %[val], 1 \n" + " scond %[val], [%[rwlock]] \n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : [rwlock] "r" (&(rw->counter)) + : "memory", "cc"); + + smp_mb(); +} + +static inline void arch_write_unlock(arch_rwlock_t *rw) +{ + unsigned int val; + + smp_mb(); + + /* + * rw->counter = __ARCH_RW_LOCK_UNLOCKED__; + */ + __asm__ __volatile__( + "1: llock %[val], [%[rwlock]] \n" + " scond %[UNLOCKED], [%[rwlock]]\n" + " bnz 1b \n" + " \n" + : [val] "=&r" (val) + : 
[rwlock] "r" (&(rw->counter)), + [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__) + : "memory", "cc"); + + smp_mb(); +} + +#undef SCOND_FAIL_RETRY_VAR_DEF +#undef SCOND_FAIL_RETRY_ASM +#undef SCOND_FAIL_RETRY_VARS + +#endif /* CONFIG_ARC_STAR_9000923308 */ + +#else /* !CONFIG_ARC_HAS_LLSC */ + +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + unsigned int val = __ARCH_SPIN_LOCK_LOCKED__; /* * This smp_mb() is technically superfluous, we only need the one @@ -33,7 +542,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) __asm__ __volatile__( "1: ex %0, [%1] \n" " breq %0, %2, 1b \n" - : "+&r" (tmp) + : "+&r" (val) : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__) : "memory"); @@ -48,26 +557,27 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) smp_mb(); } +/* 1 - lock taken successfully */ static inline int arch_spin_trylock(arch_spinlock_t *lock) { - unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; + unsigned int val = __ARCH_SPIN_LOCK_LOCKED__; smp_mb(); __asm__ __volatile__( "1: ex %0, [%1] \n" - : "+r" (tmp) + : "+r" (val) : "r"(&(lock->slock)) : "memory"); smp_mb(); - return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__); + return (val == __ARCH_SPIN_LOCK_UNLOCKED__); } static inline void arch_spin_unlock(arch_spinlock_t *lock) { - unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__; + unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__; /* * RELEASE barrier: given the instructions avail on ARCv2, full barrier @@ -77,7 +587,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) __asm__ __volatile__( " ex %0, [%1] \n" - : "+r" (tmp) + : "+r" (val) : "r"(&(lock->slock)) : "memory"); @@ -90,19 +600,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) /* * Read-write spinlocks, allowing multiple readers but only one writer. + * Unfair locking as Writers could be starved indefinitely by Reader(s) * * The spinlock itself is contained in @counter and access to it is * serialized with @lock_mutex. - * - * Unfair locking as Writers could be starved indefinitely by Reader(s) */ -/* Would read_trylock() succeed? */ -#define arch_read_can_lock(x) ((x)->counter > 0) - -/* Would write_trylock() succeed? 
*/ -#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__) - /* 1 - lock taken successfully */ static inline int arch_read_trylock(arch_rwlock_t *rw) { @@ -173,6 +676,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) arch_spin_unlock(&(rw->lock_mutex)); } +#endif + +#define arch_read_can_lock(x) ((x)->counter > 0) +#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__) + #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h index 662627ced4f2..4e1ef5f650c6 100644 --- a/arch/arc/include/asm/spinlock_types.h +++ b/arch/arc/include/asm/spinlock_types.h @@ -26,7 +26,9 @@ typedef struct { */ typedef struct { volatile unsigned int counter; +#ifndef CONFIG_ARC_HAS_LLSC arch_spinlock_t lock_mutex; +#endif } arch_rwlock_t; #define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000 diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h index 76a7739aab1c..0b3ef63d4a03 100644 --- a/arch/arc/include/uapi/asm/ptrace.h +++ b/arch/arc/include/uapi/asm/ptrace.h @@ -32,20 +32,20 @@ */ struct user_regs_struct { - long pad; + unsigned long pad; struct { - long bta, lp_start, lp_end, lp_count; - long status32, ret, blink, fp, gp; - long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; - long sp; + unsigned long bta, lp_start, lp_end, lp_count; + unsigned long status32, ret, blink, fp, gp; + unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; + unsigned long sp; } scratch; - long pad2; + unsigned long pad2; struct { - long r25, r24, r23, r22, r21, r20; - long r19, r18, r17, r16, r15, r14, r13; + unsigned long r25, r24, r23, r22, r21, r20; + unsigned long r19, r18, r17, r16, r15, r14, r13; } callee; - long efa; /* break pt addr, for break points in delay slots */ - long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */ + unsigned long efa; /* break pt addr, for break points in delay slots */ + unsigned long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */ }; #endif /* !__ASSEMBLY__ */ diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index 18cc01591c96..cabde9dc0696 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -47,6 +47,7 @@ static void read_arc_build_cfg_regs(void) struct bcr_perip uncached_space; struct bcr_generic bcr; struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; + unsigned long perip_space; FIX_PTR(cpu); READ_BCR(AUX_IDENTITY, cpu->core); @@ -56,7 +57,12 @@ static void read_arc_build_cfg_regs(void) cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE); READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space); - BUG_ON((uncached_space.start << 24) != ARC_UNCACHED_ADDR_SPACE); + if (uncached_space.ver < 3) + perip_space = uncached_space.start << 24; + else + perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000; + + BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE); READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy); @@ -330,6 +336,10 @@ static void arc_chk_core_config(void) pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n"); else if (!cpu->extn.fpu_dp && fpu_enabled) panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n"); + + if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic && + !IS_ENABLED(CONFIG_ARC_STAR_9000923308)) + panic("llock/scond livelock workaround missing\n"); } /* diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c index 3364d2bbc515..4294761a2b3e 100644 
--- a/arch/arc/kernel/time.c +++ b/arch/arc/kernel/time.c @@ -203,34 +203,24 @@ static int arc_clkevent_set_next_event(unsigned long delta, return 0; } -static void arc_clkevent_set_mode(enum clock_event_mode mode, - struct clock_event_device *dev) +static int arc_clkevent_set_periodic(struct clock_event_device *dev) { - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - /* - * At X Hz, 1 sec = 1000ms -> X cycles; - * 10ms -> X / 100 cycles - */ - arc_timer_event_setup(arc_get_core_freq() / HZ); - break; - case CLOCK_EVT_MODE_ONESHOT: - break; - default: - break; - } - - return; + /* + * At X Hz, 1 sec = 1000ms -> X cycles; + * 10ms -> X / 100 cycles + */ + arc_timer_event_setup(arc_get_core_freq() / HZ); + return 0; } static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = { - .name = "ARC Timer0", - .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, - .mode = CLOCK_EVT_MODE_UNUSED, - .rating = 300, - .irq = TIMER0_IRQ, /* hardwired, no need for resources */ - .set_next_event = arc_clkevent_set_next_event, - .set_mode = arc_clkevent_set_mode, + .name = "ARC Timer0", + .features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC, + .rating = 300, + .irq = TIMER0_IRQ, /* hardwired, no need for resources */ + .set_next_event = arc_clkevent_set_next_event, + .set_state_periodic = arc_clkevent_set_periodic, }; static irqreturn_t timer_irq_handler(int irq, void *dev_id) @@ -240,7 +230,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id) * irq_set_chip_and_handler() asked for handle_percpu_devid_irq() */ struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); - int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC; + int irq_reenable = clockevent_state_periodic(evt); /* * Any write to CTRL reg ACks the interrupt, we rewrite the diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S index 1b2b3acfed52..0cab0b8a57c5 100644 --- a/arch/arc/lib/memcpy-archs.S +++ b/arch/arc/lib/memcpy-archs.S @@ -206,7 +206,7 @@ unalignedOffby3: ld.ab r6, [r1, 4] prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetch [r3, 32] ;Prefetch the next write location + prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 8) or r7, r7, r5 diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S index 92d573c734b5..365b18364815 100644 --- a/arch/arc/lib/memset-archs.S +++ b/arch/arc/lib/memset-archs.S @@ -10,12 +10,6 @@ #undef PREALLOC_NOT_AVAIL -#ifdef PREALLOC_NOT_AVAIL -#define PREWRITE(A,B) prefetchw [(A),(B)] -#else -#define PREWRITE(A,B) prealloc [(A),(B)] -#endif - ENTRY(memset) prefetchw [r0] ; Prefetch the write location mov.f 0, r2 @@ -51,9 +45,15 @@ ENTRY(memset) ;;; Convert len to Dwords, unfold x8 lsr.f lp_count, lp_count, 6 + lpnz @.Lset64bytes ;; LOOP START - PREWRITE(r3, 64) ;Prefetch the next write location +#ifdef PREALLOC_NOT_AVAIL + prefetchw [r3, 64] ;Prefetch the next write location +#else + prealloc [r3, 64] +#endif +#ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] @@ -62,16 +62,45 @@ ENTRY(memset) std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] +#else + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] +#endif .Lset64bytes: lsr.f lp_count, r2, 5 ;Last remaining max 124 
bytes lpnz .Lset32bytes ;; LOOP START prefetchw [r3, 32] ;Prefetch the next write location +#ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] std.ab r4, [r3, 8] +#else + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] + st.ab r4, [r3, 4] +#endif .Lset32bytes: and.f lp_count, r2, 0x1F ;Last remaining 31 bytes diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c index 99f7da513a48..e7769c3ab5f2 100644 --- a/arch/arc/plat-axs10x/axs10x.c +++ b/arch/arc/plat-axs10x/axs10x.c @@ -389,6 +389,21 @@ axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od) static void __init axs103_early_init(void) { + /* + * AXS103 SMP/QUAD configurations share a device tree which defaults + * to 90 MHz. However, recent failures of the Quad config revealed P&R + * timing violations, so clamp it down to a safe 50 MHz. Instead of + * duplicating defconfig/DT for SMP/QUAD, add a small hack. + * + * This is really hacky as of now; fix it properly by getting the + * number of cores as the return value of the platform's early SMP + * callback. + */ +#ifdef CONFIG_ARC_MCIP + unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F; + if (num_cores > 2) + arc_set_core_freq(50 * 1000000); +#endif + switch (arc_get_core_freq()/1000000) { case 33: axs103_set_freq(1, 1, 1); diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index 21fcc440fc1a..8b59c869c412 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -700,7 +700,7 @@ }; mac: ethernet@4a100000 { - compatible = "ti,cpsw"; + compatible = "ti,am335x-cpsw","ti,cpsw"; ti,hwmods = "cpgmac0"; clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>; clock-names = "fck", "cpts"; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 8f1e25bcecbd..0001e959bf49 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -1140,6 +1140,7 @@ ctrl-module = <&omap_control_sata>; clocks = <&sys_clkin1>, <&sata_ref_clk>; clock-names = "sysclk", "refclk"; + syscon-pllreset = <&scm_conf 0x3fc>; #phy-cells = <0>; }; @@ -1398,7 +1399,7 @@ }; mac: ethernet@4a100000 { - compatible = "ti,cpsw"; + compatible = "ti,dra7-cpsw","ti,cpsw"; ti,hwmods = "gmac"; clocks = <&dpll_gmac_ck>, <&gmac_gmii_ref_clk_div>; clock-names = "fck", "cpts"; diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi index d7201333e3bc..2db99433e17f 100644 --- a/arch/arm/boot/dts/exynos3250.dtsi +++ b/arch/arm/boot/dts/exynos3250.dtsi @@ -138,8 +138,8 @@ mipi_phy: video-phy@10020710 { compatible = "samsung,s5pv210-mipi-video-phy"; - reg = <0x10020710 8>; #phy-cells = <1>; + syscon = <&pmu_system_controller>; }; pd_cam: cam-power-domain@10023C00 { diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts index e0abfc3324d1..e050d85cdacd 100644 --- a/arch/arm/boot/dts/exynos4210-origen.dts +++ b/arch/arm/boot/dts/exynos4210-origen.dts @@ -127,6 +127,10 @@ }; }; +&cpu0 { + cpu0-supply = <&buck1_reg>; +}; + &fimd { pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts index 98f3ce65cb9a..ba34886f8b65 100644 --- a/arch/arm/boot/dts/exynos4210-trats.dts +++ b/arch/arm/boot/dts/exynos4210-trats.dts @@ -188,6 +188,10 @@ }; }; +&cpu0 { + cpu0-supply = <&varm_breg>; +}; + &dsi_0 { vddcore-supply = 
<&vusb_reg>; vddio-supply = <&vmipi_reg>; diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts index d4f2b11319dd..775892b2cc6a 100644 --- a/arch/arm/boot/dts/exynos4210-universal_c210.dts +++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts @@ -548,6 +548,10 @@ }; }; +&cpu0 { + cpu0-supply = <&vdd_arm_reg>; +}; + &pinctrl_1 { hdmi_hpd: hdmi-hpd { samsung,pins = "gpx3-7"; diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi index 10d3c173396e..3e5ba665d200 100644 --- a/arch/arm/boot/dts/exynos4210.dtsi +++ b/arch/arm/boot/dts/exynos4210.dtsi @@ -40,6 +40,18 @@ device_type = "cpu"; compatible = "arm,cortex-a9"; reg = <0x900>; + clocks = <&clock CLK_ARM_CLK>; + clock-names = "cpu"; + clock-latency = <160000>; + + operating-points = < + 1200000 1250000 + 1000000 1150000 + 800000 1075000 + 500000 975000 + 400000 975000 + 200000 950000 + >; cooling-min-level = <4>; cooling-max-level = <2>; #cooling-cells = <2>; /* min followed by max */ diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi index b6478e97d6a7..e6540b5cfa4c 100644 --- a/arch/arm/boot/dts/imx35.dtsi +++ b/arch/arm/boot/dts/imx35.dtsi @@ -286,8 +286,8 @@ can1: can@53fe4000 { compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan"; reg = <0x53fe4000 0x1000>; - clocks = <&clks 33>; - clock-names = "ipg"; + clocks = <&clks 33>, <&clks 33>; + clock-names = "ipg", "per"; interrupts = <43>; status = "disabled"; }; @@ -295,8 +295,8 @@ can2: can@53fe8000 { compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan"; reg = <0x53fe8000 0x1000>; - clocks = <&clks 34>; - clock-names = "ipg"; + clocks = <&clks 34>, <&clks 34>; + clock-names = "ipg", "per"; interrupts = <44>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi index 4773d6af66a0..d56d68fe7ffc 100644 --- a/arch/arm/boot/dts/k2e-clocks.dtsi +++ b/arch/arm/boot/dts/k2e-clocks.dtsi @@ -13,9 +13,8 @@ clocks { #clock-cells = <0>; compatible = "ti,keystone,main-pll-clock"; clocks = <&refclksys>; - reg = <0x02620350 4>, <0x02310110 4>; - reg-names = "control", "multiplier"; - fixed-postdiv = <2>; + reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>; + reg-names = "control", "multiplier", "post-divider"; }; papllclk: papllclk@2620358 { diff --git a/arch/arm/boot/dts/k2hk-clocks.dtsi b/arch/arm/boot/dts/k2hk-clocks.dtsi index d5adee3c0067..af9b7190533a 100644 --- a/arch/arm/boot/dts/k2hk-clocks.dtsi +++ b/arch/arm/boot/dts/k2hk-clocks.dtsi @@ -22,9 +22,8 @@ clocks { #clock-cells = <0>; compatible = "ti,keystone,main-pll-clock"; clocks = <&refclksys>; - reg = <0x02620350 4>, <0x02310110 4>; - reg-names = "control", "multiplier"; - fixed-postdiv = <2>; + reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>; + reg-names = "control", "multiplier", "post-divider"; }; papllclk: papllclk@2620358 { diff --git a/arch/arm/boot/dts/k2l-clocks.dtsi b/arch/arm/boot/dts/k2l-clocks.dtsi index eb1e3e29f073..ef8464bb11ff 100644 --- a/arch/arm/boot/dts/k2l-clocks.dtsi +++ b/arch/arm/boot/dts/k2l-clocks.dtsi @@ -22,9 +22,8 @@ clocks { #clock-cells = <0>; compatible = "ti,keystone,main-pll-clock"; clocks = <&refclksys>; - reg = <0x02620350 4>, <0x02310110 4>; - reg-names = "control", "multiplier"; - fixed-postdiv = <2>; + reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>; + reg-names = "control", "multiplier", "post-divider"; }; papllclk: papllclk@2620358 { diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts 
index 3d0b8755caee..3d25dba143a5 100644 --- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts +++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts @@ -17,6 +17,7 @@ }; aliases { + serial1 = &uart1; stmpe-i2c0 = &stmpe0; stmpe-i2c1 = &stmpe1; }; diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts index 85d3b95dfdba..3c140d05f796 100644 --- a/arch/arm/boot/dts/ste-nomadik-s8815.dts +++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts @@ -15,6 +15,10 @@ bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk"; }; + aliases { + serial1 = &uart1; + }; + src@101e0000 { /* These chrystal drivers are not used on this board */ disable-sxtalo; diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi index 9a5f2ba139b7..ef794a33b4dc 100644 --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi @@ -757,6 +757,7 @@ clock-names = "uartclk", "apb_pclk"; pinctrl-names = "default"; pinctrl-0 = <&uart0_default_mux>; + status = "disabled"; }; uart1: uart@101fb000 { diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index d78c12e7cb5e..486cc4ded190 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np, * registers. This address is needed early so the OCP registers that * are part of the device's address space can be ioremapped properly. * + * If SYSC access is not needed, the registers will not be remapped + * and non-availability of MPU access is not treated as an error. + * * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and * -ENXIO on absent or invalid register target address space. */ @@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, _save_mpu_port_index(oh); + /* if we don't need sysc access we don't need to ioremap */ + if (!oh->class->sysc) + return 0; + + /* we can't continue without MPU PORT if we need sysc access */ if (oh->_int_flags & _HWMOD_NO_MPU_PORT) return -ENXIO; @@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data, oh->name); /* Extract the IO space from device tree blob */ - if (!np) + if (!np) { + pr_err("omap_hwmod: %s: no dt node\n", oh->name); return -ENXIO; + } va_start = of_iomap(np, index + oh->mpu_rt_idx); } else { @@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data) oh->name, np->name); } - if (oh->class->sysc) { - r = _init_mpu_rt_base(oh, NULL, index, np); - if (r < 0) { - WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", - oh->name); - return 0; - } + r = _init_mpu_rt_base(oh, NULL, index, np); + if (r < 0) { + WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", + oh->name); + return 0; } r = _init_clocks(oh, NULL); diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index 2606c6608bd8..562247bced49 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -827,8 +827,7 @@ static struct omap_hwmod_class_sysconfig dra7xx_gpmc_sysc = { .syss_offs = 0x0014, .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), - .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | - SIDLE_SMART_WKUP), + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), .sysc_fields = &omap_hwmod_sysc_type1, }; @@ -844,7 +843,7 @@ static struct omap_hwmod 
dra7xx_gpmc_hwmod = { .class = &dra7xx_gpmc_hwmod_class, .clkdm_name = "l3main1_clkdm", /* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */ - .flags = HWMOD_SWSUP_SIDLE | DEBUG_OMAP_GPMC_HWMOD_FLAGS, + .flags = DEBUG_OMAP_GPMC_HWMOD_FLAGS, .main_clk = "l3_iclk_div", .prcm = { .omap4 = { diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index 0689c3fb56e3..58093edeea2e 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi @@ -823,7 +823,7 @@ device_type = "dma"; reg = <0x0 0x1f270000 0x0 0x10000>, <0x0 0x1f200000 0x0 0x10000>, - <0x0 0x1b008000 0x0 0x2000>, + <0x0 0x1b000000 0x0 0x400000>, <0x0 0x1054a000 0x0 0x100>; interrupts = <0x0 0x82 0x4>, <0x0 0xb8 0x4>, diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 1670f15ef69e..948f0ad2de23 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) * Other callers might not initialize the si_lsb field, * so check explicitely for the right codes here. */ - if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) + if (from->si_signo == SIGBUS && + (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)) err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); #endif break; @@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) { - memset(to, 0, sizeof *to); - if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) || copy_from_user(to->_sifields._pad, from->_sifields._pad, SI_PAD_SIZE)) diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index ec37ab3f524f..97bc68f4c689 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -199,16 +199,15 @@ up_fail: */ void update_vsyscall(struct timekeeper *tk) { - struct timespec xtime_coarse; u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter"); ++vdso_data->tb_seq_count; smp_wmb(); - xtime_coarse = __current_kernel_time(); vdso_data->use_syscall = use_syscall; - vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec; - vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec; + vdso_data->xtime_coarse_sec = tk->xtime_sec; + vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >> + tk->tkr_mono.shift; vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index cee5f93e5712..199a8357838c 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -151,7 +151,6 @@ config BMIPS_GENERIC select BCM7120_L2_IRQ select BRCMSTB_L2_IRQ select IRQ_MIPS_CPU - select RAW_IRQ_ACCESSORS select DMA_NONCOHERENT select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index 01a644f174dd..1ba21204ebe0 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c @@ -190,6 +190,7 @@ int get_c0_perfcount_int(void) { return ATH79_MISC_IRQ(5); } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 56f5d080ef9d..b7fa9ae28c36 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id) 
cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h deleted file mode 100644 index 11d3b572b1b3..000000000000 --- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H -#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H - -#include - -#define plat_post_dma_flush bmips_post_dma_flush - -#include - -#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */ diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 9d8106758142..ae8569475264 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) * Make sure the buddy is global too (if it's !none, * it better already be global) */ +#ifdef CONFIG_SMP + /* + * For SMP, multiple CPUs can race, so we need to do + * this atomically. + */ +#ifdef CONFIG_64BIT +#define LL_INSN "lld" +#define SC_INSN "scd" +#else /* CONFIG_32BIT */ +#define LL_INSN "ll" +#define SC_INSN "sc" +#endif + unsigned long page_global = _PAGE_GLOBAL; + unsigned long tmp; + + __asm__ __volatile__ ( + " .set push\n" + " .set noreorder\n" + "1: " LL_INSN " %[tmp], %[buddy]\n" + " bnez %[tmp], 2f\n" + " or %[tmp], %[tmp], %[global]\n" + " " SC_INSN " %[tmp], %[buddy]\n" + " beqz %[tmp], 1b\n" + " nop\n" + "2:\n" + " .set pop" + : [buddy] "+m" (buddy->pte), + [tmp] "=&r" (tmp) + : [global] "r" (page_global)); +#else /* !CONFIG_SMP */ if (pte_none(*buddy)) pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; +#endif /* CONFIG_SMP */ } #endif } diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index 16f1ea9ab191..03722d4326a1 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu) extern void play_dead(void); #endif -extern asmlinkage void smp_call_function_interrupt(void); - static inline void arch_send_call_function_single_ipi(int cpu) { extern struct plat_smp_ops *mp_ops; /* private */ diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index 28d6d9364bd1..a71da576883c 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -152,6 +152,31 @@ .set noreorder bltz k0, 8f move k1, sp +#ifdef CONFIG_EVA + /* + * Flush interAptiv's Return Prediction Stack (RPS) by writing + * EntryHi. Toggling Config7.RPS is slower and less portable. + * + * The RPS isn't automatically flushed when exceptions are + * taken, which can result in kernel mode speculative accesses + * to user addresses if the RPS mispredicts. That's harmless + * when user and kernel share the same address space, but with + * EVA the same user segments may be unmapped to kernel mode, + * even containing sensitive MMIO regions or invalid memory. + * + * This can happen when the kernel sets the return address to + * ret_from_* and jr's to the exception handler, which looks + * more like a tail call than a function call. If nested calls + * don't evict the last user address in the RPS, it will + * mispredict the return and fetch from a user controlled + * address into the icache. + * + * More recent EVA-capable cores with MAAR to restrict + * speculative accesses aren't affected. 
+ */ + MFC0 k0, CP0_ENTRYHI + MTC0 k0, CP0_ENTRYHI +#endif .set reorder /* Called from user mode, new stack. */ get_saved_sp diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 3e4491aa6d6b..789d7bf4fef3 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, unsigned long __user *user_mask_ptr) { unsigned int real_len; - cpumask_t mask; + cpumask_t allowed, mask; int retval; struct task_struct *p; @@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, if (retval) goto out_unlock; - cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask); + cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed); + cpumask_and(&mask, &allowed, cpu_active_mask); out_unlock: read_unlock(&tasklist_lock); diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index b130033838ba..5fcec3032f38 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -38,7 +38,7 @@ char *mips_get_machine_name(void) return mips_machine_name; } -#ifdef CONFIG_OF +#ifdef CONFIG_USE_OF void __init early_init_dt_add_memory_arch(u64 base, u64 size) { return add_memory_region(base, size, BOOT_MEM_RAM); diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S index 74bab9ddd0e1..c6bbf2165051 100644 --- a/arch/mips/kernel/relocate_kernel.S +++ b/arch/mips/kernel/relocate_kernel.S @@ -24,7 +24,7 @@ LEAF(relocate_new_kernel) process_entry: PTR_L s2, (s0) - PTR_ADD s0, s0, SZREG + PTR_ADDIU s0, s0, SZREG /* * In case of a kdump/crash kernel, the indirection page is not @@ -61,9 +61,9 @@ copy_word: /* copy page word by word */ REG_L s5, (s2) REG_S s5, (s4) - PTR_ADD s4, s4, SZREG - PTR_ADD s2, s2, SZREG - LONG_SUB s6, s6, 1 + PTR_ADDIU s4, s4, SZREG + PTR_ADDIU s2, s2, SZREG + LONG_ADDIU s6, s6, -1 beq s6, zero, process_entry b copy_word b process_entry diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 19a7705f2a01..5d7f2634996f 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) { - memset(to, 0, sizeof *to); - if (copy_from_user(to, from, 3*sizeof(int)) || copy_from_user(to->_sifields._pad, from->_sifields._pad, SI_PAD_SIZE32)) diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 336708ae5c5b..78cf8c2f1de0 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id) if (action == 0) scheduler_ipi(); else - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } @@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index d0744cc77ea7..a31896c33716 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -192,16 +192,6 @@ asmlinkage void start_secondary(void) cpu_startup_entry(CPUHP_ONLINE); } -/* - * Call into both interrupt handlers, as we share the IPI for them - */ -void __irq_entry 
smp_call_function_interrupt(void) -{ - irq_enter(); - generic_smp_call_function_interrupt(); - irq_exit(); -} - static void stop_this_cpu(void *dummy) { /* diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index e207a43b5f8f..8ea28e6ab37d 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task, void show_stack(struct task_struct *task, unsigned long *sp) { struct pt_regs regs; + mm_segment_t old_fs = get_fs(); if (sp) { regs.regs[29] = (unsigned long)sp; regs.regs[31] = 0; @@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp) prepare_frametrace(®s); } } + /* + * show_stack() deals exclusively with kernel mode, so be sure to access + * the stack in the kernel (not user) address space. + */ + set_fs(KERNEL_DS); show_stacktrace(task, ®s); + set_fs(old_fs); } static void show_code(unsigned int __user *pc) @@ -1519,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs) const int field = 2 * sizeof(unsigned long); int multi_match = regs->cp0_status & ST0_TS; enum ctx_state prev_state; + mm_segment_t old_fs = get_fs(); prev_state = exception_enter(); show_regs(regs); @@ -1540,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs) dump_tlb_all(); } + if (!user_mode(regs)) + set_fs(KERNEL_DS); + show_code((unsigned int __user *) regs->cp0_epc); + set_fs(old_fs); + /* * Some chips may have other causes of machine check (e.g. SB1 * graduation timer) diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index af84bef0c90d..eb3efd137fd1 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -438,7 +438,7 @@ do { \ : "memory"); \ } while(0) -#define StoreDW(addr, value, res) \ +#define _StoreDW(addr, value, res) \ do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index 6ab10573490d..2c218c3bbca5 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } @@ -466,6 +466,7 @@ int get_c0_perfcount_int(void) { return ltq_perfcount_irq; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c index 509877c6e9d9..1a4738a8f2d3 100644 --- a/arch/mips/loongson64/loongson-3/smp.c +++ b/arch/mips/loongson64/loongson-3/smp.c @@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); - if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + if (action & SMP_CALL_FUNCTION) { + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + } if (action & SMP_ASK_C0COUNT) { BUG_ON(cpu != 0); diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 77d96db8253c..aab218c36e0d 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -160,18 +160,18 @@ static inline void setup_protection_map(void) protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); - protection_map[4] = __pgprot(_page_cachable_default | 
_PAGE_PRESENT | _PAGE_NO_READ); + protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT); - protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); + protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ); protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE); - protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); + protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT); protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT); - protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ); + protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE); protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE); } else { diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 36c0f26fac6b..852a41c6da45 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -133,7 +133,8 @@ good_area: #endif goto bad_area; } - if (!(vma->vm_flags & VM_READ)) { + if (!(vma->vm_flags & VM_READ) && + exception_epc(regs) != address) { #if 0 pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n", raw_smp_processor_id(), diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index d1392f8f5811..fa8f591f3713 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c @@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 5625b190edc0..b7bf721eabf5 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -154,6 +154,7 @@ int get_c0_perfcount_int(void) return mips_cpu_perf_irq; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { @@ -171,14 +172,17 @@ unsigned int get_c0_compare_int(void) static void __init init_rtc(void) { - /* stop the clock whilst setting it up */ - CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL); + unsigned char freq, ctrl; - /* 32KHz time base */ - CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT); + /* Set 32KHz time base if not already set */ + freq = CMOS_READ(RTC_FREQ_SELECT); + if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ) + CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT); - /* start the clock */ - CMOS_WRITE(RTC_24H, RTC_CONTROL); + /* Ensure SET bit is clear so RTC can run */ + ctrl = CMOS_READ(RTC_CONTROL); + if (ctrl & RTC_SET) + CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL); } void __init plat_time_init(void) diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c index e1d69895fb1d..a120b7a5a8fe 100644 --- a/arch/mips/mti-sead3/sead3-time.c +++ b/arch/mips/mti-sead3/sead3-time.c @@ -77,6 +77,7 @@ int get_c0_perfcount_int(void) return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; return -1; } 
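The EXPORT_SYMBOL_GPL(get_c0_perfcount_int) additions repeated across these platform files let modular consumers (perf/oprofile backends built as modules) resolve the symbol at load time. A hypothetical module-side sketch of such a consumer (the my_perf_* names are invented; on MIPS the declaration lives in asm/time.h):

#include <linux/interrupt.h>
#include <linux/module.h>
#include <asm/time.h>           /* get_c0_perfcount_int() on MIPS */

static int my_perf_cookie;

static irqreturn_t my_perf_irq(int irq, void *dev_id)
{
        /* read and acknowledge the performance counters here */
        return IRQ_HANDLED;
}

static int __init my_perf_init(void)
{
        int irq = get_c0_perfcount_int();

        if (irq < 0)
                return -ENODEV; /* no dedicated perfcount interrupt */
        return request_irq(irq, my_perf_irq, IRQF_SHARED,
                           "my_perf", &my_perf_cookie);
}
module_init(my_perf_init);
MODULE_LICENSE("GPL");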
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c index dc3e327fbbac..f5fff228b347 100644 --- a/arch/mips/netlogic/common/smp.c +++ b/arch/mips/netlogic/common/smp.c @@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc) { clear_c0_eimr(irq); ack_c0_eirr(irq); - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); set_c0_eimr(irq); } diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c index 42181c7105df..f8d3e081b2eb 100644 --- a/arch/mips/paravirt/paravirt-smp.c +++ b/arch/mips/paravirt/paravirt-smp.c @@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id) static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c index 7c73fcb92a10..8a377346f0ca 100644 --- a/arch/mips/pistachio/time.c +++ b/arch/mips/pistachio/time.c @@ -26,6 +26,7 @@ int get_c0_perfcount_int(void) { return gic_get_c0_perfcount_int(); } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); int get_c0_fdc_int(void) { diff --git a/arch/mips/pmcs-msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c index 10170580a2de..ffa0f7101a97 100644 --- a/arch/mips/pmcs-msp71xx/msp_smp.c +++ b/arch/mips/pmcs-msp71xx/msp_smp.c @@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c index 53707aacc0f8..8c624a8b9ea2 100644 --- a/arch/mips/ralink/irq.c +++ b/arch/mips/ralink/irq.c @@ -89,6 +89,7 @@ int get_c0_perfcount_int(void) { return rt_perfcount_irq; } +EXPORT_SYMBOL_GPL(get_c0_perfcount_int); unsigned int get_c0_compare_int(void) { diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 3fbaef97a1b8..16ec4e12daa3 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c @@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void) scheduler_ipi(); } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ); - smp_call_function_interrupt(); + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ); - smp_call_function_interrupt(); + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); } else #endif { diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c index af7d44edd9a8..4c71aea25663 100644 --- a/arch/mips/sibyte/bcm1480/smp.c +++ b/arch/mips/sibyte/bcm1480/smp.c @@ -29,8 +29,6 @@ #include #include -extern void smp_call_function_interrupt(void); - /* * These are routines for dealing with the bcm1480 smp capabilities * independent of board/firmware @@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); - if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + if (action & SMP_CALL_FUNCTION) { + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + } } diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c index c0c4b3f88a08..1cf66f5ff23d 100644 --- a/arch/mips/sibyte/sb1250/smp.c +++ 
b/arch/mips/sibyte/sb1250/smp.c @@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void) if (action & SMP_RESCHEDULE_YOURSELF) scheduler_ipi(); - if (action & SMP_CALL_FUNCTION) - smp_call_function_interrupt(); + if (action & SMP_CALL_FUNCTION) { + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + } } diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index d3a831ac0f92..da50e0c9c57e 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s) int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) { - memset(to, 0, sizeof *to); - if (copy_from_user(to, from, 3*sizeof(int)) || copy_from_user(to->_sifields._pad, from->_sifields._pad, SI_PAD_SIZE32)) diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 5cf5e6ea213b..7cf0df859d05 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -1478,7 +1478,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe) } /* Unmask the event */ - if (eeh_enabled()) + if (ret == EEH_NEXT_ERR_NONE && eeh_enabled()) enable_irq(eeh_event_irq); return ret; diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 5738d315248b..85cbc96eff6c 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2220,7 +2220,7 @@ static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb) static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, unsigned levels, unsigned long limit, - unsigned long *current_offset) + unsigned long *current_offset, unsigned long *total_allocated) { struct page *tce_mem = NULL; __be64 *addr, *tmp; @@ -2236,6 +2236,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, } addr = page_address(tce_mem); memset(addr, 0, allocated); + *total_allocated += allocated; --levels; if (!levels) { @@ -2245,7 +2246,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, for (i = 0; i < entries; ++i) { tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift, - levels, limit, current_offset); + levels, limit, current_offset, total_allocated); if (!tmp) break; @@ -2267,7 +2268,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, struct iommu_table *tbl) { void *addr; - unsigned long offset = 0, level_shift; + unsigned long offset = 0, level_shift, total_allocated = 0; const unsigned window_shift = ilog2(window_size); unsigned entries_shift = window_shift - page_shift; unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT); @@ -2286,7 +2287,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, /* Allocate TCE table */ addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, - levels, tce_table_size, &offset); + levels, tce_table_size, &offset, &total_allocated); /* addr==NULL means that the first level allocation failed */ if (!addr) @@ -2308,7 +2309,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, page_shift); tbl->it_level_size = 1ULL << (level_shift - 3); tbl->it_indirect_levels = levels - 1; - tbl->it_allocated_size = offset; + tbl->it_allocated_size = total_allocated; pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n", window_size, tce_table_size, bus_offset); diff --git a/arch/s390/kvm/kvm-s390.c 
b/arch/s390/kvm/kvm-s390.c index 2078f92d15ac..f32f843a3631 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1742,10 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu) static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) { - if (!vcpu->requests) - return 0; retry: kvm_s390_vcpu_request_handled(vcpu); + if (!vcpu->requests) + return 0; /* * We use MMU_RELOAD just to re-arm the ipte notifier for the * guest prefix page. gmap_ipte_notify will wait on the ptl lock. diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 9f4bbc09bf07..eeda051442c3 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1032,7 +1032,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i MAX_TAIL_CALL_CNT, 0, 0x2); /* - * prog = array->prog[index]; + * prog = array->ptrs[index]; * if (prog == NULL) * goto out; */ @@ -1041,7 +1041,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3); /* lg %r1,prog(%b2,%r1) */ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2, - REG_1, offsetof(struct bpf_array, prog)); + REG_1, offsetof(struct bpf_array, ptrs)); /* clgij %r1,0,0x8,label0 */ EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8); diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h index 1f0aa2024e94..6424249d5f78 100644 --- a/arch/sparc/include/asm/visasm.h +++ b/arch/sparc/include/asm/visasm.h @@ -28,16 +28,10 @@ * Must preserve %o5 between VISEntryHalf and VISExitHalf */ #define VISEntryHalf \ - rd %fprs, %o5; \ - andcc %o5, FPRS_FEF, %g0; \ - be,pt %icc, 297f; \ - sethi %hi(298f), %g7; \ - sethi %hi(VISenterhalf), %g1; \ - jmpl %g1 + %lo(VISenterhalf), %g0; \ - or %g7, %lo(298f), %g7; \ - clr %o5; \ -297: wr %o5, FPRS_FEF, %fprs; \ -298: + VISEntry + +#define VISExitHalf \ + VISExit #define VISEntryHalfFast(fail_label) \ rd %fprs, %o5; \ @@ -47,7 +41,7 @@ ba,a,pt %xcc, fail_label; \ 297: wr %o5, FPRS_FEF, %fprs; -#define VISExitHalf \ +#define VISExitHalfFast \ wr %o5, 0, %fprs; #ifndef __ASSEMBLY__ diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S index 140527a20e7d..83aeeb1dffdb 100644 --- a/arch/sparc/lib/NG4memcpy.S +++ b/arch/sparc/lib/NG4memcpy.S @@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ add %o0, 0x40, %o0 bne,pt %icc, 1b LOAD(prefetch, %g1 + 0x200, #n_reads_strong) +#ifdef NON_USER_COPY + VISExitHalfFast +#else VISExitHalf - +#endif brz,pn %o2, .Lexit cmp %o2, 19 ble,pn %icc, .Lsmall_unaligned diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S index b320ae9e2e2e..a063d84336d6 100644 --- a/arch/sparc/lib/VISsave.S +++ b/arch/sparc/lib/VISsave.S @@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 stx %g3, [%g6 + TI_GSR] 2: add %g6, %g1, %g3 - cmp %o5, FPRS_DU - be,pn %icc, 6f - sll %g1, 3, %g1 + mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5 + sll %g1, 3, %g1 stb %o5, [%g3 + TI_FPSAVED] rd %gsr, %g2 add %g6, %g1, %g3 @@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 .align 32 80: jmpl %g7 + %g0, %g0 nop - -6: ldub [%g3 + TI_FPSAVED], %o5 - or %o5, FPRS_DU, %o5 - add %g6, TI_FPREGS+0x80, %g2 - stb %o5, [%g3 + TI_FPSAVED] - - sll %g1, 5, %g1 - add %g6, TI_FPREGS+0xc0, %g3 - wr %g0, FPRS_FEF, %fprs - membar #Sync - stda %f32, [%g2 + %g1] ASI_BLK_P - stda %f48, [%g3 + %g1] ASI_BLK_P - membar #Sync - ba,pt %xcc, 80f - nop - - .align 32 -80: jmpl %g7 + %g0, %g0 - nop - - .align 32 -VISenterhalf: - ldub [%g6 + TI_FPDEPTH], 
%g1 - brnz,a,pn %g1, 1f - cmp %g1, 1 - stb %g0, [%g6 + TI_FPSAVED] - stx %fsr, [%g6 + TI_XFSR] - clr %o5 - jmpl %g7 + %g0, %g0 - wr %g0, FPRS_FEF, %fprs - -1: bne,pn %icc, 2f - srl %g1, 1, %g1 - ba,pt %xcc, vis1 - sub %g7, 8, %g7 -2: addcc %g6, %g1, %g3 - sll %g1, 3, %g1 - andn %o5, FPRS_DU, %g2 - stb %g2, [%g3 + TI_FPSAVED] - - rd %gsr, %g2 - add %g6, %g1, %g3 - stx %g2, [%g3 + TI_GSR] - add %g6, %g1, %g2 - stx %fsr, [%g2 + TI_XFSR] - sll %g1, 5, %g1 -3: andcc %o5, FPRS_DL, %g0 - be,pn %icc, 4f - add %g6, TI_FPREGS, %g2 - - add %g6, TI_FPREGS+0x40, %g3 - membar #Sync - stda %f0, [%g2 + %g1] ASI_BLK_P - stda %f16, [%g3 + %g1] ASI_BLK_P - membar #Sync - ba,pt %xcc, 4f - nop - - .align 32 -4: and %o5, FPRS_DU, %o5 - jmpl %g7 + %g0, %g0 - wr %o5, FPRS_FEF, %fprs diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c index 1d649a95660c..8069ce12f20b 100644 --- a/arch/sparc/lib/ksyms.c +++ b/arch/sparc/lib/ksyms.c @@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page); void VISenter(void); EXPORT_SYMBOL(VISenter); -/* CRYPTO code needs this */ -void VISenterhalf(void); -EXPORT_SYMBOL(VISenterhalf); - extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, unsigned long *); diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c index e8c2c04143cd..c667e104a0c2 100644 --- a/arch/tile/kernel/compat_signal.c +++ b/arch/tile/kernel/compat_signal.c @@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo))) return -EFAULT; - memset(to, 0, sizeof(*to)); - err = __get_user(to->si_signo, &from->si_signo); err |= __get_user(to->si_errno, &from->si_errno); err |= __get_user(to->si_code, &from->si_code); diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 2c82bd150d43..7d69afd8b6fa 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -1193,6 +1193,10 @@ static efi_status_t setup_e820(struct boot_params *params, unsigned int e820_type = 0; unsigned long m = efi->efi_memmap; +#ifdef CONFIG_X86_64 + m |= (u64)efi->efi_memmap_hi << 32; +#endif + d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size)); switch (d->type) { case EFI_RESERVED_TYPE: diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index a0bf89fd2647..4e10d73cf018 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -280,21 +280,6 @@ static inline void clear_LDT(void) set_ldt(NULL, 0); } -/* - * load one particular LDT into the current CPU - */ -static inline void load_LDT_nolock(mm_context_t *pc) -{ - set_ldt(pc->ldt, pc->size); -} - -static inline void load_LDT(mm_context_t *pc) -{ - preempt_disable(); - load_LDT_nolock(pc); - preempt_enable(); -} - static inline unsigned long get_desc_base(const struct desc_struct *desc) { return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h index 09b9620a73b4..364d27481a52 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h @@ -9,8 +9,7 @@ * we put the segment information here. */ typedef struct { - void *ldt; - int size; + struct ldt_struct *ldt; #ifdef CONFIG_X86_64 /* True if mm supports a task running in 32 bit compatibility mode. 
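The eboot.c change above is the standard fix for EFI's split address fields: 32-bit firmware structures carry a 64-bit pointer as separate low and high words, and the high half (efi_memmap_hi) must be merged in on 64-bit kernels. The idiom, reduced to a helper with illustrative names:

#include <stdint.h>

/* Rebuild a 64-bit address from a structure that stores it as two
 * 32-bit halves, as boot_params does for efi_memmap/efi_memmap_hi,
 * e.g. m = join_hi_lo(efi->efi_memmap_hi, efi->efi_memmap); */
static inline uint64_t join_hi_lo(uint32_t hi, uint32_t lo)
{
        return ((uint64_t)hi << 32) | lo;
}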
*/ diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 804a3a6030ca..984abfe47edc 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -33,6 +33,50 @@ static inline void load_mm_cr4(struct mm_struct *mm) static inline void load_mm_cr4(struct mm_struct *mm) {} #endif +/* + * ldt_structs can be allocated, used, and freed, but they are never + * modified while live. + */ +struct ldt_struct { + /* + * Xen requires page-aligned LDTs with special permissions. This is + * needed to prevent us from installing evil descriptors such as + * call gates. On native, we could merge the ldt_struct and LDT + * allocations, but it's not worth trying to optimize. + */ + struct desc_struct *entries; + int size; +}; + +static inline void load_mm_ldt(struct mm_struct *mm) +{ + struct ldt_struct *ldt; + + /* lockless_dereference synchronizes with smp_store_release */ + ldt = lockless_dereference(mm->context.ldt); + + /* + * Any change to mm->context.ldt is followed by an IPI to all + * CPUs with the mm active. The LDT will not be freed until + * after the IPI is handled by all such CPUs. This means that, + * if the ldt_struct changes before we return, the values we see + * will be safe, and the new values will be loaded before we run + * any user code. + * + * NB: don't try to convert this to use RCU without extreme care. + * We would still need IRQs off, because we don't want to change + * the local LDT after an IPI loaded a newer value than the one + * that we can see. + */ + + if (unlikely(ldt)) + set_ldt(ldt->entries, ldt->size); + else + clear_LDT(); + + DEBUG_LOCKS_WARN_ON(preemptible()); +} + /* * Used for LDT copy/destruction. */ @@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, * was called and then modify_ldt changed * prev->context.ldt but suppressed an IPI to this CPU. * In this case, prev->context.ldt != NULL, because we - * never free an LDT while the mm still exists. That - * means that next->context.ldt != prev->context.ldt, - * because mms never share an LDT. + * never set context.ldt to NULL while the mm still + * exists. That means that next->context.ldt != + * prev->context.ldt, because mms never share an LDT. */ if (unlikely(prev->context.ldt != next->context.ldt)) - load_LDT_nolock(&next->context); + load_mm_ldt(next); } #ifdef CONFIG_SMP else { @@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, load_cr3(next->pgd); trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); load_mm_cr4(next); - load_LDT_nolock(&next->context); + load_mm_ldt(next); } } #endif diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h index 6fe6b182c998..9dfce4e0417d 100644 --- a/arch/x86/include/asm/sigcontext.h +++ b/arch/x86/include/asm/sigcontext.h @@ -57,9 +57,9 @@ struct sigcontext { unsigned long ip; unsigned long flags; unsigned short cs; - unsigned short __pad2; /* Was called gs, but was always zero. */ - unsigned short __pad1; /* Was called fs, but was always zero. 
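The load_mm_ldt() comment above compresses the whole synchronization story: writers publish an immutable ldt_struct with release semantics, readers consume it with lockless_dereference(), and an IPI (not RCU) bounds the old pointer's lifetime. Stripped of the LDT details, the publish/consume pairing looks like the sketch below (struct cfg is invented; freeing the stale pointer is deliberately omitted, since the real code defers it until after the IPI):

#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/barrier.h>

struct cfg {
        int a, b;
};

static struct cfg *cur_cfg;     /* the published pointer */

/* Reader: lockless_dereference() orders the loads of c->a and c->b
 * after the load of the pointer itself. */
static int cfg_sum(void)
{
        struct cfg *c = lockless_dereference(cur_cfg);

        return c ? c->a + c->b : 0;
}

/* Writer: fully initialize, then publish with release semantics so
 * no CPU can observe the pointer before the fields it points to. */
static int cfg_set(int a, int b)
{
        struct cfg *c = kmalloc(sizeof(*c), GFP_KERNEL);

        if (!c)
                return -ENOMEM;
        c->a = a;
        c->b = b;
        smp_store_release(&cur_cfg, c);
        return 0;
}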
*/ - unsigned short ss; + unsigned short gs; + unsigned short fs; + unsigned short __pad0; unsigned long err; unsigned long trapno; unsigned long oldmask; diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h index 0e8a973de9ee..40836a9a7250 100644 --- a/arch/x86/include/uapi/asm/sigcontext.h +++ b/arch/x86/include/uapi/asm/sigcontext.h @@ -177,24 +177,9 @@ struct sigcontext { __u64 rip; __u64 eflags; /* RFLAGS */ __u16 cs; - - /* - * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"), - * Linux saved and restored fs and gs in these slots. This - * was counterproductive, as fsbase and gsbase were never - * saved, so arch_prctl was presumably unreliable. - * - * If these slots are ever needed for any other purpose, there - * is some risk that very old 64-bit binaries could get - * confused. I doubt that many such binaries still work, - * though, since the same patch in 2.5.64 also removed the - * 64-bit set_thread_area syscall, so it appears that there is - * no TLS API that works in both pre- and post-2.5.64 kernels. - */ - __u16 __pad2; /* Was gs. */ - __u16 __pad1; /* Was fs. */ - - __u16 ss; + __u16 gs; + __u16 fs; + __u16 __pad0; __u64 err; __u64 trapno; __u64 oldmask; diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 845dc0df2002..206052e55517 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -943,7 +943,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info) */ if (irq < nr_legacy_irqs() && data->count == 1) { if (info->ioapic_trigger != data->trigger) - mp_register_handler(irq, data->trigger); + mp_register_handler(irq, info->ioapic_trigger); data->entry.trigger = data->trigger = info->ioapic_trigger; data->entry.polarity = data->polarity = info->ioapic_polarity; } diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 922c5e0cea4c..cb9e5df42dd2 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1410,7 +1410,7 @@ void cpu_init(void) load_sp0(t, ¤t->thread); set_tss_desc(cpu, t); load_TR_desc(); - load_LDT(&init_mm.context); + load_mm_ldt(&init_mm); clear_all_debug_regs(); dbg_restore_debug_regs(); @@ -1459,7 +1459,7 @@ void cpu_init(void) load_sp0(t, thread); set_tss_desc(cpu, t); load_TR_desc(); - load_LDT(&init_mm.context); + load_mm_ldt(&init_mm); t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 3658de47900f..9469dfa55607 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -2179,21 +2179,25 @@ static unsigned long get_segment_base(unsigned int segment) int idx = segment >> 3; if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) { + struct ldt_struct *ldt; + if (idx > LDT_ENTRIES) return 0; - if (idx > current->active_mm->context.size) + /* IRQs are off, so this synchronizes with smp_store_release */ + ldt = lockless_dereference(current->active_mm->context.ldt); + if (!ldt || idx > ldt->size) return 0; - desc = current->active_mm->context.ldt; + desc = &ldt->entries[idx]; } else { if (idx > GDT_ENTRIES) return 0; - desc = raw_cpu_ptr(gdt_page.gdt); + desc = raw_cpu_ptr(gdt_page.gdt) + idx; } - return get_desc_base(desc + idx); + return get_desc_base(desc); } #ifdef CONFIG_COMPAT diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index c37886d759cc..2bcc0525f1c1 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -12,6 +12,7 @@ #include #include 
#include +#include #include #include @@ -20,82 +21,82 @@ #include #include -#ifdef CONFIG_SMP +/* context.lock is held for us, so we don't need any locking. */ static void flush_ldt(void *current_mm) { - if (current->active_mm == current_mm) - load_LDT(¤t->active_mm->context); + mm_context_t *pc; + + if (current->active_mm != current_mm) + return; + + pc = ¤t->active_mm->context; + set_ldt(pc->ldt->entries, pc->ldt->size); } -#endif -static int alloc_ldt(mm_context_t *pc, int mincount, int reload) +/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */ +static struct ldt_struct *alloc_ldt_struct(int size) { - void *oldldt, *newldt; - int oldsize; + struct ldt_struct *new_ldt; + int alloc_size; - if (mincount <= pc->size) - return 0; - oldsize = pc->size; - mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) & - (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1)); - if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE) - newldt = vmalloc(mincount * LDT_ENTRY_SIZE); + if (size > LDT_ENTRIES) + return NULL; + + new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL); + if (!new_ldt) + return NULL; + + BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct)); + alloc_size = size * LDT_ENTRY_SIZE; + + /* + * Xen is very picky: it requires a page-aligned LDT that has no + * trailing nonzero bytes in any page that contains LDT descriptors. + * Keep it simple: zero the whole allocation and never allocate less + * than PAGE_SIZE. + */ + if (alloc_size > PAGE_SIZE) + new_ldt->entries = vzalloc(alloc_size); else - newldt = (void *)__get_free_page(GFP_KERNEL); + new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!newldt) - return -ENOMEM; - - if (oldsize) - memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE); - oldldt = pc->ldt; - memset(newldt + oldsize * LDT_ENTRY_SIZE, 0, - (mincount - oldsize) * LDT_ENTRY_SIZE); - - paravirt_alloc_ldt(newldt, mincount); - -#ifdef CONFIG_X86_64 - /* CHECKME: Do we really need this ? */ - wmb(); -#endif - pc->ldt = newldt; - wmb(); - pc->size = mincount; - wmb(); - - if (reload) { -#ifdef CONFIG_SMP - preempt_disable(); - load_LDT(pc); - if (!cpumask_equal(mm_cpumask(current->mm), - cpumask_of(smp_processor_id()))) - smp_call_function(flush_ldt, current->mm, 1); - preempt_enable(); -#else - load_LDT(pc); -#endif + if (!new_ldt->entries) { + kfree(new_ldt); + return NULL; } - if (oldsize) { - paravirt_free_ldt(oldldt, oldsize); - if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE) - vfree(oldldt); - else - put_page(virt_to_page(oldldt)); - } - return 0; + + new_ldt->size = size; + return new_ldt; } -static inline int copy_ldt(mm_context_t *new, mm_context_t *old) +/* After calling this, the LDT is immutable. */ +static void finalize_ldt_struct(struct ldt_struct *ldt) { - int err = alloc_ldt(new, old->size, 0); - int i; + paravirt_alloc_ldt(ldt->entries, ldt->size); +} - if (err < 0) - return err; +/* context.lock is held */ +static void install_ldt(struct mm_struct *current_mm, + struct ldt_struct *ldt) +{ + /* Synchronizes with lockless_dereference in load_mm_ldt. */ + smp_store_release(¤t_mm->context.ldt, ldt); - for (i = 0; i < old->size; i++) - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); - return 0; + /* Activate the LDT for all CPUs using current_mm. 
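alloc_ldt_struct() above also shows the usual small/large allocation split: kzalloc() (never less than a page here, for Xen's benefit) up to PAGE_SIZE, vzalloc() beyond it, with free_ldt_struct() recomputing the same size test so the matching free routine runs. The bare pattern, as a pair of hypothetical helpers (later kernels grew kvmalloc()/kvfree() for exactly this):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *blob_zalloc(size_t size)
{
        if (size > PAGE_SIZE)
                return vzalloc(size);
        return kzalloc(size, GFP_KERNEL);
}

/* The size test must match blob_zalloc()'s exactly: vfree() on a
 * kmalloc pointer (or kfree() on a vmalloc one) corrupts memory. */
static void blob_free(void *p, size_t size)
{
        if (size > PAGE_SIZE)
                vfree(p);
        else
                kfree(p);
}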
*/ + on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true); +} + +static void free_ldt_struct(struct ldt_struct *ldt) +{ + if (likely(!ldt)) + return; + + paravirt_free_ldt(ldt->entries, ldt->size); + if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE) + vfree(ldt->entries); + else + kfree(ldt->entries); + kfree(ldt); } /* @@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old) */ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { + struct ldt_struct *new_ldt; struct mm_struct *old_mm; int retval = 0; mutex_init(&mm->context.lock); - mm->context.size = 0; old_mm = current->mm; - if (old_mm && old_mm->context.size > 0) { - mutex_lock(&old_mm->context.lock); - retval = copy_ldt(&mm->context, &old_mm->context); - mutex_unlock(&old_mm->context.lock); + if (!old_mm) { + mm->context.ldt = NULL; + return 0; } + + mutex_lock(&old_mm->context.lock); + if (!old_mm->context.ldt) { + mm->context.ldt = NULL; + goto out_unlock; + } + + new_ldt = alloc_ldt_struct(old_mm->context.ldt->size); + if (!new_ldt) { + retval = -ENOMEM; + goto out_unlock; + } + + memcpy(new_ldt->entries, old_mm->context.ldt->entries, + new_ldt->size * LDT_ENTRY_SIZE); + finalize_ldt_struct(new_ldt); + + mm->context.ldt = new_ldt; + +out_unlock: + mutex_unlock(&old_mm->context.lock); return retval; } @@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) */ void destroy_context(struct mm_struct *mm) { - if (mm->context.size) { -#ifdef CONFIG_X86_32 - /* CHECKME: Can this ever happen ? */ - if (mm == current->active_mm) - clear_LDT(); -#endif - paravirt_free_ldt(mm->context.ldt, mm->context.size); - if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE) - vfree(mm->context.ldt); - else - put_page(virt_to_page(mm->context.ldt)); - mm->context.size = 0; - } + free_ldt_struct(mm->context.ldt); + mm->context.ldt = NULL; } static int read_ldt(void __user *ptr, unsigned long bytecount) { - int err; + int retval; unsigned long size; struct mm_struct *mm = current->mm; - if (!mm->context.size) - return 0; + mutex_lock(&mm->context.lock); + + if (!mm->context.ldt) { + retval = 0; + goto out_unlock; + } + if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES) bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES; - mutex_lock(&mm->context.lock); - size = mm->context.size * LDT_ENTRY_SIZE; + size = mm->context.ldt->size * LDT_ENTRY_SIZE; if (size > bytecount) size = bytecount; - err = 0; - if (copy_to_user(ptr, mm->context.ldt, size)) - err = -EFAULT; - mutex_unlock(&mm->context.lock); - if (err < 0) - goto error_return; + if (copy_to_user(ptr, mm->context.ldt->entries, size)) { + retval = -EFAULT; + goto out_unlock; + } + if (size != bytecount) { - /* zero-fill the rest */ - if (clear_user(ptr + size, bytecount - size) != 0) { - err = -EFAULT; - goto error_return; + /* Zero-fill the rest and pretend we read bytecount bytes. 
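read_ldt() now follows the canonical shape for bounded read-out syscalls: take the lock, copy out at most the table's current size, then zero-fill the remainder so the caller always appears to read bytecount bytes. The same skeleton with the LDT specifics removed (struct blob_ctx and blob_read are invented names):

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

struct blob_ctx {
        struct mutex lock;
        void *data;
        size_t size;
};

static ssize_t blob_read(struct blob_ctx *ctx, void __user *buf,
                         size_t count)
{
        size_t size;
        ssize_t ret;

        mutex_lock(&ctx->lock);
        size = min(count, ctx->size);
        if (copy_to_user(buf, ctx->data, size)) {
                ret = -EFAULT;
                goto out_unlock;
        }
        /* Zero-fill the tail so short data still reads as count. */
        if (size < count && clear_user(buf + size, count - size)) {
                ret = -EFAULT;
                goto out_unlock;
        }
        ret = count;
out_unlock:
        mutex_unlock(&ctx->lock);
        return ret;
}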
*/ + if (clear_user(ptr + size, bytecount - size)) { + retval = -EFAULT; + goto out_unlock; } } - return bytecount; -error_return: - return err; + retval = bytecount; + +out_unlock: + mutex_unlock(&mm->context.lock); + return retval; } static int read_default_ldt(void __user *ptr, unsigned long bytecount) @@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) struct desc_struct ldt; int error; struct user_desc ldt_info; + int oldsize, newsize; + struct ldt_struct *new_ldt, *old_ldt; error = -EINVAL; if (bytecount != sizeof(ldt_info)) @@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) goto out; } - mutex_lock(&mm->context.lock); - if (ldt_info.entry_number >= mm->context.size) { - error = alloc_ldt(¤t->mm->context, - ldt_info.entry_number + 1, 1); - if (error < 0) - goto out_unlock; - } - - /* Allow LDTs to be cleared by the user. */ - if (ldt_info.base_addr == 0 && ldt_info.limit == 0) { - if (oldmode || LDT_empty(&ldt_info)) { - memset(&ldt, 0, sizeof(ldt)); - goto install; + if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) || + LDT_empty(&ldt_info)) { + /* The user wants to clear the entry. */ + memset(&ldt, 0, sizeof(ldt)); + } else { + if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { + error = -EINVAL; + goto out; } + + fill_ldt(&ldt, &ldt_info); + if (oldmode) + ldt.avl = 0; } - if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { - error = -EINVAL; + mutex_lock(&mm->context.lock); + + old_ldt = mm->context.ldt; + oldsize = old_ldt ? old_ldt->size : 0; + newsize = max((int)(ldt_info.entry_number + 1), oldsize); + + error = -ENOMEM; + new_ldt = alloc_ldt_struct(newsize); + if (!new_ldt) goto out_unlock; - } - fill_ldt(&ldt, &ldt_info); - if (oldmode) - ldt.avl = 0; + if (old_ldt) + memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE); + new_ldt->entries[ldt_info.entry_number] = ldt; + finalize_ldt_struct(new_ldt); - /* Install the new entry ... */ -install: - write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt); + install_ldt(mm, new_ldt); + free_ldt_struct(old_ldt); error = 0; out_unlock: diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 71d7849a07f7..f6b916387590 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -121,11 +121,11 @@ void __show_regs(struct pt_regs *regs, int all) void release_thread(struct task_struct *dead_task) { if (dead_task->mm) { - if (dead_task->mm->context.size) { + if (dead_task->mm->context.ldt) { pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n", dead_task->comm, dead_task->mm->context.ldt, - dead_task->mm->context.size); + dead_task->mm->context.ldt->size); BUG(); } } diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 206996c1669d..71820c42b6ce 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) COPY(r15); #endif /* CONFIG_X86_64 */ +#ifdef CONFIG_X86_32 COPY_SEG_CPL3(cs); COPY_SEG_CPL3(ss); +#else /* !CONFIG_X86_32 */ + /* Kernel saves and restores only the CS segment register on signals, + * which is the bare minimum needed to allow mixed 32/64-bit code. + * App's signal handler can save/restore other segments if needed. 
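For context, the interface all of this ldt.c surgery serves is modify_ldt(2). A minimal user-space exercise of the write and read paths (x86-only; there is no glibc wrapper, hence raw syscall(2), and func 0x11 selects the "new mode" write):

#include <asm/ldt.h>            /* struct user_desc, LDT_ENTRY_SIZE */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct user_desc d;
        unsigned char buf[LDT_ENTRY_SIZE * 4];

        memset(&d, 0, sizeof(d));
        d.entry_number = 0;
        d.base_addr = 0;
        d.limit = 0xfffff;
        d.seg_32bit = 1;
        d.limit_in_pages = 1;

        /* func 0x11: install one entry ("new mode" write_ldt) */
        if (syscall(SYS_modify_ldt, 0x11, &d, sizeof(d)))
                perror("modify_ldt(write)");

        /* func 0: read the LDT back; a short table is zero-filled,
         * as the read_ldt() hunk above guarantees */
        if (syscall(SYS_modify_ldt, 0, buf, sizeof(buf)) < 0)
                perror("modify_ldt(read)");

        return 0;
}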
*/ + COPY_SEG_CPL3(cs); +#endif /* CONFIG_X86_32 */ get_user_ex(tmpflags, &sc->flags); regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); @@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, #else /* !CONFIG_X86_32 */ put_user_ex(regs->flags, &sc->flags); put_user_ex(regs->cs, &sc->cs); - put_user_ex(0, &sc->__pad2); - put_user_ex(0, &sc->__pad1); - put_user_ex(regs->ss, &sc->ss); + put_user_ex(0, &sc->gs); + put_user_ex(0, &sc->fs); #endif /* CONFIG_X86_32 */ put_user_ex(fpstate, &sc->fpstate); @@ -451,19 +457,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig, regs->sp = (unsigned long)frame; - /* - * Set up the CS and SS registers to run signal handlers in - * 64-bit mode, even if the handler happens to be interrupting - * 32-bit or 16-bit code. - * - * SS is subtle. In 64-bit mode, we don't need any particular - * SS descriptor, but we do need SS to be valid. It's possible - * that the old SS is entirely bogus -- this can happen if the - * signal we're trying to deliver is #GP or #SS caused by a bad - * SS value. - */ + /* Set up the CS register to run signal handlers in 64-bit mode, + even if the handler happens to be interrupting 32-bit code. */ regs->cs = __USER_CS; - regs->ss = __USER_DS; return 0; } diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index 9b4d51d0c0d0..6273324186ac 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c @@ -5,6 +5,7 @@ #include #include #include +#include unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) { @@ -30,10 +31,11 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re seg &= ~7UL; mutex_lock(&child->mm->context.lock); - if (unlikely((seg >> 3) >= child->mm->context.size)) + if (unlikely(!child->mm->context.ldt || + (seg >> 3) >= child->mm->context.ldt->size)) addr = -1L; /* bogus selector, access would fault */ else { - desc = child->mm->context.ldt + seg; + desc = &child->mm->context.ldt->entries[seg]; base = get_desc_base(desc); /* 16-bit code segment? */ diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c index dc0a84a6f309..9e8bf13572e6 100644 --- a/arch/x86/kvm/mtrr.c +++ b/arch/x86/kvm/mtrr.c @@ -672,16 +672,16 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) if (iter.mtrr_disabled) return mtrr_disabled_type(); + /* not contained in any MTRRs. */ + if (type == -1) + return mtrr_default_type(mtrr_state); + /* * We just check one page, partially covered by MTRRs is * impossible. */ WARN_ON(iter.partial_map); - /* not contained in any MTRRs. 
*/ - if (type == -1) - return mtrr_default_type(mtrr_state); - return type; } EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index ec5214f39aa8..70efcd0940f9 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -246,7 +246,7 @@ static void emit_prologue(u8 **pprog) * goto out; * if (++tail_call_cnt > MAX_TAIL_CALL_CNT) * goto out; - * prog = array->prog[index]; + * prog = array->ptrs[index]; * if (prog == NULL) * goto out; * goto *(prog->bpf_func + prologue_size); @@ -284,9 +284,9 @@ static void emit_bpf_tail_call(u8 **pprog) EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */ - /* prog = array->prog[index]; */ + /* prog = array->ptrs[index]; */ EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */ - offsetof(struct bpf_array, prog)); + offsetof(struct bpf_array, ptrs)); EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */ /* if (prog == NULL) diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index cfba30f27392..e4308fe6afe8 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -972,6 +972,11 @@ u64 efi_mem_attributes(unsigned long phys_addr) static int __init arch_parse_efi_cmdline(char *str) { + if (!str) { + pr_warn("need at least one option\n"); + return -EINVAL; + } + if (parse_option_str(str, "old_map")) set_bit(EFI_OLD_MEMMAP, &efi.flags); if (parse_option_str(str, "debug")) diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 0d7dd1f5ac36..9ab52791fed5 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -22,6 +22,7 @@ #include #include #include +#include #ifdef CONFIG_X86_32 __visible unsigned long saved_context_ebx; @@ -153,7 +154,7 @@ static void fix_processor_context(void) syscall_init(); /* This sets MSR_*STAR and related */ #endif load_TR_desc(); /* This does ltr */ - load_LDT(¤t->active_mm->context); /* This does lldt */ + load_mm_ldt(current->active_mm); /* This does lldt */ fpu__resume_cpu(); } diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 7322755f337a..4b6e29ac0968 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -13,13 +13,13 @@ CFLAGS_mmu.o := $(nostackp) obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ time.o xen-asm.o xen-asm_$(BITS).o \ grant-table.o suspend.o platform-pci-unplug.o \ - p2m.o + p2m.o apic.o obj-$(CONFIG_EVENT_TRACING) += trace.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o -obj-$(CONFIG_XEN_DOM0) += apic.o vga.o +obj-$(CONFIG_XEN_DOM0) += vga.o obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o obj-$(CONFIG_XEN_EFI) += efi.o diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 0b95c9b8283f..11d6fb4e8483 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot) pte_t pte; unsigned long pfn; struct page *page; + unsigned char dummy; ptep = lookup_address((unsigned long)v, &level); BUG_ON(ptep == NULL); @@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot) pte = pfn_pte(pfn, prot); + /* + * Careful: update_va_mapping() will fail if the virtual address + * we're poking isn't populated in the page tables. We don't + * need to worry about the direct map (that's always in the page + * tables), but we need to be careful about vmap space. 
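The efi.c hunk above fixes a classic early_param pitfall: passing the bare option ("efi" instead of "efi=old_map") hands the callback a NULL string. Any such parser wants the same guard up front; a sketch with a hypothetical "myopt" parameter:

#include <linux/init.h>
#include <linux/kernel.h>

static bool myopt_debug;

static int __init parse_myopt(char *str)
{
        /* "myopt" with no '=' arrives as NULL, not "". */
        if (!str)
                return -EINVAL;

        if (parse_option_str(str, "debug"))
                myopt_debug = true;

        return 0;
}
early_param("myopt", parse_myopt);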
In + * particular, the top level page table can lazily propagate + * entries between processes, so if we've switched mms since we + * vmapped the target in the first place, we might not have the + * top-level page table entry populated. + * + * We disable preemption because we want the same mm active when + * we probe the target and when we issue the hypercall. We'll + * have the same nominal mm, but if we're a kernel thread, lazy + * mm dropping could change our pgd. + * + * Out of an abundance of caution, this uses __get_user() to fault + * in the target address just in case there's some obscure case + * in which the target address isn't readable. + */ + + preempt_disable(); + + pagefault_disable(); /* Avoid warnings due to being atomic. */ + __get_user(dummy, (unsigned char __user __force *)v); + pagefault_enable(); + if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0)) BUG(); @@ -503,6 +530,8 @@ BUG(); } else kmap_flush_unused(); + + preempt_enable(); } static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries) @@ -510,6 +539,17 @@ const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE; int i; + /* + * We need to mark all the aliases of the LDT pages RO. We + * don't need to call vm_flush_aliases(), though, since that's + * only responsible for flushing aliases out of the TLBs, not the + * page tables, and Xen will flush the TLB for us if needed. + * + * To avoid confusing future readers: none of this is necessary + * to load the LDT. The hypervisor only checks this when the + * LDT is faulted in due to subsequent descriptor access. + */ + for(i = 0; i < entries; i += entries_per_page) set_aliased_prot(ldt + i, PAGE_KERNEL_RO); } diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index c20fe29e65f4..2292721b1d10 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -101,17 +101,15 @@ struct dom0_vga_console_info; #ifdef CONFIG_XEN_DOM0 void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size); -void __init xen_init_apic(void); #else static inline void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size) { } -static inline void __init xen_init_apic(void) -{ -} #endif +void __init xen_init_apic(void); + #ifdef CONFIG_XEN_EFI extern void xen_efi_init(void); #else diff --git a/drivers/base/property.c b/drivers/base/property.c index f3f6d167f3f1..4c2082899322 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -16,6 +16,8 @@ #include #include #include +#include +#include /** * device_add_property_set - Add a collection of properties to a device object. @@ -533,3 +535,79 @@ bool device_dma_is_coherent(struct device *dev) return coherent; } EXPORT_SYMBOL_GPL(device_dma_is_coherent); + +/** + * device_get_phy_mode - Get phy mode for given device + * @dev: Pointer to the given device + * + * The function gets the phy interface string from the 'phy-mode' or + * 'phy-connection-type' property, and returns its index in the phy_modes + * table, or a negative errno on error.
+ */ +int device_get_phy_mode(struct device *dev) +{ + const char *pm; + int err, i; + + err = device_property_read_string(dev, "phy-mode", &pm); + if (err < 0) + err = device_property_read_string(dev, + "phy-connection-type", &pm); + if (err < 0) + return err; + + for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) + if (!strcasecmp(pm, phy_modes(i))) + return i; + + return -ENODEV; +} +EXPORT_SYMBOL_GPL(device_get_phy_mode); + +static void *device_get_mac_addr(struct device *dev, + const char *name, char *addr, + int alen) +{ + int ret = device_property_read_u8_array(dev, name, addr, alen); + + if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr)) + return addr; + return NULL; +} + +/** + * device_get_mac_address - Get the MAC for a given device + * @dev: Pointer to the device + * @addr: Address of buffer to store the MAC in + * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN + * + * Search the firmware node for the best MAC address to use. 'mac-address' is + * checked first, because that is supposed to contain the "most recent" MAC + * address. If that isn't set, then 'local-mac-address' is checked next, + * because that is the default address. If that isn't set, then the obsolete + * 'address' is checked, just in case we're using an old device tree. + * + * Note that the 'address' property is supposed to contain a virtual address of + * the register set, but some DTS files have redefined that property to be the + * MAC address. + * + * All-zero MAC addresses are rejected, because those could be properties that + * exist in the firmware tables, but were not updated by the firmware. For + * example, the DTS could define 'mac-address' and 'local-mac-address', with + * zero MAC addresses. Some older U-Boots only initialized 'local-mac-address'. + * In this case, the real MAC is in 'local-mac-address', and 'mac-address' + * exists but is all zeros.
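A typical consumer of the two helpers added above, in an Ethernet driver's firmware-probing path (foo_eth_parse_fwnode is hypothetical, and the snippet assumes the matching declarations that this series adds to linux/property.h):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/property.h>

static int foo_eth_parse_fwnode(struct device *dev,
                                struct net_device *ndev)
{
        int phy_mode;

        phy_mode = device_get_phy_mode(dev);
        if (phy_mode < 0)
                return phy_mode;        /* missing/unknown phy-mode */

        /* Fall back to a random address if firmware gave us none. */
        if (!device_get_mac_address(dev, (char *)ndev->dev_addr,
                                    ETH_ALEN))
                eth_hw_addr_random(ndev);

        return 0;
}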
+*/ +void *device_get_mac_address(struct device *dev, char *addr, int alen) +{ + addr = device_get_mac_addr(dev, "mac-address", addr, alen); + if (addr) + return addr; + + addr = device_get_mac_addr(dev, "local-mac-address", addr, alen); + if (addr) + return addr; + + return device_get_mac_addr(dev, "address", addr, alen); +} +EXPORT_SYMBOL(device_get_mac_address); diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 81751a49d8bf..56486d92c4e7 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map, if (!blk) return -ENOMEM; - present = krealloc(rbnode->cache_present, - BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL); - if (!present) { - kfree(blk); - return -ENOMEM; + if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) { + present = krealloc(rbnode->cache_present, + BITS_TO_LONGS(blklen) * sizeof(*present), + GFP_KERNEL); + if (!present) { + kfree(blk); + return -ENOMEM; + } + + memset(present + BITS_TO_LONGS(rbnode->blklen), 0, + (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen)) + * sizeof(*present)); + } else { + present = rbnode->cache_present; } /* insert the register value in the correct place in the rbnode block */ diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig index be5fffb6da24..023d448ed3fa 100644 --- a/drivers/bcma/Kconfig +++ b/drivers/bcma/Kconfig @@ -92,7 +92,7 @@ config BCMA_DRIVER_GMAC_CMN config BCMA_DRIVER_GPIO bool "BCMA GPIO driver" depends on BCMA && GPIOLIB - select IRQ_DOMAIN if BCMA_HOST_SOC + select GPIOLIB_IRQCHIP if BCMA_HOST_SOC help Driver to provide access to the GPIO pins of the bcma bus. diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index 15f2b2e242ea..38f156745d53 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -34,6 +34,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus); int bcma_bus_suspend(struct bcma_bus *bus); int bcma_bus_resume(struct bcma_bus *bus); #endif +struct device *bcma_bus_get_host_dev(struct bcma_bus *bus); /* scan.c */ void bcma_detect_chip(struct bcma_bus *bus); diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index 5f6018e7cd4c..504899a72966 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -8,10 +8,8 @@ * Licensed under the GNU/GPL. See COPYING for details. 
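The regcache-rbtree fix above generalizes: krealloc() preserves the existing contents but leaves the newly grown region uninitialized, so bitmap growth must clear the added longs by hand, and equal-or-smaller sizes should not reallocate at all. Distilled into a standalone helper (hypothetical name):

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Grow a bitmap from old_bits to new_bits. On failure the caller
 * still owns the original map. */
static unsigned long *bitmap_grow(unsigned long *map,
                                  unsigned int old_bits,
                                  unsigned int new_bits)
{
        unsigned int old_longs = BITS_TO_LONGS(old_bits);
        unsigned int new_longs = BITS_TO_LONGS(new_bits);
        unsigned long *tmp;

        if (new_longs <= old_longs)
                return map;     /* nothing to grow */

        tmp = krealloc(map, new_longs * sizeof(*tmp), GFP_KERNEL);
        if (!tmp)
                return NULL;

        /* krealloc copied the old longs; the new tail is garbage. */
        memset(tmp + old_longs, 0,
               (new_longs - old_longs) * sizeof(*tmp));
        return tmp;
}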
*/ -#include -#include +#include #include -#include #include #include @@ -79,19 +77,11 @@ static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio) } #if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X) -static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) -{ - struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip); - - if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC) - return irq_find_mapping(cc->irq_domain, gpio); - else - return -EINVAL; -} static void bcma_gpio_irq_unmask(struct irq_data *d) { - struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc); int gpio = irqd_to_hwirq(d); u32 val = bcma_chipco_gpio_in(cc, BIT(gpio)); @@ -101,7 +91,8 @@ static void bcma_gpio_irq_unmask(struct irq_data *d) static void bcma_gpio_irq_mask(struct irq_data *d) { - struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc); int gpio = irqd_to_hwirq(d); bcma_chipco_gpio_intmask(cc, BIT(gpio), 0); @@ -116,6 +107,7 @@ static struct irq_chip bcma_gpio_irq_chip = { static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id) { struct bcma_drv_cc *cc = dev_id; + struct gpio_chip *gc = &cc->gpio; u32 val = bcma_cc_read32(cc, BCMA_CC_GPIOIN); u32 mask = bcma_cc_read32(cc, BCMA_CC_GPIOIRQ); u32 pol = bcma_cc_read32(cc, BCMA_CC_GPIOPOL); @@ -125,81 +117,58 @@ static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id) if (!irqs) return IRQ_NONE; - for_each_set_bit(gpio, &irqs, cc->gpio.ngpio) - generic_handle_irq(bcma_gpio_to_irq(&cc->gpio, gpio)); + for_each_set_bit(gpio, &irqs, gc->ngpio) + generic_handle_irq(irq_find_mapping(gc->irqdomain, gpio)); bcma_chipco_gpio_polarity(cc, irqs, val & irqs); return IRQ_HANDLED; } -static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc) +static int bcma_gpio_irq_init(struct bcma_drv_cc *cc) { struct gpio_chip *chip = &cc->gpio; - int gpio, hwirq, err; + int hwirq, err; if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC) return 0; - cc->irq_domain = irq_domain_add_linear(NULL, chip->ngpio, - &irq_domain_simple_ops, cc); - if (!cc->irq_domain) { - err = -ENODEV; - goto err_irq_domain; - } - for (gpio = 0; gpio < chip->ngpio; gpio++) { - int irq = irq_create_mapping(cc->irq_domain, gpio); - - irq_set_chip_data(irq, cc); - irq_set_chip_and_handler(irq, &bcma_gpio_irq_chip, - handle_simple_irq); - } - hwirq = bcma_core_irq(cc->core, 0); err = request_irq(hwirq, bcma_gpio_irq_handler, IRQF_SHARED, "gpio", cc); if (err) - goto err_req_irq; + return err; bcma_chipco_gpio_intmask(cc, ~0, 0); bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO); - return 0; - -err_req_irq: - for (gpio = 0; gpio < chip->ngpio; gpio++) { - int irq = irq_find_mapping(cc->irq_domain, gpio); - - irq_dispose_mapping(irq); + err = gpiochip_irqchip_add(chip, + &bcma_gpio_irq_chip, + 0, + handle_simple_irq, + IRQ_TYPE_NONE); + if (err) { + free_irq(hwirq, cc); + return err; } - irq_domain_remove(cc->irq_domain); -err_irq_domain: - return err; + + return 0; } -static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc) +static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc) { - struct gpio_chip *chip = &cc->gpio; - int gpio; - if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC) return; bcma_cc_mask32(cc, BCMA_CC_IRQMASK, ~BCMA_CC_IRQ_GPIO); free_irq(bcma_core_irq(cc->core, 0), cc); - for (gpio = 0; gpio < chip->ngpio; gpio++) { - int irq = 
irq_find_mapping(cc->irq_domain, gpio); - - irq_dispose_mapping(irq); - } - irq_domain_remove(cc->irq_domain); } #else -static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc) +static int bcma_gpio_irq_init(struct bcma_drv_cc *cc) { return 0; } -static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc) +static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc) { } #endif @@ -218,9 +187,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) chip->set = bcma_gpio_set_value; chip->direction_input = bcma_gpio_direction_input; chip->direction_output = bcma_gpio_direction_output; -#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X) - chip->to_irq = bcma_gpio_to_irq; -#endif + chip->owner = THIS_MODULE; + chip->dev = bcma_bus_get_host_dev(bus); #if IS_BUILTIN(CONFIG_OF) if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC) chip->of_node = cc->core->dev.of_node; @@ -248,13 +216,13 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) else chip->base = -1; - err = bcma_gpio_irq_domain_init(cc); + err = gpiochip_add(chip); if (err) return err; - err = gpiochip_add(chip); + err = bcma_gpio_irq_init(cc); if (err) { - bcma_gpio_irq_domain_exit(cc); + gpiochip_remove(chip); return err; } @@ -263,7 +231,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) int bcma_gpio_unregister(struct bcma_drv_cc *cc) { - bcma_gpio_irq_domain_exit(cc); + bcma_gpio_irq_exit(cc); gpiochip_remove(&cc->gpio); return 0; } diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index 8d973c4fc84e..24882c18fcbe 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -7,7 +7,9 @@ #include "bcma_private.h" #include +#include #include +#include #include #include #include @@ -269,6 +271,28 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core) } } +struct device *bcma_bus_get_host_dev(struct bcma_bus *bus) +{ + switch (bus->hosttype) { + case BCMA_HOSTTYPE_PCI: + if (bus->host_pci) + return &bus->host_pci->dev; + else + return NULL; + case BCMA_HOSTTYPE_SOC: + if (bus->host_pdev) + return &bus->host_pdev->dev; + else + return NULL; + case BCMA_HOSTTYPE_SDIO: + if (bus->host_sdio) + return &bus->host_sdio->dev; + else + return NULL; + } + return NULL; +} + void bcma_init_bus(struct bcma_bus *bus) { mutex_lock(&bcma_buses_mutex); @@ -388,6 +412,7 @@ int bcma_bus_register(struct bcma_bus *bus) { int err; struct bcma_device *core; + struct device *dev; /* Scan for devices (cores) */ err = bcma_bus_scan(bus); @@ -410,13 +435,12 @@ int bcma_bus_register(struct bcma_bus *bus) bcma_core_pci_early_init(&bus->drv_pci[0]); } + dev = bcma_bus_get_host_dev(bus); /* TODO: remove check for IS_BUILTIN(CONFIG_BCMA) check when * of_default_bus_match_table is exported or in some other way * accessible. This is just a temporary workaround. */ - if (IS_BUILTIN(CONFIG_BCMA) && bus->host_pdev) { - struct device *dev = &bus->host_pdev->dev; - + if (IS_BUILTIN(CONFIG_BCMA) && dev) { of_platform_populate(dev->of_node, of_default_bus_match_table, NULL, dev); } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index d94529d5c8e9..bc67a93aa4f4 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -523,6 +523,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...) 
# define rbd_assert(expr) ((void) 0) #endif /* !RBD_DEBUG */ +static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request); static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request); static void rbd_img_parent_read(struct rbd_obj_request *obj_request); static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); @@ -1818,6 +1819,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request) obj_request_done_set(obj_request); } +static void rbd_osd_call_callback(struct rbd_obj_request *obj_request) +{ + dout("%s: obj %p\n", __func__, obj_request); + + if (obj_request_img_data_test(obj_request)) + rbd_osd_copyup_callback(obj_request); + else + obj_request_done_set(obj_request); +} + static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, struct ceph_msg *msg) { @@ -1866,6 +1877,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, rbd_osd_discard_callback(obj_request); break; case CEPH_OSD_OP_CALL: + rbd_osd_call_callback(obj_request); + break; case CEPH_OSD_OP_NOTIFY_ACK: case CEPH_OSD_OP_WATCH: rbd_osd_trivial_callback(obj_request); @@ -2530,13 +2543,15 @@ out_unwind: } static void -rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) +rbd_osd_copyup_callback(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request; struct rbd_device *rbd_dev; struct page **pages; u32 page_count; + dout("%s: obj %p\n", __func__, obj_request); + rbd_assert(obj_request->type == OBJ_REQUEST_BIO || obj_request->type == OBJ_REQUEST_NODATA); rbd_assert(obj_request_img_data_test(obj_request)); @@ -2563,9 +2578,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) if (!obj_request->result) obj_request->xferred = obj_request->length; - /* Finish up with the normal image object callback */ - - rbd_img_obj_callback(obj_request); + obj_request_done_set(obj_request); } static void @@ -2650,7 +2663,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) /* All set, send it off. */ - orig_request->callback = rbd_img_obj_copyup_callback; osdc = &rbd_dev->rbd_client->client->osdc; img_result = rbd_obj_request_submit(osdc, orig_request); if (!img_result) diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index ced96777b677..954c0029fb3b 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -369,8 +369,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif) return; } - if (work_pending(&blkif->persistent_purge_work)) { - pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n"); + if (work_busy(&blkif->persistent_purge_work)) { + pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n"); return; } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 6d89ed35d80c..7a8a73f1fc04 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -179,6 +179,7 @@ static DEFINE_SPINLOCK(minor_lock); ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) static int blkfront_setup_indirect(struct blkfront_info *info); +static int blkfront_gather_backend_features(struct blkfront_info *info); static int get_id_from_freelist(struct blkfront_info *info) { @@ -1128,8 +1129,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, * Add the used indirect page back to the list of * available pages for indirect grefs. 
*/ - indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); - list_add(&indirect_page->lru, &info->indirect_pages); + if (!info->feature_persistent) { + indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); + list_add(&indirect_page->lru, &info->indirect_pages); + } s->indirect_grants[i]->gref = GRANT_INVALID_REF; list_add_tail(&s->indirect_grants[i]->node, &info->grants); } @@ -1519,7 +1522,7 @@ static int blkif_recover(struct blkfront_info *info) info->shadow_free = info->ring.req_prod_pvt; info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff; - rc = blkfront_setup_indirect(info); + rc = blkfront_gather_backend_features(info); if (rc) { kfree(copy); return rc; @@ -1720,20 +1723,13 @@ static void blkfront_setup_discard(struct blkfront_info *info) static int blkfront_setup_indirect(struct blkfront_info *info) { - unsigned int indirect_segments, segs; + unsigned int segs; int err, i; - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-max-indirect-segments", "%u", &indirect_segments, - NULL); - if (err) { - info->max_indirect_segments = 0; + if (info->max_indirect_segments == 0) segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; - } else { - info->max_indirect_segments = min(indirect_segments, - xen_blkif_max_segments); + else segs = info->max_indirect_segments; - } err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info)); if (err) @@ -1796,6 +1792,68 @@ out_of_memory: return -ENOMEM; } +/* + * Gather all backend feature-* + */ +static int blkfront_gather_backend_features(struct blkfront_info *info) +{ + int err; + int barrier, flush, discard, persistent; + unsigned int indirect_segments; + + info->feature_flush = 0; + + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-barrier", "%d", &barrier, + NULL); + + /* + * If there's no "feature-barrier" defined, then it means + * we're dealing with a very old backend which writes + * synchronously; nothing to do. + * + * If there are barriers, then we use flush. + */ + if (!err && barrier) + info->feature_flush = REQ_FLUSH | REQ_FUA; + /* + * And if there is "feature-flush-cache" use that above + * barriers. + */ + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-flush-cache", "%d", &flush, + NULL); + + if (!err && flush) + info->feature_flush = REQ_FLUSH; + + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-discard", "%d", &discard, + NULL); + + if (!err && discard) + blkfront_setup_discard(info); + + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-persistent", "%u", &persistent, + NULL); + if (err) + info->feature_persistent = 0; + else + info->feature_persistent = persistent; + + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-max-indirect-segments", "%u", &indirect_segments, + NULL); + if (err) + info->max_indirect_segments = 0; + else + info->max_indirect_segments = min(indirect_segments, + xen_blkif_max_segments); + + return blkfront_setup_indirect(info); +} + /* * Invoked when the backend is finally 'ready' (and has told produced * the details about the physical device - #sectors, size, etc). 
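
The blkfront refactor above is worth pausing on: after a live migration the frontend reconnects to a possibly different backend, so blkif_recover() now calls blkfront_gather_backend_features() instead of blkfront_setup_indirect(), re-reading every optional backend key (barrier, flush, discard, persistent grants, indirect segments) before the indirect buffers are sized. The sketch below is a minimal user-space illustration of that negotiation pattern, not the real xenbus API: mock_xenbus_gather() and the backend[] table are invented stand-ins for xenstore. Every key is optional, a missing key falls back to a conservative default, and the backend's advertisement is always clamped to the frontend's own cap.

/*
 * Minimal sketch of the blkfront_gather_backend_features() pattern.
 * mock_xenbus_gather() and backend[] are stand-ins for xenstore.
 */
#include <stdio.h>
#include <string.h>

struct mock_kv {
	const char *key;
	int val;
};

/* Pretend the backend advertised these keys. */
static const struct mock_kv backend[] = {
	{ "feature-barrier", 1 },
	{ "feature-max-indirect-segments", 256 },
};

static int mock_xenbus_gather(const char *key, int *out)
{
	size_t i;

	for (i = 0; i < sizeof(backend) / sizeof(backend[0]); i++) {
		if (strcmp(backend[i].key, key) == 0) {
			*out = backend[i].val;
			return 0;
		}
	}
	return -1;		/* key absent, like xenbus_gather() failing */
}

#define FRONTEND_MAX_SEGMENTS 32	/* cf. xen_blkif_max_segments */

int main(void)
{
	int barrier = 0, persistent = 0, indirect = 0, max_indirect;

	/* Missing key -> keep the conservative default. */
	if (mock_xenbus_gather("feature-barrier", &barrier) < 0)
		barrier = 0;
	if (mock_xenbus_gather("feature-persistent", &persistent) < 0)
		persistent = 0;

	/* Present key -> believe it, but never beyond our own limit. */
	if (mock_xenbus_gather("feature-max-indirect-segments",
			       &indirect) < 0)
		max_indirect = 0;
	else
		max_indirect = indirect < FRONTEND_MAX_SEGMENTS ?
			       indirect : FRONTEND_MAX_SEGMENTS;

	printf("barrier=%d persistent=%d max_indirect=%d\n",
	       barrier, persistent, max_indirect);
	return 0;
}

With the mock table above this prints barrier=1 persistent=0 max_indirect=32: the 256 segments the backend offered are clamped to the frontend limit, exactly the min() in blkfront_gather_backend_features().
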
@@ -1807,7 +1865,6 @@ static void blkfront_connect(struct blkfront_info *info) unsigned int physical_sector_size; unsigned int binfo; int err; - int barrier, flush, discard, persistent; switch (info->connected) { case BLKIF_STATE_CONNECTED: @@ -1864,48 +1921,7 @@ static void blkfront_connect(struct blkfront_info *info) if (err != 1) physical_sector_size = sector_size; - info->feature_flush = 0; - - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-barrier", "%d", &barrier, - NULL); - - /* - * If there's no "feature-barrier" defined, then it means - * we're dealing with a very old backend which writes - * synchronously; nothing to do. - * - * If there are barriers, then we use flush. - */ - if (!err && barrier) - info->feature_flush = REQ_FLUSH | REQ_FUA; - /* - * And if there is "feature-flush-cache" use that above - * barriers. - */ - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-flush-cache", "%d", &flush, - NULL); - - if (!err && flush) - info->feature_flush = REQ_FLUSH; - - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-discard", "%d", &discard, - NULL); - - if (!err && discard) - blkfront_setup_discard(info); - - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-persistent", "%u", &persistent, - NULL); - if (err) - info->feature_persistent = 0; - else - info->feature_persistent = persistent; - - err = blkfront_setup_indirect(info); + err = blkfront_gather_backend_features(info); if (err) { xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", info->xbdev->otherend); diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 79e8234b1aa5..0bd88c942a52 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -13,6 +13,10 @@ config BT_RTL tristate select FW_LOADER +config BT_QCA + tristate + select FW_LOADER + config BT_HCIBTUSB tristate "HCI USB driver" depends on USB @@ -151,6 +155,19 @@ config BT_HCIUART_BCM Say Y here to compile support for Broadcom protocol. +config BT_HCIUART_QCA + bool "Qualcomm Atheros protocol support" + depends on BT_HCIUART + select BT_HCIUART_H4 + select BT_QCA + help + The Qualcomm Atheros protocol supports HCI In-Band Sleep feature + over serial port interface(H4) between controller and host. + This protocol is required for UART clock control for QCA Bluetooth + devices. + + Say Y here to compile support for QCA protocol. 
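
Note that BT_QCA above is a hidden tristate: nothing enables it except the select from BT_HCIUART_QCA, and btqca.h (added later in this patch) supplies static inline stubs returning -EOPNOTSUPP when the symbol is off, so hci_uart compiles and links either way. A minimal sketch of that compile-out pattern follows; CONFIG_DEMO_FEATURE and demo_setup() are invented names for illustration only.

/*
 * Sketch of the Kconfig compile-out pattern used by btqca.h.
 * CONFIG_DEMO_FEATURE and demo_setup() are hypothetical.
 */
#include <stdio.h>
#include <errno.h>

#ifdef CONFIG_DEMO_FEATURE
int demo_setup(int speed)
{
	/* The real implementation would live in its own module. */
	return 0;
}
#else
static inline int demo_setup(int speed)
{
	return -EOPNOTSUPP;	/* feature compiled out */
}
#endif

int main(void)
{
	int err = demo_setup(115200);

	if (err)
		printf("demo_setup failed: %d (feature compiled out)\n", err);
	else
		printf("demo_setup ok\n");
	return 0;
}

Built plain (cc demo.c) this takes the stub path; built with -DCONFIG_DEMO_FEATURE it takes the real one, mirroring what CONFIG_BT_QCA=y/n does to qca_uart_setup_rome() callers.
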
+ config BT_HCIBCM203X tristate "HCI BCM203x USB driver" depends on USB diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile index f40e194e7080..07c9cf381e5a 100644 --- a/drivers/bluetooth/Makefile +++ b/drivers/bluetooth/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o obj-$(CONFIG_BT_WILINK) += btwilink.o obj-$(CONFIG_BT_BCM) += btbcm.o obj-$(CONFIG_BT_RTL) += btrtl.o +obj-$(CONFIG_BT_QCA) += btqca.o btmrvl-y := btmrvl_main.o btmrvl-$(CONFIG_DEBUG_FS) += btmrvl_debugfs.o @@ -34,6 +35,7 @@ hci_uart-$(CONFIG_BT_HCIUART_ATH3K) += hci_ath.o hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o hci_uart-$(CONFIG_BT_HCIUART_INTEL) += hci_intel.o hci_uart-$(CONFIG_BT_HCIUART_BCM) += hci_bcm.o +hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o hci_uart-objs := $(hci_uart-y) ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index b9a811900f6a..7c097629e593 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -1071,8 +1071,6 @@ static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card) } } - sdio_release_host(card->func); - /* * winner or not, with this test the FW synchronizes when the * module can continue its initialization @@ -1082,6 +1080,8 @@ static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card) return -ETIMEDOUT; } + sdio_release_host(card->func); + return 0; done: diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c new file mode 100644 index 000000000000..4a6208168850 --- /dev/null +++ b/drivers/bluetooth/btqca.c @@ -0,0 +1,392 @@ +/* + * Bluetooth supports for Qualcomm Atheros chips + * + * Copyright (c) 2015 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btqca.h"
+
+#define VERSION "0.1"
+
+static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
+{
+	struct sk_buff *skb;
+	struct edl_event_hdr *edl;
+	struct rome_version *ver;
+	char cmd;
+	int err = 0;
+
+	BT_DBG("%s: ROME Patch Version Request", hdev->name);
+
+	cmd = EDL_PATCH_VER_REQ_CMD;
+	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
+				&cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		BT_ERR("%s: Failed to read version of ROME (%d)", hdev->name,
+		       err);
+		return err;
+	}
+
+	if (skb->len != sizeof(*edl) + sizeof(*ver)) {
+		BT_ERR("%s: Version size mismatch len %d", hdev->name,
+		       skb->len);
+		err = -EILSEQ;
+		goto out;
+	}
+
+	edl = (struct edl_event_hdr *)(skb->data);
+	if (!edl || !edl->data) {
+		BT_ERR("%s: TLV with no header or no data", hdev->name);
+		err = -EILSEQ;
+		goto out;
+	}
+
+	if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+	    edl->rtype != EDL_APP_VER_RES_EVT) {
+		BT_ERR("%s: Wrong packet received %d %d", hdev->name,
+		       edl->cresp, edl->rtype);
+		err = -EIO;
+		goto out;
+	}
+
+	ver = (struct rome_version *)(edl->data);
+
+	BT_DBG("%s: Product:0x%08x", hdev->name, le32_to_cpu(ver->product_id));
+	BT_DBG("%s: Patch  :0x%08x", hdev->name, le16_to_cpu(ver->patch_ver));
+	BT_DBG("%s: ROM    :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver));
+	BT_DBG("%s: SOC    :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));
+
+	/* The ROME chipset version is derived from the SoC and ROM
+	 * versions: the upper 2 bytes come from the SoC id and the
+	 * lower 2 bytes from the ROM version.
+ */ + *rome_version = (le32_to_cpu(ver->soc_id) << 16) | + (le16_to_cpu(ver->rome_ver) & 0x0000ffff); + +out: + kfree_skb(skb); + + return err; +} + +static int rome_reset(struct hci_dev *hdev) +{ + struct sk_buff *skb; + int err; + + BT_DBG("%s: ROME HCI_RESET", hdev->name); + + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + BT_ERR("%s: Reset failed (%d)", hdev->name, err); + return err; + } + + kfree_skb(skb); + + return 0; +} + +static void rome_tlv_check_data(struct rome_config *config, + const struct firmware *fw) +{ + const u8 *data; + u32 type_len; + u16 tag_id, tag_len; + int idx, length; + struct tlv_type_hdr *tlv; + struct tlv_type_patch *tlv_patch; + struct tlv_type_nvm *tlv_nvm; + + tlv = (struct tlv_type_hdr *)fw->data; + + type_len = le32_to_cpu(tlv->type_len); + length = (type_len >> 8) & 0x00ffffff; + + BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff); + BT_DBG("Length\t\t : %d bytes", length); + + switch (config->type) { + case TLV_TYPE_PATCH: + tlv_patch = (struct tlv_type_patch *)tlv->data; + BT_DBG("Total Length\t\t : %d bytes", + le32_to_cpu(tlv_patch->total_size)); + BT_DBG("Patch Data Length\t : %d bytes", + le32_to_cpu(tlv_patch->data_length)); + BT_DBG("Signing Format Version : 0x%x", + tlv_patch->format_version); + BT_DBG("Signature Algorithm\t : 0x%x", + tlv_patch->signature); + BT_DBG("Reserved\t\t : 0x%x", + le16_to_cpu(tlv_patch->reserved1)); + BT_DBG("Product ID\t\t : 0x%04x", + le16_to_cpu(tlv_patch->product_id)); + BT_DBG("Rom Build Version\t : 0x%04x", + le16_to_cpu(tlv_patch->rom_build)); + BT_DBG("Patch Version\t\t : 0x%04x", + le16_to_cpu(tlv_patch->patch_version)); + BT_DBG("Reserved\t\t : 0x%x", + le16_to_cpu(tlv_patch->reserved2)); + BT_DBG("Patch Entry Address\t : 0x%x", + le32_to_cpu(tlv_patch->entry)); + break; + + case TLV_TYPE_NVM: + idx = 0; + data = tlv->data; + while (idx < length) { + tlv_nvm = (struct tlv_type_nvm *)(data + idx); + + tag_id = le16_to_cpu(tlv_nvm->tag_id); + tag_len = le16_to_cpu(tlv_nvm->tag_len); + + /* Update NVM tags as needed */ + switch (tag_id) { + case EDL_TAG_ID_HCI: + /* HCI transport layer parameters + * enabling software inband sleep + * onto controller side. + */ + tlv_nvm->data[0] |= 0x80; + + /* UART Baud Rate */ + tlv_nvm->data[2] = config->user_baud_rate; + + break; + + case EDL_TAG_ID_DEEP_SLEEP: + /* Sleep enable mask + * enabling deep sleep feature on controller. 
+ */ + tlv_nvm->data[0] |= 0x01; + + break; + } + + idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len); + } + break; + + default: + BT_ERR("Unknown TLV type %d", config->type); + break; + } +} + +static int rome_tlv_send_segment(struct hci_dev *hdev, int idx, int seg_size, + const u8 *data) +{ + struct sk_buff *skb; + struct edl_event_hdr *edl; + struct tlv_seg_resp *tlv_resp; + u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2]; + int err = 0; + + BT_DBG("%s: Download segment #%d size %d", hdev->name, idx, seg_size); + + cmd[0] = EDL_PATCH_TLV_REQ_CMD; + cmd[1] = seg_size; + memcpy(cmd + 2, data, seg_size); + + skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd, + HCI_VENDOR_PKT, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + BT_ERR("%s: Failed to send TLV segment (%d)", hdev->name, err); + return err; + } + + if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) { + BT_ERR("%s: TLV response size mismatch", hdev->name); + err = -EILSEQ; + goto out; + } + + edl = (struct edl_event_hdr *)(skb->data); + if (!edl || !edl->data) { + BT_ERR("%s: TLV with no header or no data", hdev->name); + err = -EILSEQ; + goto out; + } + + tlv_resp = (struct tlv_seg_resp *)(edl->data); + + if (edl->cresp != EDL_CMD_REQ_RES_EVT || + edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) { + BT_ERR("%s: TLV with error stat 0x%x rtype 0x%x (0x%x)", + hdev->name, edl->cresp, edl->rtype, tlv_resp->result); + err = -EIO; + } + +out: + kfree_skb(skb); + + return err; +} + +static int rome_tlv_download_request(struct hci_dev *hdev, + const struct firmware *fw) +{ + const u8 *buffer, *data; + int total_segment, remain_size; + int ret, i; + + if (!fw || !fw->data) + return -EINVAL; + + total_segment = fw->size / MAX_SIZE_PER_TLV_SEGMENT; + remain_size = fw->size % MAX_SIZE_PER_TLV_SEGMENT; + + BT_DBG("%s: Total segment num %d remain size %d total size %zu", + hdev->name, total_segment, remain_size, fw->size); + + data = fw->data; + for (i = 0; i < total_segment; i++) { + buffer = data + i * MAX_SIZE_PER_TLV_SEGMENT; + ret = rome_tlv_send_segment(hdev, i, MAX_SIZE_PER_TLV_SEGMENT, + buffer); + if (ret < 0) + return -EIO; + } + + if (remain_size) { + buffer = data + total_segment * MAX_SIZE_PER_TLV_SEGMENT; + ret = rome_tlv_send_segment(hdev, total_segment, remain_size, + buffer); + if (ret < 0) + return -EIO; + } + + return 0; +} + +static int rome_download_firmware(struct hci_dev *hdev, + struct rome_config *config) +{ + const struct firmware *fw; + int ret; + + BT_INFO("%s: ROME Downloading %s", hdev->name, config->fwname); + + ret = request_firmware(&fw, config->fwname, &hdev->dev); + if (ret) { + BT_ERR("%s: Failed to request file: %s (%d)", hdev->name, + config->fwname, ret); + return ret; + } + + rome_tlv_check_data(config, fw); + + ret = rome_tlv_download_request(hdev, fw); + if (ret) { + BT_ERR("%s: Failed to download file: %s (%d)", hdev->name, + config->fwname, ret); + } + + release_firmware(fw); + + return ret; +} + +int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + u8 cmd[9]; + int err; + + cmd[0] = EDL_NVM_ACCESS_SET_REQ_CMD; + cmd[1] = 0x02; /* TAG ID */ + cmd[2] = sizeof(bdaddr_t); /* size */ + memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t)); + skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd, + HCI_VENDOR_PKT, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + BT_ERR("%s: Change address command failed (%d)", + hdev->name, err); + return err; + } + + kfree_skb(skb); + + return 0; +} 
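
Before moving on to the UART setup entry point, a note on rome_tlv_download_request() above: it is mostly segmentation arithmetic. The firmware image is pushed to the controller as MAX_SIZE_PER_TLV_SEGMENT (243-byte) vendor-command payloads and, when the size is not a multiple of 243, one trailing partial segment. A standalone sketch of just that arithmetic, with send_segment() as a stub standing in for rome_tlv_send_segment():

/*
 * Sketch of the segmentation loop in rome_tlv_download_request().
 * send_segment() is a stand-in for rome_tlv_send_segment().
 */
#include <stdio.h>
#include <stddef.h>

#define MAX_SIZE_PER_TLV_SEGMENT 243

static int send_segment(size_t idx, size_t seg_size,
			const unsigned char *data)
{
	(void)data;	/* a real transport would push these bytes */
	printf("segment #%zu: %zu bytes\n", idx, seg_size);
	return 0;
}

static int download(const unsigned char *fw, size_t fw_size)
{
	size_t total_segment = fw_size / MAX_SIZE_PER_TLV_SEGMENT;
	size_t remain_size = fw_size % MAX_SIZE_PER_TLV_SEGMENT;
	size_t i;

	for (i = 0; i < total_segment; i++)
		if (send_segment(i, MAX_SIZE_PER_TLV_SEGMENT,
				 fw + i * MAX_SIZE_PER_TLV_SEGMENT) < 0)
			return -1;

	/* Tail segment carries the fw_size % 243 leftover bytes. */
	if (remain_size)
		return send_segment(total_segment, remain_size,
				    fw + total_segment *
					 MAX_SIZE_PER_TLV_SEGMENT);
	return 0;
}

int main(void)
{
	static unsigned char fw[1000];	/* 1000 = 4 * 243 + 28 */

	return download(fw, sizeof(fw)) ? 1 : 0;
}

For a 1000-byte image this sends four full 243-byte segments and a 28-byte tail, matching the total_segment/remain_size split in the driver.
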
+EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome); + +int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate) +{ + u32 rome_ver = 0; + struct rome_config config; + int err; + + BT_DBG("%s: ROME setup on UART", hdev->name); + + config.user_baud_rate = baudrate; + + /* Get ROME version information */ + err = rome_patch_ver_req(hdev, &rome_ver); + if (err < 0 || rome_ver == 0) { + BT_ERR("%s: Failed to get version 0x%x", hdev->name, err); + return err; + } + + BT_INFO("%s: ROME controller version 0x%08x", hdev->name, rome_ver); + + /* Download rampatch file */ + config.type = TLV_TYPE_PATCH; + snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin", + rome_ver); + err = rome_download_firmware(hdev, &config); + if (err < 0) { + BT_ERR("%s: Failed to download patch (%d)", hdev->name, err); + return err; + } + + /* Download NVM configuration */ + config.type = TLV_TYPE_NVM; + snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin", + rome_ver); + err = rome_download_firmware(hdev, &config); + if (err < 0) { + BT_ERR("%s: Failed to download NVM (%d)", hdev->name, err); + return err; + } + + /* Perform HCI reset */ + err = rome_reset(hdev); + if (err < 0) { + BT_ERR("%s: Failed to run HCI_RESET (%d)", hdev->name, err); + return err; + } + + BT_INFO("%s: ROME setup on UART is completed", hdev->name); + + return 0; +} +EXPORT_SYMBOL_GPL(qca_uart_setup_rome); + +MODULE_AUTHOR("Ben Young Tae Kim "); +MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h new file mode 100644 index 000000000000..65e994b96c47 --- /dev/null +++ b/drivers/bluetooth/btqca.h @@ -0,0 +1,135 @@ +/* + * Bluetooth supports for Qualcomm Atheros ROME chips + * + * Copyright (c) 2015 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#define EDL_PATCH_CMD_OPCODE (0xFC00) +#define EDL_NVM_ACCESS_OPCODE (0xFC0B) +#define EDL_PATCH_CMD_LEN (1) +#define EDL_PATCH_VER_REQ_CMD (0x19) +#define EDL_PATCH_TLV_REQ_CMD (0x1E) +#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01) +#define MAX_SIZE_PER_TLV_SEGMENT (243) + +#define EDL_CMD_REQ_RES_EVT (0x00) +#define EDL_PATCH_VER_RES_EVT (0x19) +#define EDL_APP_VER_RES_EVT (0x02) +#define EDL_TVL_DNLD_RES_EVT (0x04) +#define EDL_CMD_EXE_STATUS_EVT (0x00) +#define EDL_SET_BAUDRATE_RSP_EVT (0x92) +#define EDL_NVM_ACCESS_CODE_EVT (0x0B) + +#define EDL_TAG_ID_HCI (17) +#define EDL_TAG_ID_DEEP_SLEEP (27) + +enum qca_bardrate { + QCA_BAUDRATE_115200 = 0, + QCA_BAUDRATE_57600, + QCA_BAUDRATE_38400, + QCA_BAUDRATE_19200, + QCA_BAUDRATE_9600, + QCA_BAUDRATE_230400, + QCA_BAUDRATE_250000, + QCA_BAUDRATE_460800, + QCA_BAUDRATE_500000, + QCA_BAUDRATE_720000, + QCA_BAUDRATE_921600, + QCA_BAUDRATE_1000000, + QCA_BAUDRATE_1250000, + QCA_BAUDRATE_2000000, + QCA_BAUDRATE_3000000, + QCA_BAUDRATE_4000000, + QCA_BAUDRATE_1600000, + QCA_BAUDRATE_3200000, + QCA_BAUDRATE_3500000, + QCA_BAUDRATE_AUTO = 0xFE, + QCA_BAUDRATE_RESERVED +}; + +enum rome_tlv_type { + TLV_TYPE_PATCH = 1, + TLV_TYPE_NVM +}; + +struct rome_config { + u8 type; + char fwname[64]; + uint8_t user_baud_rate; +}; + +struct edl_event_hdr { + __u8 cresp; + __u8 rtype; + __u8 data[0]; +} __packed; + +struct rome_version { + __le32 product_id; + __le16 patch_ver; + __le16 rome_ver; + __le32 soc_id; +} __packed; + +struct tlv_seg_resp { + __u8 result; +} __packed; + +struct tlv_type_patch { + __le32 total_size; + __le32 data_length; + __u8 format_version; + __u8 signature; + __le16 reserved1; + __le16 product_id; + __le16 rom_build; + __le16 patch_version; + __le16 reserved2; + __le32 entry; +} __packed; + +struct tlv_type_nvm { + __le16 tag_id; + __le16 tag_len; + __le32 reserve1; + __le32 reserve2; + __u8 data[0]; +} __packed; + +struct tlv_type_hdr { + __le32 type_len; + __u8 data[0]; +} __packed; + +#if IS_ENABLED(CONFIG_BT_QCA) + +int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr); +int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate); + +#else + +static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + return -EOPNOTSUPP; +} + +static inline int qca_uart_setup_rome(struct hci_dev *hdev, int speed) +{ + return -EOPNOTSUPP; +} + +#endif diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index cc92b0f84a51..f759dea7d3ba 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -322,6 +322,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK }, + /* Silicon Wave based devices */ + { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE }, + { } /* Terminating entry */ }; diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 23523e140a9a..322302b04710 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -25,6 +25,12 @@ #include #include #include +#include +#include +#include +#include +#include +#include #include #include @@ -32,11 +38,37 @@ #include "btbcm.h" #include "hci_uart.h" -struct bcm_data { - struct sk_buff *rx_skb; - struct sk_buff_head txq; +struct 
bcm_device { + struct list_head list; + + struct platform_device *pdev; + + const char *name; + struct gpio_desc *device_wakeup; + struct gpio_desc *shutdown; + + struct clk *clk; + bool clk_enabled; + + u32 init_speed; + +#ifdef CONFIG_PM_SLEEP + struct hci_uart *hu; + bool is_suspended; /* suspend/resume flag */ +#endif }; +struct bcm_data { + struct sk_buff *rx_skb; + struct sk_buff_head txq; + + struct bcm_device *dev; +}; + +/* List of BCM BT UART devices */ +static DEFINE_SPINLOCK(bcm_device_list_lock); +static LIST_HEAD(bcm_device_list); + static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed) { struct hci_dev *hdev = hu->hdev; @@ -86,9 +118,41 @@ static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed) return 0; } +/* bcm_device_exists should be protected by bcm_device_list_lock */ +static bool bcm_device_exists(struct bcm_device *device) +{ + struct list_head *p; + + list_for_each(p, &bcm_device_list) { + struct bcm_device *dev = list_entry(p, struct bcm_device, list); + + if (device == dev) + return true; + } + + return false; +} + +static int bcm_gpio_set_power(struct bcm_device *dev, bool powered) +{ + if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled) + clk_enable(dev->clk); + + gpiod_set_value_cansleep(dev->shutdown, powered); + gpiod_set_value_cansleep(dev->device_wakeup, powered); + + if (!powered && !IS_ERR(dev->clk) && dev->clk_enabled) + clk_disable(dev->clk); + + dev->clk_enabled = powered; + + return 0; +} + static int bcm_open(struct hci_uart *hu) { struct bcm_data *bcm; + struct list_head *p; BT_DBG("hu %p", hu); @@ -99,6 +163,30 @@ static int bcm_open(struct hci_uart *hu) skb_queue_head_init(&bcm->txq); hu->priv = bcm; + + spin_lock(&bcm_device_list_lock); + list_for_each(p, &bcm_device_list) { + struct bcm_device *dev = list_entry(p, struct bcm_device, list); + + /* Retrieve saved bcm_device based on parent of the + * platform device (saved during device probe) and + * parent of tty device used by hci_uart + */ + if (hu->tty->dev->parent == dev->pdev->dev.parent) { + bcm->dev = dev; + hu->init_speed = dev->init_speed; +#ifdef CONFIG_PM_SLEEP + dev->hu = hu; +#endif + break; + } + } + + if (bcm->dev) + bcm_gpio_set_power(bcm->dev, true); + + spin_unlock(&bcm_device_list_lock); + return 0; } @@ -108,6 +196,16 @@ static int bcm_close(struct hci_uart *hu) BT_DBG("hu %p", hu); + /* Protect bcm->dev against removal of the device or driver */ + spin_lock(&bcm_device_list_lock); + if (bcm_device_exists(bcm->dev)) { + bcm_gpio_set_power(bcm->dev, false); +#ifdef CONFIG_PM_SLEEP + bcm->dev->hu = NULL; +#endif + } + spin_unlock(&bcm_device_list_lock); + skb_queue_purge(&bcm->txq); kfree_skb(bcm->rx_skb); kfree(bcm); @@ -232,6 +330,188 @@ static struct sk_buff *bcm_dequeue(struct hci_uart *hu) return skb_dequeue(&bcm->txq); } +#ifdef CONFIG_PM_SLEEP +/* Platform suspend callback */ +static int bcm_suspend(struct device *dev) +{ + struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev)); + + BT_DBG("suspend (%p): is_suspended %d", bdev, bdev->is_suspended); + + if (!bdev->is_suspended) { + hci_uart_set_flow_control(bdev->hu, true); + + /* Once this callback returns, driver suspends BT via GPIO */ + bdev->is_suspended = true; + } + + /* Suspend the device */ + if (bdev->device_wakeup) { + gpiod_set_value(bdev->device_wakeup, false); + BT_DBG("suspend, delaying 15 ms"); + mdelay(15); + } + + return 0; +} + +/* Platform resume callback */ +static int bcm_resume(struct device *dev) +{ + struct bcm_device *bdev = 
platform_get_drvdata(to_platform_device(dev)); + + BT_DBG("resume (%p): is_suspended %d", bdev, bdev->is_suspended); + + if (bdev->device_wakeup) { + gpiod_set_value(bdev->device_wakeup, true); + BT_DBG("resume, delaying 15 ms"); + mdelay(15); + } + + /* When this callback executes, the device has woken up already */ + if (bdev->is_suspended) { + bdev->is_suspended = false; + + hci_uart_set_flow_control(bdev->hu, false); + } + + return 0; +} +#endif + +static const struct acpi_gpio_params device_wakeup_gpios = { 0, 0, false }; +static const struct acpi_gpio_params shutdown_gpios = { 1, 0, false }; + +static const struct acpi_gpio_mapping acpi_bcm_default_gpios[] = { + { "device-wakeup-gpios", &device_wakeup_gpios, 1 }, + { "shutdown-gpios", &shutdown_gpios, 1 }, + { }, +}; + +#ifdef CONFIG_ACPI +static int bcm_resource(struct acpi_resource *ares, void *data) +{ + struct bcm_device *dev = data; + + if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { + struct acpi_resource_uart_serialbus *sb; + + sb = &ares->data.uart_serial_bus; + if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART) + dev->init_speed = sb->default_baud_rate; + } + + /* Always tell the ACPI core to skip this resource */ + return 1; +} + +static int bcm_acpi_probe(struct bcm_device *dev) +{ + struct platform_device *pdev = dev->pdev; + const struct acpi_device_id *id; + struct acpi_device *adev; + LIST_HEAD(resources); + int ret; + + id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev); + if (!id) + return -ENODEV; + + /* Retrieve GPIO data */ + dev->name = dev_name(&pdev->dev); + ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev), + acpi_bcm_default_gpios); + if (ret) + return ret; + + dev->clk = devm_clk_get(&pdev->dev, NULL); + + dev->device_wakeup = devm_gpiod_get_optional(&pdev->dev, + "device-wakeup", + GPIOD_OUT_LOW); + if (IS_ERR(dev->device_wakeup)) + return PTR_ERR(dev->device_wakeup); + + dev->shutdown = devm_gpiod_get_optional(&pdev->dev, "shutdown", + GPIOD_OUT_LOW); + if (IS_ERR(dev->shutdown)) + return PTR_ERR(dev->shutdown); + + /* Make sure at-least one of the GPIO is defined and that + * a name is specified for this instance + */ + if ((!dev->device_wakeup && !dev->shutdown) || !dev->name) { + dev_err(&pdev->dev, "invalid platform data\n"); + return -EINVAL; + } + + /* Retrieve UART ACPI info */ + adev = ACPI_COMPANION(&dev->pdev->dev); + if (!adev) + return 0; + + acpi_dev_get_resources(adev, &resources, bcm_resource, dev); + + return 0; +} +#else +static int bcm_acpi_probe(struct bcm_device *dev) +{ + return -EINVAL; +} +#endif /* CONFIG_ACPI */ + +static int bcm_probe(struct platform_device *pdev) +{ + struct bcm_device *dev; + struct acpi_device_id *pdata = pdev->dev.platform_data; + int ret; + + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->pdev = pdev; + + if (ACPI_HANDLE(&pdev->dev)) { + ret = bcm_acpi_probe(dev); + if (ret) + return ret; + } else if (pdata) { + dev->name = pdata->id; + } else { + return -ENODEV; + } + + platform_set_drvdata(pdev, dev); + + dev_info(&pdev->dev, "%s device registered.\n", dev->name); + + /* Place this instance on the device list */ + spin_lock(&bcm_device_list_lock); + list_add_tail(&dev->list, &bcm_device_list); + spin_unlock(&bcm_device_list_lock); + + bcm_gpio_set_power(dev, false); + + return 0; +} + +static int bcm_remove(struct platform_device *pdev) +{ + struct bcm_device *dev = platform_get_drvdata(pdev); + + spin_lock(&bcm_device_list_lock); + list_del(&dev->list); + 
spin_unlock(&bcm_device_list_lock); + + acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev)); + + dev_info(&pdev->dev, "%s device unregistered.\n", dev->name); + + return 0; +} + static const struct hci_uart_proto bcm_proto = { .id = HCI_UART_BCM, .name = "BCM", @@ -247,12 +527,38 @@ static const struct hci_uart_proto bcm_proto = { .dequeue = bcm_dequeue, }; +#ifdef CONFIG_ACPI +static const struct acpi_device_id bcm_acpi_match[] = { + { "BCM2E39", 0 }, + { "BCM2E67", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, bcm_acpi_match); +#endif + +/* Platform suspend and resume callbacks */ +static SIMPLE_DEV_PM_OPS(bcm_pm_ops, bcm_suspend, bcm_resume); + +static struct platform_driver bcm_driver = { + .probe = bcm_probe, + .remove = bcm_remove, + .driver = { + .name = "hci_bcm", + .acpi_match_table = ACPI_PTR(bcm_acpi_match), + .pm = &bcm_pm_ops, + }, +}; + int __init bcm_init(void) { + platform_driver_register(&bcm_driver); + return hci_uart_register_proto(&bcm_proto); } int __exit bcm_deinit(void) { + platform_driver_unregister(&bcm_driver); + return hci_uart_unregister_proto(&bcm_proto); } diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 20c2ac193ff9..0d5a05a7c1fd 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -810,6 +810,9 @@ static int __init hci_uart_init(void) #ifdef CONFIG_BT_HCIUART_BCM bcm_init(); #endif +#ifdef CONFIG_BT_HCIUART_QCA + qca_init(); +#endif return 0; } @@ -839,6 +842,9 @@ static void __exit hci_uart_exit(void) #ifdef CONFIG_BT_HCIUART_BCM bcm_deinit(); #endif +#ifdef CONFIG_BT_HCIUART_QCA + qca_deinit(); +#endif /* Release tty registration of line discipline */ err = tty_unregister_ldisc(N_HCI); diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c new file mode 100644 index 000000000000..6b9b91267959 --- /dev/null +++ b/drivers/bluetooth/hci_qca.c @@ -0,0 +1,969 @@ +/* + * Bluetooth Software UART Qualcomm protocol + * + * HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management + * protocol extension to H4. + * + * Copyright (C) 2007 Texas Instruments, Inc. + * Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved. + * + * Acknowledgements: + * This file is based on hci_ll.c, which was... + * Written by Ohad Ben-Cohen + * which was in turn based on hci_h4.c, which was written + * by Maxim Krasnyansky and Marcel Holtmann. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include + +#include +#include + +#include "hci_uart.h" +#include "btqca.h" + +/* HCI_IBS protocol messages */ +#define HCI_IBS_SLEEP_IND 0xFE +#define HCI_IBS_WAKE_IND 0xFD +#define HCI_IBS_WAKE_ACK 0xFC +#define HCI_MAX_IBS_SIZE 10 + +/* Controller states */ +#define STATE_IN_BAND_SLEEP_ENABLED 1 + +#define IBS_WAKE_RETRANS_TIMEOUT_MS 100 +#define IBS_TX_IDLE_TIMEOUT_MS 2000 +#define BAUDRATE_SETTLE_TIMEOUT_MS 300 + +/* HCI_IBS transmit side sleep protocol states */ +enum tx_ibs_states { + HCI_IBS_TX_ASLEEP, + HCI_IBS_TX_WAKING, + HCI_IBS_TX_AWAKE, +}; + +/* HCI_IBS receive side sleep protocol states */ +enum rx_states { + HCI_IBS_RX_ASLEEP, + HCI_IBS_RX_AWAKE, +}; + +/* HCI_IBS transmit and receive side clock state vote */ +enum hci_ibs_clock_state_vote { + HCI_IBS_VOTE_STATS_UPDATE, + HCI_IBS_TX_VOTE_CLOCK_ON, + HCI_IBS_TX_VOTE_CLOCK_OFF, + HCI_IBS_RX_VOTE_CLOCK_ON, + HCI_IBS_RX_VOTE_CLOCK_OFF, +}; + +struct qca_data { + struct hci_uart *hu; + struct sk_buff *rx_skb; + struct sk_buff_head txq; + struct sk_buff_head tx_wait_q; /* HCI_IBS wait queue */ + spinlock_t hci_ibs_lock; /* HCI_IBS state lock */ + u8 tx_ibs_state; /* HCI_IBS transmit side power state*/ + u8 rx_ibs_state; /* HCI_IBS receive side power state */ + u32 tx_vote; /* Clock must be on for TX */ + u32 rx_vote; /* Clock must be on for RX */ + struct timer_list tx_idle_timer; + u32 tx_idle_delay; + struct timer_list wake_retrans_timer; + u32 wake_retrans; + struct workqueue_struct *workqueue; + struct work_struct ws_awake_rx; + struct work_struct ws_awake_device; + struct work_struct ws_rx_vote_off; + struct work_struct ws_tx_vote_off; + unsigned long flags; + + /* For debugging purpose */ + u64 ibs_sent_wacks; + u64 ibs_sent_slps; + u64 ibs_sent_wakes; + u64 ibs_recv_wacks; + u64 ibs_recv_slps; + u64 ibs_recv_wakes; + u64 vote_last_jif; + u32 vote_on_ms; + u32 vote_off_ms; + u64 tx_votes_on; + u64 rx_votes_on; + u64 tx_votes_off; + u64 rx_votes_off; + u64 votes_on; + u64 votes_off; +}; + +static void __serial_clock_on(struct tty_struct *tty) +{ + /* TODO: Some chipset requires to enable UART clock on client + * side to save power consumption or manual work is required. + * Please put your code to control UART clock here if needed + */ +} + +static void __serial_clock_off(struct tty_struct *tty) +{ + /* TODO: Some chipset requires to disable UART clock on client + * side to save power consumption or manual work is required. 
+ * Please put your code to control UART clock off here if needed + */ +} + +/* serial_clock_vote needs to be called with the ibs lock held */ +static void serial_clock_vote(unsigned long vote, struct hci_uart *hu) +{ + struct qca_data *qca = hu->priv; + unsigned int diff; + + bool old_vote = (qca->tx_vote | qca->rx_vote); + bool new_vote; + + switch (vote) { + case HCI_IBS_VOTE_STATS_UPDATE: + diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); + + if (old_vote) + qca->vote_off_ms += diff; + else + qca->vote_on_ms += diff; + return; + + case HCI_IBS_TX_VOTE_CLOCK_ON: + qca->tx_vote = true; + qca->tx_votes_on++; + new_vote = true; + break; + + case HCI_IBS_RX_VOTE_CLOCK_ON: + qca->rx_vote = true; + qca->rx_votes_on++; + new_vote = true; + break; + + case HCI_IBS_TX_VOTE_CLOCK_OFF: + qca->tx_vote = false; + qca->tx_votes_off++; + new_vote = qca->rx_vote | qca->tx_vote; + break; + + case HCI_IBS_RX_VOTE_CLOCK_OFF: + qca->rx_vote = false; + qca->rx_votes_off++; + new_vote = qca->rx_vote | qca->tx_vote; + break; + + default: + BT_ERR("Voting irregularity"); + return; + } + + if (new_vote != old_vote) { + if (new_vote) + __serial_clock_on(hu->tty); + else + __serial_clock_off(hu->tty); + + BT_DBG("Vote serial clock %s(%s)", new_vote? "true" : "false", + vote? "true" : "false"); + + diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); + + if (new_vote) { + qca->votes_on++; + qca->vote_off_ms += diff; + } else { + qca->votes_off++; + qca->vote_on_ms += diff; + } + qca->vote_last_jif = jiffies; + } +} + +/* Builds and sends an HCI_IBS command packet. + * These are very simple packets with only 1 cmd byte. + */ +static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu) +{ + int err = 0; + struct sk_buff *skb = NULL; + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd); + + skb = bt_skb_alloc(1, GFP_ATOMIC); + if (!skb) { + BT_ERR("Failed to allocate memory for HCI_IBS packet"); + return -ENOMEM; + } + + /* Assign HCI_IBS type */ + *skb_put(skb, 1) = cmd; + + skb_queue_tail(&qca->txq, skb); + + return err; +} + +static void qca_wq_awake_device(struct work_struct *work) +{ + struct qca_data *qca = container_of(work, struct qca_data, + ws_awake_device); + struct hci_uart *hu = qca->hu; + unsigned long retrans_delay; + + BT_DBG("hu %p wq awake device", hu); + + /* Vote for serial clock */ + serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu); + + spin_lock(&qca->hci_ibs_lock); + + /* Send wake indication to device */ + if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) + BT_ERR("Failed to send WAKE to device"); + + qca->ibs_sent_wakes++; + + /* Start retransmit timer */ + retrans_delay = msecs_to_jiffies(qca->wake_retrans); + mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); + + spin_unlock(&qca->hci_ibs_lock); + + /* Actually send the packets */ + hci_uart_tx_wakeup(hu); +} + +static void qca_wq_awake_rx(struct work_struct *work) +{ + struct qca_data *qca = container_of(work, struct qca_data, + ws_awake_rx); + struct hci_uart *hu = qca->hu; + + BT_DBG("hu %p wq awake rx", hu); + + serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu); + + spin_lock(&qca->hci_ibs_lock); + qca->rx_ibs_state = HCI_IBS_RX_AWAKE; + + /* Always acknowledge device wake up, + * sending IBS message doesn't count as TX ON. 
+ */ + if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) + BT_ERR("Failed to acknowledge device wake up"); + + qca->ibs_sent_wacks++; + + spin_unlock(&qca->hci_ibs_lock); + + /* Actually send the packets */ + hci_uart_tx_wakeup(hu); +} + +static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work) +{ + struct qca_data *qca = container_of(work, struct qca_data, + ws_rx_vote_off); + struct hci_uart *hu = qca->hu; + + BT_DBG("hu %p rx clock vote off", hu); + + serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu); +} + +static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work) +{ + struct qca_data *qca = container_of(work, struct qca_data, + ws_tx_vote_off); + struct hci_uart *hu = qca->hu; + + BT_DBG("hu %p tx clock vote off", hu); + + /* Run HCI tx handling unlocked */ + hci_uart_tx_wakeup(hu); + + /* Now that message queued to tty driver, vote for tty clocks off. + * It is up to the tty driver to pend the clocks off until tx done. + */ + serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu); +} + +static void hci_ibs_tx_idle_timeout(unsigned long arg) +{ + struct hci_uart *hu = (struct hci_uart *)arg; + struct qca_data *qca = hu->priv; + unsigned long flags; + + BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state); + + spin_lock_irqsave_nested(&qca->hci_ibs_lock, + flags, SINGLE_DEPTH_NESTING); + + switch (qca->tx_ibs_state) { + case HCI_IBS_TX_AWAKE: + /* TX_IDLE, go to SLEEP */ + if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) { + BT_ERR("Failed to send SLEEP to device"); + break; + } + qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; + qca->ibs_sent_slps++; + queue_work(qca->workqueue, &qca->ws_tx_vote_off); + break; + + case HCI_IBS_TX_ASLEEP: + case HCI_IBS_TX_WAKING: + /* Fall through */ + + default: + BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state); + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); +} + +static void hci_ibs_wake_retrans_timeout(unsigned long arg) +{ + struct hci_uart *hu = (struct hci_uart *)arg; + struct qca_data *qca = hu->priv; + unsigned long flags, retrans_delay; + unsigned long retransmit = 0; + + BT_DBG("hu %p wake retransmit timeout in %d state", + hu, qca->tx_ibs_state); + + spin_lock_irqsave_nested(&qca->hci_ibs_lock, + flags, SINGLE_DEPTH_NESTING); + + switch (qca->tx_ibs_state) { + case HCI_IBS_TX_WAKING: + /* No WAKE_ACK, retransmit WAKE */ + retransmit = 1; + if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) { + BT_ERR("Failed to acknowledge device wake up"); + break; + } + qca->ibs_sent_wakes++; + retrans_delay = msecs_to_jiffies(qca->wake_retrans); + mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); + break; + + case HCI_IBS_TX_ASLEEP: + case HCI_IBS_TX_AWAKE: + /* Fall through */ + + default: + BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state); + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + + if (retransmit) + hci_uart_tx_wakeup(hu); +} + +/* Initialize protocol */ +static int qca_open(struct hci_uart *hu) +{ + struct qca_data *qca; + + BT_DBG("hu %p qca_open", hu); + + qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC); + if (!qca) + return -ENOMEM; + + skb_queue_head_init(&qca->txq); + skb_queue_head_init(&qca->tx_wait_q); + spin_lock_init(&qca->hci_ibs_lock); + qca->workqueue = create_singlethread_workqueue("qca_wq"); + if (!qca->workqueue) { + BT_ERR("QCA Workqueue not initialized properly"); + kfree(qca); + return -ENOMEM; + } + + INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx); + INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device); + 
INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off); + INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off); + + qca->hu = hu; + + /* Assume we start with both sides asleep -- extra wakes OK */ + qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; + qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; + + /* clocks actually on, but we start votes off */ + qca->tx_vote = false; + qca->rx_vote = false; + qca->flags = 0; + + qca->ibs_sent_wacks = 0; + qca->ibs_sent_slps = 0; + qca->ibs_sent_wakes = 0; + qca->ibs_recv_wacks = 0; + qca->ibs_recv_slps = 0; + qca->ibs_recv_wakes = 0; + qca->vote_last_jif = jiffies; + qca->vote_on_ms = 0; + qca->vote_off_ms = 0; + qca->votes_on = 0; + qca->votes_off = 0; + qca->tx_votes_on = 0; + qca->tx_votes_off = 0; + qca->rx_votes_on = 0; + qca->rx_votes_off = 0; + + hu->priv = qca; + + init_timer(&qca->wake_retrans_timer); + qca->wake_retrans_timer.function = hci_ibs_wake_retrans_timeout; + qca->wake_retrans_timer.data = (u_long)hu; + qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS; + + init_timer(&qca->tx_idle_timer); + qca->tx_idle_timer.function = hci_ibs_tx_idle_timeout; + qca->tx_idle_timer.data = (u_long)hu; + qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS; + + BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u", + qca->tx_idle_delay, qca->wake_retrans); + + return 0; +} + +static void qca_debugfs_init(struct hci_dev *hdev) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + struct qca_data *qca = hu->priv; + struct dentry *ibs_dir; + umode_t mode; + + if (!hdev->debugfs) + return; + + ibs_dir = debugfs_create_dir("ibs", hdev->debugfs); + + /* read only */ + mode = S_IRUGO; + debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state); + debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state); + debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir, + &qca->ibs_sent_slps); + debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir, + &qca->ibs_sent_wakes); + debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir, + &qca->ibs_sent_wacks); + debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir, + &qca->ibs_recv_slps); + debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir, + &qca->ibs_recv_wakes); + debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir, + &qca->ibs_recv_wacks); + debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote); + debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on); + debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off); + debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote); + debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on); + debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off); + debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on); + debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off); + debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms); + debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms); + + /* read/write */ + mode = S_IRUGO | S_IWUSR; + debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans); + debugfs_create_u32("tx_idle_delay", mode, ibs_dir, + &qca->tx_idle_delay); +} + +/* Flush protocol data */ +static int qca_flush(struct hci_uart *hu) +{ + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p qca flush", hu); + + skb_queue_purge(&qca->tx_wait_q); + skb_queue_purge(&qca->txq); + + return 0; +} + +/* Close protocol */ +static int qca_close(struct hci_uart *hu) +{ + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p qca close", hu); + + 
serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu); + + skb_queue_purge(&qca->tx_wait_q); + skb_queue_purge(&qca->txq); + del_timer(&qca->tx_idle_timer); + del_timer(&qca->wake_retrans_timer); + destroy_workqueue(qca->workqueue); + qca->hu = NULL; + + kfree_skb(qca->rx_skb); + + hu->priv = NULL; + + kfree(qca); + + return 0; +} + +/* Called upon a wake-up-indication from the device. + */ +static void device_want_to_wakeup(struct hci_uart *hu) +{ + unsigned long flags; + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p want to wake up", hu); + + spin_lock_irqsave(&qca->hci_ibs_lock, flags); + + qca->ibs_recv_wakes++; + + switch (qca->rx_ibs_state) { + case HCI_IBS_RX_ASLEEP: + /* Make sure clock is on - we may have turned clock off since + * receiving the wake up indicator awake rx clock. + */ + queue_work(qca->workqueue, &qca->ws_awake_rx); + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + return; + + case HCI_IBS_RX_AWAKE: + /* Always acknowledge device wake up, + * sending IBS message doesn't count as TX ON. + */ + if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) { + BT_ERR("Failed to acknowledge device wake up"); + break; + } + qca->ibs_sent_wacks++; + break; + + default: + /* Any other state is illegal */ + BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d", + qca->rx_ibs_state); + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + + /* Actually send the packets */ + hci_uart_tx_wakeup(hu); +} + +/* Called upon a sleep-indication from the device. + */ +static void device_want_to_sleep(struct hci_uart *hu) +{ + unsigned long flags; + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p want to sleep", hu); + + spin_lock_irqsave(&qca->hci_ibs_lock, flags); + + qca->ibs_recv_slps++; + + switch (qca->rx_ibs_state) { + case HCI_IBS_RX_AWAKE: + /* Update state */ + qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; + /* Vote off rx clock under workqueue */ + queue_work(qca->workqueue, &qca->ws_rx_vote_off); + break; + + case HCI_IBS_RX_ASLEEP: + /* Fall through */ + + default: + /* Any other state is illegal */ + BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d", + qca->rx_ibs_state); + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); +} + +/* Called upon wake-up-acknowledgement from the device + */ +static void device_woke_up(struct hci_uart *hu) +{ + unsigned long flags, idle_delay; + struct qca_data *qca = hu->priv; + struct sk_buff *skb = NULL; + + BT_DBG("hu %p woke up", hu); + + spin_lock_irqsave(&qca->hci_ibs_lock, flags); + + qca->ibs_recv_wacks++; + + switch (qca->tx_ibs_state) { + case HCI_IBS_TX_AWAKE: + /* Expect one if we send 2 WAKEs */ + BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d", + qca->tx_ibs_state); + break; + + case HCI_IBS_TX_WAKING: + /* Send pending packets */ + while ((skb = skb_dequeue(&qca->tx_wait_q))) + skb_queue_tail(&qca->txq, skb); + + /* Switch timers and change state to HCI_IBS_TX_AWAKE */ + del_timer(&qca->wake_retrans_timer); + idle_delay = msecs_to_jiffies(qca->tx_idle_delay); + mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); + qca->tx_ibs_state = HCI_IBS_TX_AWAKE; + break; + + case HCI_IBS_TX_ASLEEP: + /* Fall through */ + + default: + BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d", + qca->tx_ibs_state); + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + + /* Actually send the packets */ + hci_uart_tx_wakeup(hu); +} + +/* Enqueue frame for transmittion (padding, crc, etc) may be called from + * two simultaneous tasklets. 
+ */ +static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + unsigned long flags = 0, idle_delay; + struct qca_data *qca = hu->priv; + + BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb, + qca->tx_ibs_state); + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + + /* Don't go to sleep in middle of patch download or + * Out-Of-Band(GPIOs control) sleep is selected. + */ + if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) { + skb_queue_tail(&qca->txq, skb); + return 0; + } + + spin_lock_irqsave(&qca->hci_ibs_lock, flags); + + /* Act according to current state */ + switch (qca->tx_ibs_state) { + case HCI_IBS_TX_AWAKE: + BT_DBG("Device awake, sending normally"); + skb_queue_tail(&qca->txq, skb); + idle_delay = msecs_to_jiffies(qca->tx_idle_delay); + mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); + break; + + case HCI_IBS_TX_ASLEEP: + BT_DBG("Device asleep, waking up and queueing packet"); + /* Save packet for later */ + skb_queue_tail(&qca->tx_wait_q, skb); + + qca->tx_ibs_state = HCI_IBS_TX_WAKING; + /* Schedule a work queue to wake up device */ + queue_work(qca->workqueue, &qca->ws_awake_device); + break; + + case HCI_IBS_TX_WAKING: + BT_DBG("Device waking up, queueing packet"); + /* Transient state; just keep packet for later */ + skb_queue_tail(&qca->tx_wait_q, skb); + break; + + default: + BT_ERR("Illegal tx state: %d (losing packet)", + qca->tx_ibs_state); + kfree_skb(skb); + break; + } + + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); + + return 0; +} + +static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + + BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND); + + device_want_to_sleep(hu); + + kfree_skb(skb); + return 0; +} + +static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + + BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND); + + device_want_to_wakeup(hu); + + kfree_skb(skb); + return 0; +} + +static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_uart *hu = hci_get_drvdata(hdev); + + BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK); + + device_woke_up(hu); + + kfree_skb(skb); + return 0; +} + +#define QCA_IBS_SLEEP_IND_EVENT \ + .type = HCI_IBS_SLEEP_IND, \ + .hlen = 0, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = HCI_MAX_IBS_SIZE + +#define QCA_IBS_WAKE_IND_EVENT \ + .type = HCI_IBS_WAKE_IND, \ + .hlen = 0, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = HCI_MAX_IBS_SIZE + +#define QCA_IBS_WAKE_ACK_EVENT \ + .type = HCI_IBS_WAKE_ACK, \ + .hlen = 0, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = HCI_MAX_IBS_SIZE + +static const struct h4_recv_pkt qca_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = hci_recv_frame }, + { QCA_IBS_WAKE_IND_EVENT, .recv = qca_ibs_wake_ind }, + { QCA_IBS_WAKE_ACK_EVENT, .recv = qca_ibs_wake_ack }, + { QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind }, +}; + +static int qca_recv(struct hci_uart *hu, const void *data, int count) +{ + struct qca_data *qca = hu->priv; + + if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) + return -EUNATCH; + + qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count, + qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts)); + if (IS_ERR(qca->rx_skb)) { + int err = PTR_ERR(qca->rx_skb); + BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err); + qca->rx_skb = NULL; + 
+
+static struct sk_buff *qca_dequeue(struct hci_uart *hu)
+{
+	struct qca_data *qca = hu->priv;
+
+	return skb_dequeue(&qca->txq);
+}
+
+static uint8_t qca_get_baudrate_value(int speed)
+{
+	switch (speed) {
+	case 9600:
+		return QCA_BAUDRATE_9600;
+	case 19200:
+		return QCA_BAUDRATE_19200;
+	case 38400:
+		return QCA_BAUDRATE_38400;
+	case 57600:
+		return QCA_BAUDRATE_57600;
+	case 115200:
+		return QCA_BAUDRATE_115200;
+	case 230400:
+		return QCA_BAUDRATE_230400;
+	case 460800:
+		return QCA_BAUDRATE_460800;
+	case 500000:
+		return QCA_BAUDRATE_500000;
+	case 921600:
+		return QCA_BAUDRATE_921600;
+	case 1000000:
+		return QCA_BAUDRATE_1000000;
+	case 2000000:
+		return QCA_BAUDRATE_2000000;
+	case 3000000:
+		return QCA_BAUDRATE_3000000;
+	case 3500000:
+		return QCA_BAUDRATE_3500000;
+	default:
+		return QCA_BAUDRATE_115200;
+	}
+}
+
+static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+	struct qca_data *qca = hu->priv;
+	struct sk_buff *skb;
+	u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
+
+	if (baudrate > QCA_BAUDRATE_3000000)
+		return -EINVAL;
+
+	cmd[4] = baudrate;
+
+	skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
+	if (!skb) {
+		BT_ERR("Failed to allocate memory for baudrate packet");
+		return -ENOMEM;
+	}
+
+	/* Assign commands to change baudrate and packet type. */
+	memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
+	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+
+	skb_queue_tail(&qca->txq, skb);
+	hci_uart_tx_wakeup(hu);
+
+	/* Wait 300 ms for the controller to apply the new baudrate. The
+	 * controller comes back once it has processed this HCI command;
+	 * only then can the host talk to it at the new baudrate.
+	 */
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	return 0;
+}
+
+static int qca_setup(struct hci_uart *hu)
+{
+	struct hci_dev *hdev = hu->hdev;
+	struct qca_data *qca = hu->priv;
+	unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
+	int ret;
+
+	BT_INFO("%s: ROME setup", hdev->name);
+
+	/* Patch downloading has to be done without IBS mode */
+	clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+
+	/* Setup initial baudrate */
+	speed = 0;
+	if (hu->init_speed)
+		speed = hu->init_speed;
+	else if (hu->proto->init_speed)
+		speed = hu->proto->init_speed;
+
+	if (speed)
+		hci_uart_set_baudrate(hu, speed);
+
+	/* Setup user speed if needed */
+	speed = 0;
+	if (hu->oper_speed)
+		speed = hu->oper_speed;
+	else if (hu->proto->oper_speed)
+		speed = hu->proto->oper_speed;
+
+	if (speed) {
+		qca_baudrate = qca_get_baudrate_value(speed);
+
+		BT_INFO("%s: Set UART speed to %d", hdev->name, speed);
+		ret = qca_set_baudrate(hdev, qca_baudrate);
+		if (ret) {
+			BT_ERR("%s: Failed to change the baud rate (%d)",
+			       hdev->name, ret);
+			return ret;
+		}
+		hci_uart_set_baudrate(hu, speed);
+	}
+
+	/* Setup patch / NVM configurations */
+	ret = qca_uart_setup_rome(hdev, qca_baudrate);
+	if (!ret) {
+		set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+		qca_debugfs_init(hdev);
+	}
+
+	/* Setup bdaddr */
+	hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
+
+	return ret;
+}
+
+static struct hci_uart_proto qca_proto = {
+	.id		= HCI_UART_QCA,
+	.name		= "QCA",
+	.init_speed	= 115200,
+	.oper_speed	= 3000000,
+	.open		= qca_open,
+	.close		= qca_close,
+	.flush		= qca_flush,
+	.setup		= qca_setup,
+	.recv		= qca_recv,
+	.enqueue	= qca_enqueue,
+	.dequeue	= qca_dequeue,
+};
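With qca_proto filled in, the registration below exposes the new protocol to userspace through the standard N_HCI line discipline. A hedged sketch of how a userspace tool would attach it, hciattach-style; the ioctl and constants follow the UAPI conventions used by hci_uart.h, a real tool would also put the tty into raw mode at init_speed first, and error handling is trimmed.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>	/* _IOW */
#include <unistd.h>

#define N_HCI			15
#define HCIUARTSETPROTO		_IOW('U', 200, int)
#define HCI_UART_QCA		8	/* from hci_uart.h above */

/* Returns the open tty fd on success; keep it open, since closing it
 * detaches the line discipline and tears the hci device down. */
int attach_qca(const char *tty)
{
	int ldisc = N_HCI, proto = HCI_UART_QCA;
	int fd = open(tty, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, TIOCSETD, &ldisc) < 0 ||
	    ioctl(fd, HCIUARTSETPROTO, &proto) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}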
+
+int __init qca_init(void)
+{
+	return hci_uart_register_proto(&qca_proto);
+}
+
+int __exit qca_deinit(void)
+{
+	return hci_uart_unregister_proto(&qca_proto);
+}
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 496587a73a9d..495b9ef52bb0 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -35,7 +35,7 @@
 #define HCIUARTGETFLAGS		_IOR('U', 204, int)
 
 /* UART protocols */
-#define HCI_UART_MAX_PROTO	8
+#define HCI_UART_MAX_PROTO	9
 
 #define HCI_UART_H4	0
 #define HCI_UART_BCSP	1
@@ -45,6 +45,7 @@
 #define HCI_UART_ATH3K	5
 #define HCI_UART_INTEL	6
 #define HCI_UART_BCM	7
+#define HCI_UART_QCA	8
 
 #define HCI_UART_RAW_DEVICE	0
 #define HCI_UART_RESET_ON_INIT	1
@@ -176,3 +177,8 @@ int intel_deinit(void);
 int bcm_init(void);
 int bcm_deinit(void);
 #endif
+
+#ifdef CONFIG_BT_HCIUART_QCA
+int qca_init(void);
+int qca_deinit(void);
+#endif
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index da8faf78536a..5643b65cee20 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
 static void start_khwrngd(void)
 {
 	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
-	if (hwrng_fill == ERR_PTR(-ENOMEM)) {
+	if (IS_ERR(hwrng_fill)) {
 		pr_err("hwrng_fill thread creation failed");
 		hwrng_fill = NULL;
 	}
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index e362860c2b50..cd593c1f66dc 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -20,7 +20,7 @@
 #include
 #include
 
-#include
+#include
 
 static uint nowait;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 7ba495f75370..402631a19a11 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -905,7 +905,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
 		crypt->mode |= NPE_OP_NOT_IN_PLACE;
 		/* This was never tested by Intel
 		 * for more than one dst buffer, I think.
*/ - BUG_ON(req->dst->length < nbytes); req_ctx->dst = NULL; if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook, flags, DMA_FROM_DEVICE)) diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 067402c7c2a9..df427c0e9e7b 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -73,7 +73,8 @@ ICP_QAT_HW_CIPHER_KEY_CONVERT, \ ICP_QAT_HW_CIPHER_DECRYPT) -static atomic_t active_dev; +static DEFINE_MUTEX(algs_lock); +static unsigned int active_devs; struct qat_alg_buf { uint32_t len; @@ -1280,7 +1281,10 @@ static struct crypto_alg qat_algs[] = { { int qat_algs_register(void) { - if (atomic_add_return(1, &active_dev) == 1) { + int ret = 0; + + mutex_lock(&algs_lock); + if (++active_devs == 1) { int i; for (i = 0; i < ARRAY_SIZE(qat_algs); i++) @@ -1289,21 +1293,25 @@ int qat_algs_register(void) CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC : CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; - return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); + ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); } - return 0; + mutex_unlock(&algs_lock); + return ret; } int qat_algs_unregister(void) { - if (atomic_sub_return(1, &active_dev) == 0) - return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); - return 0; + int ret = 0; + + mutex_lock(&algs_lock); + if (--active_devs == 0) + ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); + mutex_unlock(&algs_lock); + return ret; } int qat_algs_init(void) { - atomic_set(&active_dev, 0); crypto_get_default_rng(); return 0; } diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 59892126d175..d3629b7482dd 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -48,6 +48,8 @@ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) +#define ATC_MAX_DSCR_TRIALS 10 + /* * Initial number of descriptors to allocate for each channel. This could * be increased during dma usage. @@ -285,28 +287,19 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan, * * @current_len: the number of bytes left before reading CTRLA * @ctrla: the value of CTRLA - * @desc: the descriptor containing the transfer width */ -static inline int atc_calc_bytes_left(int current_len, u32 ctrla, - struct at_desc *desc) +static inline int atc_calc_bytes_left(int current_len, u32 ctrla) { - return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width); -} + u32 btsize = (ctrla & ATC_BTSIZE_MAX); + u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla); -/** - * atc_calc_bytes_left_from_reg - calculates the number of bytes left according - * to the current value of CTRLA. - * - * @current_len: the number of bytes left before reading CTRLA - * @atchan: the channel to read CTRLA for - * @desc: the descriptor containing the transfer width - */ -static inline int atc_calc_bytes_left_from_reg(int current_len, - struct at_dma_chan *atchan, struct at_desc *desc) -{ - u32 ctrla = channel_readl(atchan, CTRLA); - - return atc_calc_bytes_left(current_len, ctrla, desc); + /* + * According to the datasheet, when reading the Control A Register + * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the + * number of transfers completed on the Source Interface. + * So btsize is always a number of source width transfers. 
+	 */
+	return current_len - (btsize << src_width);
 }
 
 /**
@@ -320,7 +313,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 	struct at_desc *desc_first = atc_first_active(atchan);
 	struct at_desc *desc;
 	int ret;
-	u32 ctrla, dscr;
+	u32 ctrla, dscr, trials;
 
 	/*
 	 * If the cookie doesn't match to the currently running transfer then
@@ -346,15 +339,82 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
 		 * the channel's DSCR register and compare it against the value
 		 * of the hardware linked list structure of each child
		 * descriptor.
+		 *
+		 * The CTRLA register provides us with the amount of data
+		 * already read from the source for the current child
+		 * descriptor. So we can compute a more accurate residue by also
+		 * removing the number of bytes corresponding to this amount of
+		 * data.
+		 *
+		 * However, the DSCR and CTRLA registers cannot both be read
+		 * atomically. Hence a race condition may occur: the first
+		 * register read may refer to one child descriptor whereas the
+		 * second read may refer to a later child descriptor in the
+		 * list, because of the DMA transfer progression in between the
+		 * two reads.
+		 *
+		 * One solution could have been to pause the DMA transfer, read
+		 * the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
+		 * this approach presents some drawbacks:
+		 * - If the DMA transfer is paused, RX overruns or TX underruns
+		 *   are more likely to occur depending on the system latency.
+		 *   Taking the USART driver as an example, it uses a cyclic DMA
+		 *   transfer to read data from the Receive Holding Register
+		 *   (RHR) to avoid RX overruns since the RHR is not protected
+		 *   by any FIFO on most Atmel SoCs. So pausing the DMA transfer
+		 *   to compute the residue would break the USART driver design.
+		 * - The atc_pause() function masks interrupts but we'd rather
+		 *   avoid doing so for system latency reasons.
+		 *
+		 * Then we'd rather use another solution: the DSCR is read a
+		 * first time, the CTRLA is read in turn, next the DSCR is read
+		 * a second time. If the two consecutive read values of the DSCR
+		 * are the same then we assume both refer to the very same
+		 * child descriptor, as does the CTRLA value read in between.
+		 * For cyclic transfers, the assumption is that a full loop
+		 * is "not so fast".
+		 * If the two DSCR values are different, we read the CTRLA and
+		 * then the DSCR again, until two consecutive read values of
+		 * DSCR are equal or the maximum number of trials is reached.
+		 * This algorithm is very unlikely to fail to find a stable
+		 * value for DSCR.
 		 */
-		ctrla = channel_readl(atchan, CTRLA);
-		rmb(); /* ensure CTRLA is read before DSCR */
 		dscr = channel_readl(atchan, DSCR);
+		rmb(); /* ensure DSCR is read before CTRLA */
+		ctrla = channel_readl(atchan, CTRLA);
+		for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+			u32 new_dscr;
+
+			rmb(); /* ensure DSCR is read after CTRLA */
+			new_dscr = channel_readl(atchan, DSCR);
+
+			/*
+			 * If the DSCR register value has not changed inside the
+			 * DMA controller since the previous read, we assume
+			 * that both the dscr and ctrla values refer to the
+			 * very same descriptor.
+			 */
+			if (likely(new_dscr == dscr))
+				break;
+
+			/*
+			 * DSCR has changed inside the DMA controller, so the
+			 * previously read value of CTRLA may refer to an
+			 * already processed descriptor and hence could be
+			 * outdated. We need to update ctrla to match the
+			 * current descriptor.
+ */ + dscr = new_dscr; + rmb(); /* ensure DSCR is read before CTRLA */ + ctrla = channel_readl(atchan, CTRLA); + } + if (unlikely(trials >= ATC_MAX_DSCR_TRIALS)) + return -ETIMEDOUT; /* for the first descriptor we can be more accurate */ if (desc_first->lli.dscr == dscr) - return atc_calc_bytes_left(ret, ctrla, desc_first); + return atc_calc_bytes_left(ret, ctrla); ret -= desc_first->len; list_for_each_entry(desc, &desc_first->tx_list, desc_node) { @@ -365,16 +425,14 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) } /* - * For the last descriptor in the chain we can calculate + * For the current descriptor in the chain we can calculate * the remaining bytes using the channel's register. - * Note that the transfer width of the first and last - * descriptor may differ. */ - if (!desc->lli.dscr) - ret = atc_calc_bytes_left_from_reg(ret, atchan, desc); + ret = atc_calc_bytes_left(ret, ctrla); } else { /* single transfer */ - ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first); + ctrla = channel_readl(atchan, CTRLA); + ret = atc_calc_bytes_left(ret, ctrla); } return ret; @@ -726,7 +784,6 @@ atc_prep_dma_interleaved(struct dma_chan *chan, desc->txd.cookie = -EBUSY; desc->total_len = desc->len = len; - desc->tx_width = dwidth; /* set end-of-link to the last link descriptor of list*/ set_desc_eol(desc); @@ -804,10 +861,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, first->txd.cookie = -EBUSY; first->total_len = len; - /* set transfer width for the calculation of the residue */ - first->tx_width = src_width; - prev->tx_width = src_width; - /* set end-of-link to the last link descriptor of list*/ set_desc_eol(desc); @@ -956,10 +1009,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, first->txd.cookie = -EBUSY; first->total_len = total_len; - /* set transfer width for the calculation of the residue */ - first->tx_width = reg_width; - prev->tx_width = reg_width; - /* first link descriptor of list is responsible of flags */ first->txd.flags = flags; /* client is in control of this ack */ @@ -1077,12 +1126,6 @@ atc_prep_dma_sg(struct dma_chan *chan, desc->txd.cookie = 0; desc->len = len; - /* - * Although we only need the transfer width for the first and - * the last descriptor, its easier to set it to all descriptors. 
- */ - desc->tx_width = src_width; - atc_desc_chain(&first, &prev, desc); /* update the lengths and addresses for the next loop cycle */ @@ -1256,7 +1299,6 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, /* First descriptor of the chain embedds additional information */ first->txd.cookie = -EBUSY; first->total_len = buf_len; - first->tx_width = reg_width; return &first->txd; diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index bc8d5ebedd19..7f5a08230f76 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -112,6 +112,7 @@ #define ATC_SRC_WIDTH_BYTE (0x0 << 24) #define ATC_SRC_WIDTH_HALFWORD (0x1 << 24) #define ATC_SRC_WIDTH_WORD (0x2 << 24) +#define ATC_REG_TO_SRC_WIDTH(r) (((r) >> 24) & 0x3) #define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */ #define ATC_DST_WIDTH(x) ((x) << 28) #define ATC_DST_WIDTH_BYTE (0x0 << 28) @@ -182,7 +183,6 @@ struct at_lli { * @txd: support for the async_tx api * @desc_node: node on the channed descriptors list * @len: descriptor byte count - * @tx_width: transfer width * @total_len: total transaction byte count */ struct at_desc { @@ -194,7 +194,6 @@ struct at_desc { struct dma_async_tx_descriptor txd; struct list_head desc_node; size_t len; - u32 tx_width; size_t total_len; /* Interleaved data */ diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index cf1213de7865..40afa2a16cfc 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -359,18 +359,19 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, * descriptor view 2 since some fields of the configuration register * depend on transfer size and src/dest addresses. */ - if (at_xdmac_chan_is_cyclic(atchan)) { + if (at_xdmac_chan_is_cyclic(atchan)) reg = AT_XDMAC_CNDC_NDVIEW_NDV1; - at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); - } else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) { + else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) reg = AT_XDMAC_CNDC_NDVIEW_NDV3; - } else { - /* - * No need to write AT_XDMAC_CC reg, it will be done when the - * descriptor is fecthed. - */ + else reg = AT_XDMAC_CNDC_NDVIEW_NDV2; - } + /* + * Even if the register will be updated from the configuration in the + * descriptor when using view 2 or higher, the PROT bit won't be set + * properly. This bit can be modified only by using the channel + * configuration register. + */ + at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); reg |= AT_XDMAC_CNDC_NDDUP | AT_XDMAC_CNDC_NDSUP @@ -681,15 +682,16 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, desc->lld.mbr_sa = mem; desc->lld.mbr_da = atchan->sconfig.dst_addr; } - desc->lld.mbr_cfg = atchan->cfg; - dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); + dwidth = at_xdmac_get_dwidth(atchan->cfg); fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) - ? at_xdmac_get_dwidth(desc->lld.mbr_cfg) + ? 
dwidth : AT_XDMAC_CC_DWIDTH_BYTE; desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ | (len >> fixed_dwidth); /* microblock length */ + desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) | + AT_XDMAC_CC_DWIDTH(fixed_dwidth); dev_dbg(chan2dev(chan), "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index fbaf1ead2597..f1325f62563e 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -162,10 +162,11 @@ static void mv_chan_set_mode(struct mv_xor_chan *chan, config &= ~0x7; config |= op_mode; - if (IS_ENABLED(__BIG_ENDIAN)) - config |= XOR_DESCRIPTOR_SWAP; - else - config &= ~XOR_DESCRIPTOR_SWAP; +#if defined(__BIG_ENDIAN) + config |= XOR_DESCRIPTOR_SWAP; +#else + config &= ~XOR_DESCRIPTOR_SWAP; +#endif writel_relaxed(config, XOR_CONFIG(chan)); chan->current_type = type; diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index f513f77b1d85..ecab4ea059b4 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) desc->txd.callback = last->txd.callback; desc->txd.callback_param = last->txd.callback_param; } - last->last = false; + desc->last = false; dma_cookie_assign(&desc->txd); @@ -2623,6 +2623,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, desc->rqcfg.brst_len = 1; desc->rqcfg.brst_len = get_burst_len(desc, len); + desc->bytes_requested = len; desc->txd.flags = flags; diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index 7d2c17d8d30f..6f80432a3f0a 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_irqsave(&vc->lock, flags); cookie = dma_cookie_assign(tx); - list_move_tail(&vd->node, &vc->desc_submitted); + list_add_tail(&vd->node, &vc->desc_submitted); spin_unlock_irqrestore(&vc->lock, flags); dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", @@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg) cb_data = vd->tx.callback_param; list_del(&vd->node); - if (async_tx_test_ack(&vd->tx)) - list_add(&vd->node, &vc->desc_allocated); - else - vc->desc_free(vd); + + vc->desc_free(vd); if (cb) cb(cb_data); @@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) while (!list_empty(head)) { struct virt_dma_desc *vd = list_first_entry(head, struct virt_dma_desc, node); - if (async_tx_test_ack(&vd->tx)) { - list_move_tail(&vd->node, &vc->desc_allocated); - } else { - dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); - list_del(&vd->node); - vc->desc_free(vd); - } + list_del(&vd->node); + dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); + vc->desc_free(vd); } } EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); @@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) dma_cookie_init(&vc->chan); spin_lock_init(&vc->lock); - INIT_LIST_HEAD(&vc->desc_allocated); INIT_LIST_HEAD(&vc->desc_submitted); INIT_LIST_HEAD(&vc->desc_issued); INIT_LIST_HEAD(&vc->desc_completed); diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index 189e75dbcb15..181b95267866 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -29,7 +29,6 @@ struct virt_dma_chan { 
spinlock_t lock; /* protected by vc.lock */ - struct list_head desc_allocated; struct list_head desc_submitted; struct list_head desc_issued; struct list_head desc_completed; @@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan struct virt_dma_desc *vd, unsigned long tx_flags) { extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); - unsigned long flags; dma_async_tx_descriptor_init(&vd->tx, &vc->chan); vd->tx.flags = tx_flags; vd->tx.tx_submit = vchan_tx_submit; - spin_lock_irqsave(&vc->lock, flags); - list_add_tail(&vd->node, &vc->desc_allocated); - spin_unlock_irqrestore(&vc->lock, flags); - return &vd->tx; } @@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) } /** - * vchan_get_all_descriptors - obtain all allocated, submitted and issued - * descriptors + * vchan_get_all_descriptors - obtain all submitted and issued descriptors * vc: virtual channel to get descriptors from * head: list of descriptors found * @@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, struct list_head *head) { - list_splice_tail_init(&vc->desc_allocated, head); list_splice_tail_init(&vc->desc_submitted, head); list_splice_tail_init(&vc->desc_issued, head); list_splice_tail_init(&vc->desc_completed, head); @@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) { - struct virt_dma_desc *vd; unsigned long flags; LIST_HEAD(head); spin_lock_irqsave(&vc->lock, flags); vchan_get_all_descriptors(vc, &head); - list_for_each_entry(vd, &head, node) - async_tx_clear_ack(&vd->tx); spin_unlock_irqrestore(&vc->lock, flags); vchan_dma_desc_free_list(vc, &head); diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index 620fd55ec766..dff22ab01851 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -111,6 +111,7 @@ #define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070 #define XGENE_DMA_BLK_MEM_RDY 0xD074 #define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF +#define XGENE_DMA_RING_CMD_SM_OFFSET 0x8000 /* X-Gene SoC EFUSE csr register and bit defination */ #define XGENE_SOC_JTAG1_SHADOW 0x18 @@ -1887,6 +1888,8 @@ static int xgene_dma_get_resources(struct platform_device *pdev, return -ENOMEM; } + pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET; + /* Get efuse csr region */ res = platform_get_resource(pdev, IORESOURCE_MEM, 3); if (!res) { diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index 3515b381c131..711d8ad74f11 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c @@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1) */ for (row = 0; row < mci->nr_csrows; row++) { - struct csrow_info *csi = &mci->csrows[row]; + struct csrow_info *csi = mci->csrows[row]; /* * Get the configuration settings for this diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c index 080d5cc27055..eebdf2a33bfe 100644 --- a/drivers/extcon/extcon-palmas.c +++ b/drivers/extcon/extcon-palmas.c @@ -200,7 +200,6 @@ static int palmas_usb_probe(struct platform_device *pdev) status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev); if (status) { dev_err(&pdev->dev, "failed to register extcon device\n"); - kfree(palmas_usb->edev->name); return status; } @@ -214,7 +213,6 @@ static int palmas_usb_probe(struct 
platform_device *pdev)
 		if (status < 0) {
 			dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
 				palmas_usb->id_irq, status);
-			kfree(palmas_usb->edev->name);
 			return status;
 		}
 	}
@@ -229,7 +227,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
 		if (status < 0) {
 			dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
 				palmas_usb->vbus_irq, status);
-			kfree(palmas_usb->edev->name);
 			return status;
 		}
 	}
@@ -239,15 +236,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
 	return 0;
 }
 
-static int palmas_usb_remove(struct platform_device *pdev)
-{
-	struct palmas_usb *palmas_usb = platform_get_drvdata(pdev);
-
-	kfree(palmas_usb->edev->name);
-
-	return 0;
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int palmas_usb_suspend(struct device *dev)
 {
@@ -288,7 +276,6 @@ static const struct of_device_id of_palmas_match_tbl[] = {
 
 static struct platform_driver palmas_usb_driver = {
 	.probe = palmas_usb_probe,
-	.remove = palmas_usb_remove,
 	.driver = {
 		.name = "palmas-usb",
 		.of_match_table = of_palmas_match_tbl,
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 76157ab9faf3..43b57b02d050 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -124,25 +124,35 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id)
 	return -EINVAL;
 }
 
-static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
+static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
 {
-	unsigned int id = EXTCON_NONE;
+	int id = -EINVAL;
 	int i = 0;
 
-	if (edev->max_supported == 0)
-		return -EINVAL;
-
-	/* Find the the number of extcon cable */
+	/* Find the id of extcon cable */
 	while (extcon_name[i]) {
 		if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) {
 			id = i;
 			break;
 		}
+		i++;
 	}
 
-	if (id == EXTCON_NONE)
+	return id;
+}
+
+static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
+{
+	int id;
+
+	if (edev->max_supported == 0)
 		return -EINVAL;
 
+	/* Find the id of the extcon cable */
+	id = find_cable_id_by_name(edev, name);
+	if (id < 0)
+		return id;
+
 	return find_cable_index_by_id(edev, id);
 }
 
@@ -228,9 +238,11 @@ static ssize_t cable_state_show(struct device *dev,
 	struct extcon_cable *cable = container_of(attr, struct extcon_cable,
 						  attr_state);
 
+	int i = cable->cable_index;
+
 	return sprintf(buf, "%d\n",
 		       extcon_get_cable_state_(cable->edev,
-					       cable->cable_index));
+					       cable->edev->supported_cable[i]));
 }
 
 /**
@@ -263,20 +275,25 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
 	spin_lock_irqsave(&edev->lock, flags);
 
 	if (edev->state != ((edev->state & ~mask) | (state & mask))) {
+		u32 old_state;
+
 		if (check_mutually_exclusive(edev,
 					     (edev->state & ~mask) | (state & mask))) {
 			spin_unlock_irqrestore(&edev->lock, flags);
 			return -EPERM;
 		}
 
-		for (index = 0; index < edev->max_supported; index++) {
-			if (is_extcon_changed(edev->state, state, index, &attached))
-				raw_notifier_call_chain(&edev->nh[index], attached, edev);
-		}
-
+		old_state = edev->state;
 		edev->state &= ~mask;
 		edev->state |= state & mask;
 
+		for (index = 0; index < edev->max_supported; index++) {
+			if (is_extcon_changed(old_state, edev->state, index,
+					      &attached))
+				raw_notifier_call_chain(&edev->nh[index],
+							attached, edev);
+		}
+
 		/* This could be in interrupt handler */
 		prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
 		if (prop_buf) {
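A side note on the `int id` locals above: the lookup helpers funnel a negative errno back through `id`, and the `if (id < 0)` tests only work because `id` is signed; with an `unsigned int` the error branch could never be taken. A minimal standalone illustration in plain C, not part of the patch:

#include <stdio.h>

#define EINVAL 22

/* Stand-in for find_cable_id_by_name(): index on success,
 * negative errno on failure. */
static int lookup_id(int found)
{
	return found ? 3 : -EINVAL;
}

int main(void)
{
	unsigned int uid = lookup_id(0);	/* -22 wraps to 4294967274 */
	int id = lookup_id(0);

	if (uid < 0)		/* always false: unsigned is never < 0 */
		puts("never printed");
	if (id < 0)		/* the intended error path */
		printf("error: %d\n", id);
	printf("uid misread as %u\n", uid);
	return 0;
}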
@@ -361,8 +378,13 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
  */
 int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
 {
-	return extcon_get_cable_state_(edev, find_cable_index_by_name
-			(edev, cable_name));
+	int id;
+
+	id = find_cable_id_by_name(edev, cable_name);
+	if (id < 0)
+		return id;
+
+	return extcon_get_cable_state_(edev, id);
 }
 EXPORT_SYMBOL_GPL(extcon_get_cable_state);
 
@@ -404,8 +426,13 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
 int extcon_set_cable_state(struct extcon_dev *edev,
 			const char *cable_name, bool cable_state)
 {
-	return extcon_set_cable_state_(edev, find_cable_index_by_name
-			(edev, cable_name), cable_state);
+	int id;
+
+	id = find_cable_id_by_name(edev, cable_name);
+	if (id < 0)
+		return id;
+
+	return extcon_set_cable_state_(edev, id, cable_state);
 }
 EXPORT_SYMBOL_GPL(extcon_set_cable_state);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 9fa8084a7c8d..d6144e3b97c5 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -58,6 +58,11 @@ bool efi_runtime_disabled(void)
 
 static int __init parse_efi_cmdline(char *str)
 {
+	if (!str) {
+		pr_warn("need at least one option\n");
+		return -EINVAL;
+	}
+
 	if (parse_option_str(str, "noruntime"))
 		disable_runtime = true;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 31b00f91cfcd..f7b49d5ce4b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1130,6 +1130,9 @@ struct amdgpu_gfx {
 	uint32_t me_feature_version;
 	uint32_t ce_feature_version;
 	uint32_t pfp_feature_version;
+	uint32_t rlc_feature_version;
+	uint32_t mec_feature_version;
+	uint32_t mec2_feature_version;
 	struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
 	unsigned num_gfx_rings;
 	struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
@@ -1639,6 +1642,7 @@ struct amdgpu_sdma {
 	/* SDMA firmware */
 	const struct firmware	*fw;
 	uint32_t		fw_version;
+	uint32_t		feature_version;
 
 	struct amdgpu_ring	ring;
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 9736892bcdf9..3bfe67de8349 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -317,16 +317,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		break;
 	case AMDGPU_INFO_FW_GFX_RLC:
 		fw_info.ver = adev->gfx.rlc_fw_version;
-		fw_info.feature = 0;
+		fw_info.feature = adev->gfx.rlc_feature_version;
 		break;
 	case AMDGPU_INFO_FW_GFX_MEC:
-		if (info->query_fw.index == 0)
+		if (info->query_fw.index == 0) {
 			fw_info.ver = adev->gfx.mec_fw_version;
-		else if (info->query_fw.index == 1)
+			fw_info.feature = adev->gfx.mec_feature_version;
+		} else if (info->query_fw.index == 1) {
 			fw_info.ver = adev->gfx.mec2_fw_version;
-		else
+			fw_info.feature = adev->gfx.mec2_feature_version;
+		} else
 			return -EINVAL;
-		fw_info.feature = 0;
 		break;
 	case AMDGPU_INFO_FW_SMC:
 		fw_info.ver = adev->pm.fw_version;
@@ -336,7 +337,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		if (info->query_fw.index >= 2)
 			return -EINVAL;
 		fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
-		fw_info.feature = 0;
+		fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
 		break;
 	default:
 		return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2f7a5efa21c2..f5c22556ec2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -374,7 +374,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
 	unsigned height_in_mb = ALIGN(height / 16, 2);
 	unsigned fs_in_mb = width_in_mb
* height_in_mb; - unsigned image_size, tmp, min_dpb_size, num_dpb_buffer; + unsigned image_size, tmp, min_dpb_size, num_dpb_buffer, min_ctx_size; image_size = width * height; image_size += image_size / 2; @@ -466,6 +466,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2; min_dpb_size = image_size * num_dpb_buffer; + min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16) + * 16 * num_dpb_buffer + 52 * 1024; break; default: @@ -486,6 +488,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) buf_sizes[0x1] = dpb_size; buf_sizes[0x2] = image_size; + buf_sizes[0x4] = min_ctx_size; return 0; } @@ -628,6 +631,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) return -EINVAL; } + } else if (cmd == 0x206) { + if ((end - start) < ctx->buf_sizes[4]) { + DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, + (unsigned)(end - start), + ctx->buf_sizes[4]); + return -EINVAL; + } } else if ((cmd != 0x100) && (cmd != 0x204)) { DRM_ERROR("invalid UVD command %X!\n", cmd); return -EINVAL; @@ -755,9 +765,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) struct amdgpu_uvd_cs_ctx ctx = {}; unsigned buf_sizes[] = { [0x00000000] = 2048, - [0x00000001] = 32 * 1024 * 1024, - [0x00000002] = 2048 * 1152 * 3, + [0x00000001] = 0xFFFFFFFF, + [0x00000002] = 0xFFFFFFFF, [0x00000003] = 2048, + [0x00000004] = 0xFFFFFFFF, }; struct amdgpu_ib *ib = &parser->ibs[ib_idx]; int r; diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index ab83cc1ca4cc..15df46c93f0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -500,6 +500,7 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev) amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); fw_data = (const __le32 *) (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 2db6ab0a543d..0d8bf2cb1956 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -3080,6 +3080,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); + adev->gfx.mec_feature_version = le32_to_cpu( + mec_hdr->ucode_feature_version); gfx_v7_0_cp_compute_enable(adev, false); @@ -3102,6 +3104,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); + adev->gfx.mec2_feature_version = le32_to_cpu( + mec2_hdr->ucode_feature_version); /* MEC2 */ fw_data = (const __le32 *) @@ -4066,6 +4070,8 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; amdgpu_ucode_print_rlc_hdr(&hdr->header); adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); + 
adev->gfx.rlc_feature_version = le32_to_cpu( + hdr->ucode_feature_version); gfx_v7_0_rlc_stop(adev); @@ -5122,7 +5128,7 @@ static void gfx_v7_0_print_status(void *handle) dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n", RREG32(mmCP_HPD_EOP_CONTROL)); - for (queue = 0; queue < 8; i++) { + for (queue = 0; queue < 8; queue++) { cik_srbm_select(adev, me, pipe, queue, 0); dev_info(adev->dev, " queue: %d\n", queue); dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n", diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 9e1d4ddbf475..20e2cfd521d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -587,6 +587,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) int err; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; + const struct gfx_firmware_header_v1_0 *cp_hdr; DRM_DEBUG("\n"); @@ -611,6 +612,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.pfp_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; + adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); @@ -619,6 +623,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.me_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; + adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); @@ -627,12 +634,18 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.ce_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; + adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); if (err) goto out; err = amdgpu_ucode_validate(adev->gfx.rlc_fw); + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; + adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); @@ -641,6 +654,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.mec_fw); if (err) goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); @@ -648,6 +664,12 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->gfx.mec2_fw); if (err) goto out; + cp_hdr = (const 
struct gfx_firmware_header_v1_0 *) + adev->gfx.mec2_fw->data; + adev->gfx.mec2_fw_version = le32_to_cpu( + cp_hdr->header.ucode_version); + adev->gfx.mec2_feature_version = le32_to_cpu( + cp_hdr->ucode_feature_version); } else { err = 0; adev->gfx.mec2_fw = NULL; @@ -1983,6 +2005,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) adev->gfx.config.max_shader_engines = 1; adev->gfx.config.max_tile_pipes = 2; adev->gfx.config.max_sh_per_se = 1; + adev->gfx.config.max_backends_per_se = 2; switch (adev->pdev->revision) { case 0xc4: @@ -1991,7 +2014,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) case 0xcc: /* B10 */ adev->gfx.config.max_cu_per_sh = 8; - adev->gfx.config.max_backends_per_se = 2; break; case 0xc5: case 0x81: @@ -2000,14 +2022,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) case 0xcd: /* B8 */ adev->gfx.config.max_cu_per_sh = 6; - adev->gfx.config.max_backends_per_se = 2; break; case 0xc6: case 0xca: case 0xce: /* B6 */ adev->gfx.config.max_cu_per_sh = 6; - adev->gfx.config.max_backends_per_se = 2; break; case 0xc7: case 0x87: @@ -2015,7 +2035,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) default: /* B4 */ adev->gfx.config.max_cu_per_sh = 4; - adev->gfx.config.max_backends_per_se = 1; break; } @@ -2275,7 +2294,6 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev) hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; amdgpu_ucode_print_rlc_hdr(&hdr->header); - adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); @@ -2361,12 +2379,6 @@ static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev) amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); amdgpu_ucode_print_gfx_hdr(&me_hdr->header); - adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version); - adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version); - adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version); - adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version); - adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version); - adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version); gfx_v8_0_cp_gfx_enable(adev, false); @@ -2622,7 +2634,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); - adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); fw_data = (const __le32 *) (adev->gfx.mec_fw->data + @@ -2641,7 +2652,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev) mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); - adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); fw_data = (const __le32 *) (adev->gfx.mec2_fw->data + @@ -3125,7 +3135,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2); WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, - 0x7FFFF << 2); + AMDGPU_DOORBELL_MEC_RING7 << 2); } tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index d7895885fe0c..a988dfb1d394 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -121,6 +121,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev) int err, i; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; + const struct sdma_firmware_header_v1_0 *hdr; DRM_DEBUG("\n"); @@ -142,6 +143,9 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->sdma[i].fw); if (err) goto out; + hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; + adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); if (adev->firmware.smu_load) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; @@ -541,8 +545,6 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev) hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; - adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); - fw_data = (const __le32 *) (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 7bb37b93993f..2b86569b18d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -159,6 +159,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) int err, i; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; + const struct sdma_firmware_header_v1_0 *hdr; DRM_DEBUG("\n"); @@ -183,6 +184,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_validate(adev->sdma[i].fw); if (err) goto out; + hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; + adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); + adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); if (adev->firmware.smu_load) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; @@ -630,8 +634,6 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev) hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; amdgpu_ucode_print_sdma_hdr(&hdr->header); fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; - adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); - fw_data = (const __le32 *) (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 5b59d5ad7d1c..9dcc7280e572 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -196,7 +196,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) } funcs = connector->helper_private; - new_encoder = funcs->best_encoder(connector); + + if (funcs->atomic_best_encoder) + new_encoder = funcs->atomic_best_encoder(connector, + connector_state); + else + new_encoder = funcs->best_encoder(connector); if (!new_encoder) { DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", @@ -229,6 +234,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) } } + if (WARN_ON(!connector_state->crtc)) + return -EINVAL; + connector_state->best_encoder = new_encoder; idx = drm_crtc_index(connector_state->crtc); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c 
b/drivers/gpu/drm/drm_dp_mst_topology.c
index 778bbb6425b8..b0487c9f018c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1294,7 +1294,6 @@ retry:
 			goto retry;
 		}
 		DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
-		WARN(1, "fail\n");
 		return -EIO;
 	}
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f9cc68fbd2a3..b50fa0afd907 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -75,7 +75,7 @@ module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600)
 module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
 
 static void store_vblank(struct drm_device *dev, int crtc,
-			 unsigned vblank_count_inc,
+			 u32 vblank_count_inc,
 			 struct timeval *t_vblank)
 {
 	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index fe1599d75f14..424228be79ae 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -606,8 +606,6 @@ static void
 tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr,
 		 uint8_t *buf, size_t size)
 {
-	buf[PB(0)] = tda998x_cksum(buf, size);
-
 	reg_clear(priv, REG_DIP_IF_FLAGS, bit);
 	reg_write_range(priv, addr, buf, size);
 	reg_set(priv, REG_DIP_IF_FLAGS, bit);
@@ -627,6 +625,8 @@ tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
 	buf[PB(4)] = p->audio_frame[4];
 	buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
 
+	buf[PB(0)] = tda998x_cksum(buf, sizeof(buf));
+
 	tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
 			 sizeof(buf));
 }
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 198fc3c3291b..3dcd59e694db 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1075,15 +1075,34 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 	const union child_device_config *p_child;
 	union child_device_config *child_dev_ptr;
 	int i, child_device_num, count;
-	u16	block_size;
+	u8 expected_size;
+	u16 block_size;
 
 	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
 	if (!p_defs) {
 		DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
 		return;
 	}
-	if (p_defs->child_dev_size < sizeof(*p_child)) {
-		DRM_ERROR("General definiton block child device size is too small.\n");
+	if (bdb->version < 195) {
+		expected_size = 33;
+	} else if (bdb->version == 195) {
+		expected_size = 37;
+	} else if (bdb->version <= 197) {
+		expected_size = 38;
+	} else {
+		expected_size = 38;
+		DRM_DEBUG_DRIVER("Expected child_device_config size for BDB version %u not known; assuming %u\n",
+				 bdb->version, expected_size);
+	}
+
+	if (expected_size > sizeof(*p_child)) {
+		DRM_ERROR("child_device_config cannot fit in p_child\n");
+		return;
+	}
+
+	if (p_defs->child_dev_size != expected_size) {
+		DRM_ERROR("Size mismatch; child_device_config size=%u (expected %u); bdb->version: %u\n",
+			  p_defs->child_dev_size, expected_size, bdb->version);
 		return;
 	}
 	/* get the block size of general definitions */
@@ -1130,7 +1149,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 		child_dev_ptr = dev_priv->vbt.child_dev + count;
 		count++;
 
-		memcpy(child_dev_ptr, p_child, sizeof(*p_child));
+		memcpy(child_dev_ptr, p_child, p_defs->child_dev_size);
 	}
 	return;
 }
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 6e4cc5334f47..600afdbef8c9 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++
b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -357,6 +357,16 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, return MODE_OK; } +static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector, + struct drm_connector_state *state) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + struct intel_crtc *crtc = to_intel_crtc(state->crtc); + + return &intel_dp->mst_encoders[crtc->pipe]->base.base; +} + static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); @@ -367,6 +377,7 @@ static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connecto static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { .get_modes = intel_dp_mst_get_modes, .mode_valid = intel_dp_mst_mode_valid, + .atomic_best_encoder = intel_mst_atomic_best_encoder, .best_encoder = intel_mst_best_encoder, }; diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 3511bbaba505..e3c63640df73 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -462,12 +462,15 @@ out: static void hidinput_cleanup_battery(struct hid_device *dev) { + const struct power_supply_desc *psy_desc; + if (!dev->battery) return; + psy_desc = dev->battery->desc; power_supply_unregister(dev->battery); - kfree(dev->battery->desc->name); - kfree(dev->battery->desc); + kfree(psy_desc->name); + kfree(psy_desc); dev->battery = NULL; } #else /* !CONFIG_HID_BATTERY_STRENGTH */ diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c index 94167310e15a..b905d501e752 100644 --- a/drivers/hid/hid-uclogic.c +++ b/drivers/hid/hid-uclogic.c @@ -858,7 +858,7 @@ static int uclogic_tablet_enable(struct hid_device *hdev) for (p = drvdata->rdesc; p <= drvdata->rdesc + drvdata->rsize - 4;) { if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D && - p[3] < sizeof(params)) { + p[3] < ARRAY_SIZE(params)) { v = params[p[3]]; put_unaligned(cpu_to_le32(v), (s32 *)p); p += 4; diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 44958d79d598..01b937e63cf3 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -1284,6 +1284,39 @@ fail_register_pen_input: return error; } +/* + * Not all devices report physical dimensions from HID. + * Compute the default from hardcoded logical dimension + * and resolution before driver overwrites them. 
+ */ +static void wacom_set_default_phy(struct wacom_features *features) +{ + if (features->x_resolution) { + features->x_phy = (features->x_max * 100) / + features->x_resolution; + features->y_phy = (features->y_max * 100) / + features->y_resolution; + } +} + +static void wacom_calculate_res(struct wacom_features *features) +{ + /* set unit to "100th of a mm" for devices not reported by HID */ + if (!features->unit) { + features->unit = 0x11; + features->unitExpo = -3; + } + + features->x_resolution = wacom_calc_hid_res(features->x_max, + features->x_phy, + features->unit, + features->unitExpo); + features->y_resolution = wacom_calc_hid_res(features->y_max, + features->y_phy, + features->unit, + features->unitExpo); +} + static void wacom_wireless_work(struct work_struct *work) { struct wacom *wacom = container_of(work, struct wacom, work); @@ -1341,6 +1374,8 @@ static void wacom_wireless_work(struct work_struct *work) if (wacom_wac1->features.type != INTUOSHT && wacom_wac1->features.type != BAMBOO_PT) wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD; + wacom_set_default_phy(&wacom_wac1->features); + wacom_calculate_res(&wacom_wac1->features); snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen", wacom_wac1->features.name); snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad", @@ -1359,7 +1394,9 @@ static void wacom_wireless_work(struct work_struct *work) wacom_wac2->features = *((struct wacom_features *)id->driver_data); wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3; + wacom_set_default_phy(&wacom_wac2->features); wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096; + wacom_calculate_res(&wacom_wac2->features); snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX, "%s (WL) Finger",wacom_wac2->features.name); snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX, @@ -1407,39 +1444,6 @@ void wacom_battery_work(struct work_struct *work) } } -/* - * Not all devices report physical dimensions from HID. - * Compute the default from hardcoded logical dimension - * and resolution before driver overwrites them. - */ -static void wacom_set_default_phy(struct wacom_features *features) -{ - if (features->x_resolution) { - features->x_phy = (features->x_max * 100) / - features->x_resolution; - features->y_phy = (features->y_max * 100) / - features->y_resolution; - } -} - -static void wacom_calculate_res(struct wacom_features *features) -{ - /* set unit to "100th of a mm" for devices not reported by HID */ - if (!features->unit) { - features->unit = 0x11; - features->unitExpo = -3; - } - - features->x_resolution = wacom_calc_hid_res(features->x_max, - features->x_phy, - features->unit, - features->unitExpo); - features->y_resolution = wacom_calc_hid_res(features->y_max, - features->y_phy, - features->unit, - features->unitExpo); -} - static size_t wacom_compute_pktlen(struct hid_device *hdev) { struct hid_report_enum *report_enum; diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index 37c16afe007a..c8487894b312 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -929,6 +929,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = { MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); +static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { + { + /* + * CPU fan speed going up and down on Dell Studio XPS 8100 + * for unknown reasons. 
+ */ + .ident = "Dell Studio XPS 8100", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"), + }, + }, + { } +}; + /* * Probe for the presence of a supported laptop. */ @@ -940,7 +955,8 @@ static int __init i8k_probe(void) /* * Get DMI information */ - if (!dmi_check_system(i8k_dmi_table)) { + if (!dmi_check_system(i8k_dmi_table) || + dmi_check_system(i8k_blacklist_dmi_table)) { if (!ignore_dmi && !force) return -ENODEV; diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c index 9b55e673b67c..85d106fe3ce8 100644 --- a/drivers/hwmon/g762.c +++ b/drivers/hwmon/g762.c @@ -582,6 +582,7 @@ static const struct of_device_id g762_dt_match[] = { { .compatible = "gmt,g763" }, { }, }; +MODULE_DEVICE_TABLE(of, g762_dt_match); /* * Grab clock (a required property), enable it, get (fixed) clock frequency diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c index 6153df735e82..08ff89d222e5 100644 --- a/drivers/hwmon/nct7904.c +++ b/drivers/hwmon/nct7904.c @@ -575,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = { {"nct7904", 0}, {} }; +MODULE_DEVICE_TABLE(i2c, nct7904_id); static struct i2c_driver nct7904_driver = { .class = I2C_CLASS_HWMON, diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c index af162b4c7a6d..025686d41640 100644 --- a/drivers/i2c/busses/i2c-bfin-twi.c +++ b/drivers/i2c/busses/i2c-bfin-twi.c @@ -692,7 +692,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, iface); - dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Contoller, " + dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Controller, " "regs_base@%p\n", iface->regs_base); return 0; @@ -735,6 +735,6 @@ subsys_initcall(i2c_bfin_twi_init); module_exit(i2c_bfin_twi_exit); MODULE_AUTHOR("Bryan Wu, Sonic Zhang"); -MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Contoller Driver"); +MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Controller Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:i2c-bfin-twi"); diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index d1c22e3fdd14..fc9bf7f30e35 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1247,7 +1247,14 @@ static void omap_i2c_prepare_recovery(struct i2c_adapter *adap) u32 reg; reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); + /* enable test mode */ reg |= OMAP_I2C_SYSTEST_ST_EN; + /* select SDA/SCL IO mode */ + reg |= 3 << OMAP_I2C_SYSTEST_TMODE_SHIFT; + /* set SCL to high-impedance state (reset value is 0) */ + reg |= OMAP_I2C_SYSTEST_SCL_O; + /* set SDA to high-impedance state (reset value is 0) */ + reg |= OMAP_I2C_SYSTEST_SDA_O; omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); } @@ -1257,7 +1264,11 @@ static void omap_i2c_unprepare_recovery(struct i2c_adapter *adap) u32 reg; reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); + /* restore reset values */ reg &= ~OMAP_I2C_SYSTEST_ST_EN; + reg &= ~OMAP_I2C_SYSTEST_TMODE_MASK; + reg &= ~OMAP_I2C_SYSTEST_SCL_O; + reg &= ~OMAP_I2C_SYSTEST_SDA_O; omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); } diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index e6d4935161e4..c83e4d13cfc5 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -567,6 +567,9 @@ static int i2c_generic_recovery(struct i2c_adapter *adap) if (bri->prepare_recovery) bri->prepare_recovery(adap); + bri->set_scl(adap, val); + ndelay(RECOVERY_NDELAY); + /* * By this time SCL is high, as we need to give 9 
falling-rising edges */ @@ -597,7 +600,6 @@ static int i2c_generic_recovery(struct i2c_adapter *adap) int i2c_generic_scl_recovery(struct i2c_adapter *adap) { - adap->bus_recovery_info->set_scl(adap, 1); return i2c_generic_recovery(adap); } EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery); @@ -1338,13 +1340,17 @@ static int of_dev_node_match(struct device *dev, void *data) struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) { struct device *dev; + struct i2c_client *client; - dev = bus_find_device(&i2c_bus_type, NULL, node, - of_dev_node_match); + dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match); if (!dev) return NULL; - return i2c_verify_client(dev); + client = i2c_verify_client(dev); + if (!client) + put_device(dev); + + return client; } EXPORT_SYMBOL(of_find_i2c_device_by_node); @@ -1352,13 +1358,17 @@ EXPORT_SYMBOL(of_find_i2c_device_by_node); struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node) { struct device *dev; + struct i2c_adapter *adapter; - dev = bus_find_device(&i2c_bus_type, NULL, node, - of_dev_node_match); + dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match); if (!dev) return NULL; - return i2c_verify_adapter(dev); + adapter = i2c_verify_adapter(dev); + if (!adapter) + put_device(dev); + + return adapter; } EXPORT_SYMBOL(of_find_i2c_adapter_by_node); #else diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c index 822374654609..1da449614779 100644 --- a/drivers/i2c/i2c-slave-eeprom.c +++ b/drivers/i2c/i2c-slave-eeprom.c @@ -80,9 +80,6 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj struct eeprom_data *eeprom; unsigned long flags; - if (off + count > attr->size) - return -EFBIG; - eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); spin_lock_irqsave(&eeprom->buffer_lock, flags); @@ -98,9 +95,6 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob struct eeprom_data *eeprom; unsigned long flags; - if (off + count > attr->size) - return -EFBIG; - eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); spin_lock_irqsave(&eeprom->buffer_lock, flags); diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c index 27b6a3ce18ca..891797ad76bc 100644 --- a/drivers/input/joystick/turbografx.c +++ b/drivers/input/joystick/turbografx.c @@ -196,7 +196,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs) if (n_buttons[i] < 1) continue; - if (n_buttons[i] > 6) { + if (n_buttons[i] > ARRAY_SIZE(tgfx_buttons)) { printk(KERN_ERR "turbografx.c: Invalid number of buttons %d\n", n_buttons[i]); err = -EINVAL; goto err_unreg_devs; diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c index 10e140af5aac..1ac898db303a 100644 --- a/drivers/input/misc/axp20x-pek.c +++ b/drivers/input/misc/axp20x-pek.c @@ -292,3 +292,4 @@ module_platform_driver(axp20x_pek_driver); MODULE_DESCRIPTION("axp20x Power Button"); MODULE_AUTHOR("Carlo Caione "); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:axp20x-pek"); diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c index fc17b9592f54..10c4e3d462f1 100644 --- a/drivers/input/misc/twl4030-vibra.c +++ b/drivers/input/misc/twl4030-vibra.c @@ -183,7 +183,8 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata, if (pdata && pdata->coexist) return true; - if (of_find_node_by_name(node, "codec")) { + node = of_find_node_by_name(node, "codec"); + if 
(node) { of_node_put(node); return true; } diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 113d6f1516a5..4d246861d692 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "psmouse.h" #include "alps.h" @@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = { #define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */ #define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with 6-byte ALPS packet */ +#define ALPS_DELL 0x100 /* device is a Dell laptop */ #define ALPS_BUTTONPAD 0x200 /* device is a clickpad */ static const struct alps_model_info alps_model_data[] = { @@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse) return; } - /* Non interleaved V2 dualpoint has separate stick button bits */ + /* Dell non interleaved V2 dualpoint has separate stick button bits */ if (priv->proto_version == ALPS_PROTO_V2 && - priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) { + priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) { left |= packet[0] & 1; right |= packet[0] & 2; middle |= packet[0] & 4; @@ -2550,6 +2552,8 @@ static int alps_set_protocol(struct psmouse *psmouse, priv->byte0 = protocol->byte0; priv->mask0 = protocol->mask0; priv->flags = protocol->flags; + if (dmi_name_in_vendors("Dell")) + priv->flags |= ALPS_DELL; priv->x_max = 2000; priv->y_max = 1400; diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 22b9ca901f4e..2955f1d0ca6c 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -783,19 +783,26 @@ static int elantech_packet_check_v4(struct psmouse *psmouse) struct elantech_data *etd = psmouse->private; unsigned char *packet = psmouse->packet; unsigned char packet_type = packet[3] & 0x03; + unsigned int ic_version; bool sanity_check; if (etd->tp_dev && (packet[3] & 0x0f) == 0x06) return PACKET_TRACKPOINT; + /* This represents the version of IC body. */ + ic_version = (etd->fw_version & 0x0f0000) >> 16; + /* * Sanity check based on the constant bits of a packet. * The constant bits change depending on the value of - * the hardware flag 'crc_enabled' but are the same for - * every packet, regardless of the type. + * the hardware flag 'crc_enabled' and the version of + * the IC body, but are the same for every packet, + * regardless of the type. */ if (etd->crc_enabled) sanity_check = ((packet[3] & 0x08) == 0x00); + else if (ic_version == 7 && etd->samples[1] == 0x2A) + sanity_check = ((packet[3] & 0x1c) == 0x10); else sanity_check = ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0x1c) == 0x10); @@ -1116,6 +1123,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, * Avatar AVIU-145A2 0x361f00 ? 
clickpad * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons + * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*) @@ -1651,6 +1659,16 @@ int elantech_init(struct psmouse *psmouse) etd->capabilities[0], etd->capabilities[1], etd->capabilities[2]); + if (etd->hw_version != 1) { + if (etd->send_cmd(psmouse, ETP_SAMPLE_QUERY, etd->samples)) { + psmouse_err(psmouse, "failed to query sample data\n"); + goto init_fail; + } + psmouse_info(psmouse, + "Elan sample query result %02x, %02x, %02x\n", + etd->samples[0], etd->samples[1], etd->samples[2]); + } + if (elantech_set_absolute_mode(psmouse)) { psmouse_err(psmouse, "failed to put touchpad into absolute mode.\n"); diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h index f965d1569cc3..e1cbf409d9c8 100644 --- a/drivers/input/mouse/elantech.h +++ b/drivers/input/mouse/elantech.h @@ -129,6 +129,7 @@ struct elantech_data { unsigned char reg_26; unsigned char debug; unsigned char capabilities[3]; + unsigned char samples[3]; bool paritycheck; bool jumpy_cursor; bool reports_pressure; diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index b7d54d428b5e..ff4be0515a0d 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -538,7 +538,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) { - smp_call_function_interrupt(); + generic_smp_call_function_interrupt(); return IRQ_HANDLED; } diff --git a/drivers/isdn/mISDN/dsp_audio.c b/drivers/isdn/mISDN/dsp_audio.c index 06022952a437..bbef98e7a16e 100644 --- a/drivers/isdn/mISDN/dsp_audio.c +++ b/drivers/isdn/mISDN/dsp_audio.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "core.h" #include "dsp.h" @@ -137,27 +138,14 @@ static unsigned char linear2ulaw(short sample) return ulawbyte; } -static int reverse_bits(int i) -{ - int z, j; - z = 0; - - for (j = 0; j < 8; j++) { - if ((i & (1 << j)) != 0) - z |= 1 << (7 - j); - } - return z; -} - - void dsp_audio_generate_law_tables(void) { int i; for (i = 0; i < 256; i++) - dsp_audio_alaw_to_s32[i] = alaw2linear(reverse_bits(i)); + dsp_audio_alaw_to_s32[i] = alaw2linear(bitrev8((u8)i)); for (i = 0; i < 256; i++) - dsp_audio_ulaw_to_s32[i] = ulaw2linear(reverse_bits(i)); + dsp_audio_ulaw_to_s32[i] = ulaw2linear(bitrev8((u8)i)); for (i = 0; i < 256; i++) { dsp_audio_alaw_to_ulaw[i] = @@ -176,13 +164,13 @@ dsp_audio_generate_s2law_table(void) /* generating ulaw-table */ for (i = -32768; i < 32768; i++) { dsp_audio_s16_to_law[i & 0xffff] = - reverse_bits(linear2ulaw(i)); + bitrev8(linear2ulaw(i)); } } else { /* generating alaw-table */ for (i = -32768; i < 32768; i++) { dsp_audio_s16_to_law[i & 0xffff] = - reverse_bits(linear2alaw(i)); + bitrev8(linear2alaw(i)); } } } diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c index 1a57e88a38f7..cd35079c8c98 100644 --- a/drivers/macintosh/ans-lcd.c +++ b/drivers/macintosh/ans-lcd.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index 32814371b8d3..aa1b41ca40f7 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c @@ -1471,5 +1471,3 @@ module_exit(mq_exit); MODULE_AUTHOR("Joe Thornber "); 
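
The mISDN dsp_audio conversion above drops the driver-local reverse_bits() helper in favour of the kernel's bitrev8(). A standalone userspace check of that equivalence (reverse_bits() is copied from the removed code; bitrev8_ref() is an open-coded stand-in for the kernel's bitrev8(), which is table-driven):

	#include <assert.h>
	#include <stdio.h>

	/* Copied from the helper removed above. */
	static int reverse_bits(int i)
	{
		int z = 0, j;

		for (j = 0; j < 8; j++)
			if (i & (1 << j))
				z |= 1 << (7 - j);
		return z;
	}

	/* Stand-in for the kernel's bitrev8(); same result, different method. */
	static unsigned char bitrev8_ref(unsigned char b)
	{
		b = (b & 0xf0) >> 4 | (b & 0x0f) << 4;
		b = (b & 0xcc) >> 2 | (b & 0x33) << 2;
		b = (b & 0xaa) >> 1 | (b & 0x55) << 1;
		return b;
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 256; i++)
			assert(reverse_bits(i) == bitrev8_ref((unsigned char)i));
		printf("reverse_bits() and bitrev8() agree on all 256 bytes\n");
		return 0;
	}
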
MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("mq cache policy"); - -MODULE_ALIAS("dm-cache-default"); diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c index 48a4a826ae07..200366c62231 100644 --- a/drivers/md/dm-cache-policy-smq.c +++ b/drivers/md/dm-cache-policy-smq.c @@ -1789,3 +1789,5 @@ module_exit(smq_exit); MODULE_AUTHOR("Joe Thornber "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("smq cache policy"); + +MODULE_ALIAS("dm-cache-default"); diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 48dfe3c4d6aa..6ba47cfb1443 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -1293,8 +1293,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd) return r; disk_super = dm_block_data(copy); - dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root)); - dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root)); + dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root)); + dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root)); dm_sm_dec_block(pmd->metadata_sm, held_root); return dm_tm_unlock(pmd->tm, copy); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ab37ae114e94..0d7ab20c58df 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1729,7 +1729,8 @@ static int dm_merge_bvec(struct request_queue *q, struct mapped_device *md = q->queuedata; struct dm_table *map = dm_get_live_table_fast(md); struct dm_target *ti; - sector_t max_sectors, max_size = 0; + sector_t max_sectors; + int max_size = 0; if (unlikely(!map)) goto out; @@ -1742,18 +1743,10 @@ static int dm_merge_bvec(struct request_queue *q, * Find maximum amount of I/O that won't need splitting */ max_sectors = min(max_io_len(bvm->bi_sector, ti), - (sector_t) queue_max_sectors(q)); + (sector_t) BIO_MAX_SECTORS); max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; - - /* - * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t - * to the targets' merge function since it holds sectors not bytes). - * Just doing this as an interim fix for stable@ because the more - * comprehensive cleanup of switching to sector_t will impact every - * DM target that implements a ->merge hook. - */ - if (max_size > INT_MAX) - max_size = INT_MAX; + if (max_size < 0) + max_size = 0; /* * merge_bvec_fn() returns number of bytes @@ -1761,13 +1754,13 @@ static int dm_merge_bvec(struct request_queue *q, * max is precomputed maximal io size */ if (max_size && ti->type->merge) - max_size = ti->type->merge(ti, bvm, biovec, (int) max_size); + max_size = ti->type->merge(ti, bvm, biovec, max_size); /* * If the target doesn't support merge method and some of the devices - * provided their merge_bvec method (we know this by looking for the - * max_hw_sectors that dm_set_device_limits may set), then we can't - * allow bios with multiple vector entries. So always set max_size - * to 0, and the code below allows just one page. + * provided their merge_bvec method (we know this by looking at + * queue_max_hw_sectors), then we can't allow bios with multiple vector + * entries. So always set max_size to 0, and the code below allows + * just one page. 
*/ else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) max_size = 0; diff --git a/drivers/md/md.c b/drivers/md/md.c index 0c2a4e8b873c..e25f00f0138a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5759,7 +5759,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg) char *ptr; int err; - file = kmalloc(sizeof(*file), GFP_NOIO); + file = kzalloc(sizeof(*file), GFP_NOIO); if (!file) return -ENOMEM; diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h index bf2b80d5c470..8731b6ea026b 100644 --- a/drivers/md/persistent-data/dm-btree-internal.h +++ b/drivers/md/persistent-data/dm-btree-internal.h @@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key); extern struct dm_block_validator btree_node_validator; +/* + * Value type for upper levels of multi-level btrees. + */ +extern void init_le64_type(struct dm_transaction_manager *tm, + struct dm_btree_value_type *vt); + #endif /* DM_BTREE_INTERNAL_H */ diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index 9836c0ae897c..4222f774cf36 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info, return r; } -static struct dm_btree_value_type le64_type = { - .context = NULL, - .size = sizeof(__le64), - .inc = NULL, - .dec = NULL, - .equal = NULL -}; - int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, uint64_t *keys, dm_block_t *new_root) { @@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, int index = 0, r = 0; struct shadow_spine spine; struct btree_node *n; + struct dm_btree_value_type le64_vt; + init_le64_type(info->tm, &le64_vt); init_shadow_spine(&spine, info); for (level = 0; level < info->levels; level++) { r = remove_raw(&spine, info, (level == last_level ? 
- &info->value_type : &le64_type), + &info->value_type : &le64_vt), root, keys[level], (unsigned *)&index); if (r < 0) break; @@ -654,11 +648,13 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root, int index = 0, r = 0; struct shadow_spine spine; struct btree_node *n; + struct dm_btree_value_type le64_vt; uint64_t k; + init_le64_type(info->tm, &le64_vt); init_shadow_spine(&spine, info); for (level = 0; level < last_level; level++) { - r = remove_raw(&spine, info, &le64_type, + r = remove_raw(&spine, info, &le64_vt, root, keys[level], (unsigned *) &index); if (r < 0) goto out; @@ -689,6 +685,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root, value_ptr(n, index)); delete_at(n, index); + keys[last_level] = k + 1ull; } else r = -ENODATA; diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c index 1b5e13ec7f96..0dee514ba4c5 100644 --- a/drivers/md/persistent-data/dm-btree-spine.c +++ b/drivers/md/persistent-data/dm-btree-spine.c @@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s) { return s->root; } + +static void le64_inc(void *context, const void *value_le) +{ + struct dm_transaction_manager *tm = context; + __le64 v_le; + + memcpy(&v_le, value_le, sizeof(v_le)); + dm_tm_inc(tm, le64_to_cpu(v_le)); +} + +static void le64_dec(void *context, const void *value_le) +{ + struct dm_transaction_manager *tm = context; + __le64 v_le; + + memcpy(&v_le, value_le, sizeof(v_le)); + dm_tm_dec(tm, le64_to_cpu(v_le)); +} + +static int le64_equal(void *context, const void *value1_le, const void *value2_le) +{ + __le64 v1_le, v2_le; + + memcpy(&v1_le, value1_le, sizeof(v1_le)); + memcpy(&v2_le, value2_le, sizeof(v2_le)); + return v1_le == v2_le; +} + +void init_le64_type(struct dm_transaction_manager *tm, + struct dm_btree_value_type *vt) +{ + vt->context = tm; + vt->size = sizeof(__le64); + vt->inc = le64_inc; + vt->dec = le64_dec; + vt->equal = le64_equal; +} diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index fdd3793e22f9..c7726cebc495 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root, struct btree_node *n; struct dm_btree_value_type le64_type; - le64_type.context = NULL; - le64_type.size = sizeof(__le64); - le64_type.inc = NULL; - le64_type.dec = NULL; - le64_type.equal = NULL; - + init_le64_type(info->tm, &le64_type); init_shadow_spine(&spine, info); for (level = 0; level < (info->levels - 1); level++) { diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 94f5b55069e0..967a4ed73929 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1476,6 +1476,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) { char b[BDEVNAME_SIZE]; struct r1conf *conf = mddev->private; + unsigned long flags; /* * If it is not operational, then we have already marked it as dead @@ -1495,14 +1496,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) return; } set_bit(Blocked, &rdev->flags); + spin_lock_irqsave(&conf->device_lock, flags); if (test_and_clear_bit(In_sync, &rdev->flags)) { - unsigned long flags; - spin_lock_irqsave(&conf->device_lock, flags); mddev->degraded++; set_bit(Faulty, &rdev->flags); - spin_unlock_irqrestore(&conf->device_lock, flags); } else set_bit(Faulty, &rdev->flags); + spin_unlock_irqrestore(&conf->device_lock, flags); /* * if recovery is running, make sure it aborts. 
*/ @@ -1568,7 +1568,10 @@ static int raid1_spare_active(struct mddev *mddev) * Find all failed disks within the RAID1 configuration * and mark them readable. * Called under mddev lock, so rcu protection not needed. + * device_lock used to avoid races with raid1_end_read_request + * which expects 'In_sync' flags and ->degraded to be consistent. */ + spin_lock_irqsave(&conf->device_lock, flags); for (i = 0; i < conf->raid_disks; i++) { struct md_rdev *rdev = conf->mirrors[i].rdev; struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; @@ -1599,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev) sysfs_notify_dirent_safe(rdev->sysfs_state); } } - spin_lock_irqsave(&conf->device_lock, flags); mddev->degraded -= count; spin_unlock_irqrestore(&conf->device_lock, flags); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 643d217bfa13..f757023fc458 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2256,7 +2256,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) static int drop_one_stripe(struct r5conf *conf) { struct stripe_head *sh; - int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; + int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; spin_lock_irq(conf->hash_locks + hash); sh = get_free_stripe(conf, hash); @@ -6388,7 +6388,8 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink, if (mutex_trylock(&conf->cache_size_mutex)) { ret= 0; - while (ret < sc->nr_to_scan) { + while (ret < sc->nr_to_scan && + conf->max_nr_stripes > conf->min_nr_stripes) { if (drop_one_stripe(conf) == 0) { ret = SHRINK_STOP; break; diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 653815950aa2..3f68dd251ce8 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -115,7 +115,7 @@ config MFD_CROS_EC_I2C config MFD_CROS_EC_SPI tristate "ChromeOS Embedded Controller (SPI)" - depends on MFD_CROS_EC && CROS_EC_PROTO && SPI && OF + depends on MFD_CROS_EC && CROS_EC_PROTO && SPI ---help--- If you say Y here, you get support for talking to the ChromeOS EC diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index bebf58a06a6b..a72ddb295078 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -651,7 +651,7 @@ static int arizona_runtime_suspend(struct device *dev) arizona->has_fully_powered_off = true; - disable_irq(arizona->irq); + disable_irq_nosync(arizona->irq); arizona_enable_reset(arizona); regulator_bulk_disable(arizona->num_core_supplies, arizona->core_supplies); @@ -1141,10 +1141,6 @@ int arizona_dev_init(struct arizona *arizona) arizona->pdata.gpio_defaults[i]); } - pm_runtime_set_autosuspend_delay(arizona->dev, 100); - pm_runtime_use_autosuspend(arizona->dev); - pm_runtime_enable(arizona->dev); - /* Chip default */ if (!arizona->pdata.clk32k_src) arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2; @@ -1245,11 +1241,17 @@ int arizona_dev_init(struct arizona *arizona) arizona->pdata.spk_fmt[i]); } + pm_runtime_set_active(arizona->dev); + pm_runtime_enable(arizona->dev); + /* Set up for interrupts */ ret = arizona_irq_init(arizona); if (ret != 0) goto err_reset; + pm_runtime_set_autosuspend_delay(arizona->dev, 100); + pm_runtime_use_autosuspend(arizona->dev); + arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error", arizona_clkgen_err, arizona); arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked", @@ -1278,10 +1280,6 @@ int arizona_dev_init(struct arizona *arizona) goto err_irq; } -#ifdef CONFIG_PM - regulator_disable(arizona->dcvdd); -#endif - return 0; 
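
The arizona reordering above follows the usual runtime-PM rule for a device that leaves probe already powered: tell the core the device is active before enabling runtime PM, and only arm autosuspend once interrupt handling is in place. The bare pattern, reduced to its ordering (a sketch; demo_probe is a hypothetical probe callback and error handling is omitted):

	#include <linux/pm_runtime.h>

	static int demo_probe(struct device *dev)
	{
		pm_runtime_set_active(dev);	/* state must match the hardware */
		pm_runtime_enable(dev);		/* only then hand over to the core */

		/* ... request IRQs, finish setup ... */

		pm_runtime_set_autosuspend_delay(dev, 100);
		pm_runtime_use_autosuspend(dev);
		return 0;
	}
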
err_irq: diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 2d3db81be099..6ded3dc36644 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -438,9 +438,6 @@ static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj, { struct at24_data *at24; - if (unlikely(off >= attr->size)) - return -EFBIG; - at24 = dev_get_drvdata(container_of(kobj, struct device, kobj)); return at24_write(at24, buf, off, count); } diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index c18f9e62a9fa..f50373645ab4 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -282,7 +282,6 @@ config VETH config VIRTIO_NET tristate "Virtio network driver" depends on VIRTIO - select AVERAGE ---help--- This is the virtual network driver for virtio. It can be used with lguest or QEMU based VMMs (like KVM or Xen). Say Y or M. @@ -297,6 +296,13 @@ config NLMON diagnostics, etc. This is mostly intended for developers or support to debug netlink issues. If unsure, say N. +config NET_VRF + tristate "Virtual Routing and Forwarding (Lite)" + depends on IP_MULTIPLE_TABLES && IPV6_MULTIPLE_TABLES + ---help--- + This option enables the support for mapping interfaces into VRF's. The + support enables VRF devices. + endif # NET_CORE config SUNGEM_PHY diff --git a/drivers/net/Makefile b/drivers/net/Makefile index c12cb22478a7..ca16dd689b36 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.o obj-$(CONFIG_VXLAN) += vxlan.o obj-$(CONFIG_GENEVE) += geneve.o obj-$(CONFIG_NLMON) += nlmon.o +obj-$(CONFIG_NET_VRF) += vrf.o # # Networking Drivers diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 0c627b4733ca..0ef2ed3a610e 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -786,6 +786,7 @@ static bool bond_should_notify_peers(struct bonding *bond) slave ? 
slave->dev->name : "NULL"); if (!slave || !bond->send_peer_notif || + !netif_carrier_ok(bond->dev) || test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) return false; @@ -4119,9 +4120,8 @@ void bond_setup(struct net_device *bond_dev) SET_NETDEV_DEVTYPE(bond_dev, &bond_type); /* Initialize the device options */ - bond_dev->tx_queue_len = 0; bond_dev->flags |= IFF_MASTER|IFF_MULTICAST; - bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT; + bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE; bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); /* don't acquire bond device's netif_tx_lock when transmitting */ diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index b3b922adc0e4..615c65da39be 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c @@ -1120,7 +1120,7 @@ static void cfhsi_setup(struct net_device *dev) dev->type = ARPHRD_CAIF; dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; dev->destructor = free_netdev; dev->netdev_ops = &cfhsi_netdevops; for (i = 0; i < CFHSI_PRIO_LAST; ++i) diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index 9da06537237f..c2dea4916e5d 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -427,7 +427,7 @@ static void caifdev_setup(struct net_device *dev) dev->type = ARPHRD_CAIF; dev->flags = IFF_POINTOPOINT | IFF_NOARP; dev->mtu = CAIF_MAX_MTU; - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; dev->destructor = free_netdev; skb_queue_head_init(&serdev->head); serdev->common.link_select = CAIF_LINK_LOW_LATENCY; diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index 72ea9ff9bb9c..de3962014af7 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -710,7 +710,7 @@ static void cfspi_setup(struct net_device *dev) dev->netdev_ops = &cfspi_ops; dev->type = ARPHRD_CAIF; dev->flags = IFF_NOARP | IFF_POINTOPOINT; - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; dev->mtu = SPI_MAX_PAYLOAD_SIZE; dev->destructor = free_netdev; skb_queue_head_init(&cfspi->qhead); diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 1c7808495a9d..735f04cd83ee 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -116,9 +116,9 @@ struct dsa_switch_driver mv88e6171_switch_driver = { .port_join_bridge = mv88e6xxx_join_bridge, .port_leave_bridge = mv88e6xxx_leave_bridge, .port_stp_update = mv88e6xxx_port_stp_update, - .fdb_add = mv88e6xxx_port_fdb_add, - .fdb_del = mv88e6xxx_port_fdb_del, - .fdb_getnext = mv88e6xxx_port_fdb_getnext, + .port_fdb_add = mv88e6xxx_port_fdb_add, + .port_fdb_del = mv88e6xxx_port_fdb_del, + .port_fdb_getnext = mv88e6xxx_port_fdb_getnext, }; MODULE_ALIAS("platform:mv88e6171"); diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index af210efecc55..14b71779df99 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -123,8 +123,9 @@ static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr) mutex_lock(&ps->eeprom_mutex); - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14, - 0xc000 | (addr & 0xff)); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP, + GLOBAL2_EEPROM_OP_READ | + (addr & GLOBAL2_EEPROM_OP_ADDR_MASK)); if (ret < 0) goto error; @@ -132,7 +133,7 @@ static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr) if (ret < 0) goto error; - ret = 
mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x15); + ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA); error: mutex_unlock(&ps->eeprom_mutex); return ret; @@ -205,11 +206,11 @@ static int mv88e6352_eeprom_is_readonly(struct dsa_switch *ds) { int ret; - ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x14); + ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP); if (ret < 0) return ret; - if (!(ret & 0x0400)) + if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN)) return -EROFS; return 0; @@ -223,12 +224,13 @@ static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr, mutex_lock(&ps->eeprom_mutex); - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x15, data); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data); if (ret < 0) goto error; - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14, - 0xb000 | (addr & 0xff)); + ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP, + GLOBAL2_EEPROM_OP_WRITE | + (addr & GLOBAL2_EEPROM_OP_ADDR_MASK)); if (ret < 0) goto error; @@ -341,9 +343,14 @@ struct dsa_switch_driver mv88e6352_switch_driver = { .port_join_bridge = mv88e6xxx_join_bridge, .port_leave_bridge = mv88e6xxx_leave_bridge, .port_stp_update = mv88e6xxx_port_stp_update, - .fdb_add = mv88e6xxx_port_fdb_add, - .fdb_del = mv88e6xxx_port_fdb_del, - .fdb_getnext = mv88e6xxx_port_fdb_getnext, + .port_pvid_get = mv88e6xxx_port_pvid_get, + .port_pvid_set = mv88e6xxx_port_pvid_set, + .port_vlan_add = mv88e6xxx_port_vlan_add, + .port_vlan_del = mv88e6xxx_port_vlan_del, + .vlan_getnext = mv88e6xxx_vlan_getnext, + .port_fdb_add = mv88e6xxx_port_fdb_add, + .port_fdb_del = mv88e6xxx_port_fdb_del, + .port_fdb_getnext = mv88e6xxx_port_fdb_getnext, }; MODULE_ALIAS("platform:mv88e6172"); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 109452056eff..2ab3f9810593 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2,6 +2,9 @@ * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support * Copyright (c) 2008 Marvell Semiconductor * + * Copyright (c) 2015 CMC Electronics, Inc. 
+ * Added support for VLAN Table Unit operations + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -964,7 +967,7 @@ static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd) { int ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x01, fid); + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid); if (ret < 0) return ret; @@ -1091,7 +1094,7 @@ int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask) ps->bridge_mask[fid] = br_port_mask; if (fid != ps->fid[port]) { - ps->fid_mask |= 1 << ps->fid[port]; + clear_bit(ps->fid[port], ps->fid_bitmap); ps->fid[port] = fid; ret = _mv88e6xxx_update_bridge_config(ds, fid); } @@ -1125,9 +1128,16 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask) mutex_lock(&ps->smi_mutex); - newfid = __ffs(ps->fid_mask); + newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1); + if (unlikely(newfid > ps->num_ports)) { + netdev_err(ds->ports[port], "all first %d FIDs are used\n", + ps->num_ports); + ret = -ENOSPC; + goto unlock; + } + ps->fid[port] = newfid; - ps->fid_mask &= ~(1 << newfid); + set_bit(newfid, ps->fid_bitmap); ps->bridge_mask[fid] &= ~(1 << port); ps->bridge_mask[newfid] = 1 << port; @@ -1135,6 +1145,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask) if (!ret) ret = _mv88e6xxx_update_bridge_config(ds, newfid); +unlock: mutex_unlock(&ps->smi_mutex); return ret; @@ -1174,8 +1185,476 @@ int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state) return 0; } -static int __mv88e6xxx_write_addr(struct dsa_switch *ds, - const unsigned char *addr) +int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid) +{ + int ret; + + ret = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN); + if (ret < 0) + return ret; + + *pvid = ret & PORT_DEFAULT_VLAN_MASK; + + return 0; +} + +int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid) +{ + return mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN, + pvid & PORT_DEFAULT_VLAN_MASK); +} + +static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds) +{ + return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP, + GLOBAL_VTU_OP_BUSY); +} + +static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op) +{ + int ret; + + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op); + if (ret < 0) + return ret; + + return _mv88e6xxx_vtu_wait(ds); +} + +static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds) +{ + int ret; + + ret = _mv88e6xxx_vtu_wait(ds); + if (ret < 0) + return ret; + + return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL); +} + +static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds, + struct mv88e6xxx_vtu_stu_entry *entry, + unsigned int nibble_offset) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + u16 regs[3]; + int i; + int ret; + + for (i = 0; i < 3; ++i) { + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, + GLOBAL_VTU_DATA_0_3 + i); + if (ret < 0) + return ret; + + regs[i] = ret; + } + + for (i = 0; i < ps->num_ports; ++i) { + unsigned int shift = (i % 4) * 4 + nibble_offset; + u16 reg = regs[i / 4]; + + entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK; + } + + return 0; +} + +static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds, + struct mv88e6xxx_vtu_stu_entry *entry, + unsigned int nibble_offset) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + 
u16 regs[3] = { 0 }; + int i; + int ret; + + for (i = 0; i < ps->num_ports; ++i) { + unsigned int shift = (i % 4) * 4 + nibble_offset; + u8 data = entry->data[i]; + + regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift; + } + + for (i = 0; i < 3; ++i) { + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, + GLOBAL_VTU_DATA_0_3 + i, regs[i]); + if (ret < 0) + return ret; + } + + return 0; +} + +static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + struct mv88e6xxx_vtu_stu_entry next = { 0 }; + int ret; + + ret = _mv88e6xxx_vtu_wait(ds); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, + vid & GLOBAL_VTU_VID_MASK); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID); + if (ret < 0) + return ret; + + next.vid = ret & GLOBAL_VTU_VID_MASK; + next.valid = !!(ret & GLOBAL_VTU_VID_VALID); + + if (next.valid) { + ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0); + if (ret < 0) + return ret; + + if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || + mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) { + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, + GLOBAL_VTU_FID); + if (ret < 0) + return ret; + + next.fid = ret & GLOBAL_VTU_FID_MASK; + + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, + GLOBAL_VTU_SID); + if (ret < 0) + return ret; + + next.sid = ret & GLOBAL_VTU_SID_MASK; + } + } + + *entry = next; + return 0; +} + +static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + u16 reg = 0; + int ret; + + ret = _mv88e6xxx_vtu_wait(ds); + if (ret < 0) + return ret; + + if (!entry->valid) + goto loadpurge; + + /* Write port member tags */ + ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0); + if (ret < 0) + return ret; + + if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || + mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) { + reg = entry->sid & GLOBAL_VTU_SID_MASK; + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg); + if (ret < 0) + return ret; + + reg = entry->fid & GLOBAL_VTU_FID_MASK; + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg); + if (ret < 0) + return ret; + } + + reg = GLOBAL_VTU_VID_VALID; +loadpurge: + reg |= entry->vid & GLOBAL_VTU_VID_MASK; + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg); + if (ret < 0) + return ret; + + return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE); +} + +static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + struct mv88e6xxx_vtu_stu_entry next = { 0 }; + int ret; + + ret = _mv88e6xxx_vtu_wait(ds); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, + sid & GLOBAL_VTU_SID_MASK); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID); + if (ret < 0) + return ret; + + next.sid = ret & GLOBAL_VTU_SID_MASK; + + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID); + if (ret < 0) + return ret; + + next.valid = !!(ret & GLOBAL_VTU_VID_VALID); + + if (next.valid) { + ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2); + if (ret < 0) + return ret; + } + + *entry = next; + return 0; +} + +static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + u16 reg = 
0; + int ret; + + ret = _mv88e6xxx_vtu_wait(ds); + if (ret < 0) + return ret; + + if (!entry->valid) + goto loadpurge; + + /* Write port states */ + ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2); + if (ret < 0) + return ret; + + reg = GLOBAL_VTU_VID_VALID; +loadpurge: + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg); + if (ret < 0) + return ret; + + reg = entry->sid & GLOBAL_VTU_SID_MASK; + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg); + if (ret < 0) + return ret; + + return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE); +} + +static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry vlan = { + .valid = true, + .vid = vid, + }; + int i; + + /* exclude all ports except the CPU */ + for (i = 0; i < ps->num_ports; ++i) + vlan.data[i] = dsa_is_cpu_port(ds, i) ? + GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED : + GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; + + if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || + mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) { + struct mv88e6xxx_vtu_stu_entry vstp; + int err; + + /* Adding a VTU entry requires a valid STU entry. As VSTP is not + * implemented, only one STU entry is needed to cover all VTU + * entries. Thus, validate the SID 0. + */ + vlan.sid = 0; + err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp); + if (err) + return err; + + if (vstp.sid != vlan.sid || !vstp.valid) { + memset(&vstp, 0, sizeof(vstp)); + vstp.valid = true; + vstp.sid = vlan.sid; + + err = _mv88e6xxx_stu_loadpurge(ds, &vstp); + if (err) + return err; + } + + /* Non-bridged ports and bridge groups use FIDs from 1 to + * num_ports; VLANs use FIDs from num_ports+1 to 4095. + */ + vlan.fid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, + ps->num_ports + 1); + if (unlikely(vlan.fid == VLAN_N_VID)) { + pr_err("no more FID available for VLAN %d\n", vid); + return -ENOSPC; + } + + err = _mv88e6xxx_flush_fid(ds, vlan.fid); + if (err) + return err; + + set_bit(vlan.fid, ps->fid_bitmap); + } + + *entry = vlan; + return 0; +} + +int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid, + bool untagged) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry vlan; + int err; + + mutex_lock(&ps->smi_mutex); + err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan); + if (err) + goto unlock; + + if (vlan.vid != vid || !vlan.valid) { + err = _mv88e6xxx_vlan_init(ds, vid, &vlan); + if (err) + goto unlock; + } + + vlan.data[port] = untagged ? 
+ GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED : + GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED; + + err = _mv88e6xxx_vtu_loadpurge(ds, &vlan); +unlock: + mutex_unlock(&ps->smi_mutex); + + return err; +} + +int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry vlan; + bool keep = false; + int i, err; + + mutex_lock(&ps->smi_mutex); + + err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan); + if (err) + goto unlock; + + if (vlan.vid != vid || !vlan.valid || + vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) { + err = -ENOENT; + goto unlock; + } + + vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; + + /* keep the VLAN unless all ports are excluded */ + for (i = 0; i < ps->num_ports; ++i) { + if (dsa_is_cpu_port(ds, i)) + continue; + + if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) { + keep = true; + break; + } + } + + vlan.valid = keep; + err = _mv88e6xxx_vtu_loadpurge(ds, &vlan); + if (err) + goto unlock; + + if (!keep) + clear_bit(vlan.fid, ps->fid_bitmap); + +unlock: + mutex_unlock(&ps->smi_mutex); + + return err; +} + +static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch *ds, int port, u16 vid, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + int err; + + do { + if (vid == 4095) + return -ENOENT; + + err = _mv88e6xxx_vtu_getnext(ds, vid, entry); + if (err) + return err; + + if (!entry->valid) + return -ENOENT; + + vid = entry->vid; + } while (entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED && + entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED); + + return 0; +} + +int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid, + unsigned long *ports, unsigned long *untagged) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry next; + int port; + int err; + + if (*vid == 4095) + return -ENOENT; + + mutex_lock(&ps->smi_mutex); + err = _mv88e6xxx_vtu_getnext(ds, *vid, &next); + mutex_unlock(&ps->smi_mutex); + + if (err) + return err; + + if (!next.valid) + return -ENOENT; + + *vid = next.vid; + + for (port = 0; port < ps->num_ports; ++port) { + clear_bit(port, ports); + clear_bit(port, untagged); + + if (dsa_is_cpu_port(ds, port)) + continue; + + if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED || + next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED) + set_bit(port, ports); + + if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED) + set_bit(port, untagged); + } + + return 0; +} + +static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds, + const unsigned char *addr) { int i, ret; @@ -1190,7 +1669,7 @@ static int __mv88e6xxx_write_addr(struct dsa_switch *ds, return 0; } -static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr) +static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr) { int i, ret; @@ -1206,29 +1685,83 @@ static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr) return 0; } -static int __mv88e6xxx_port_fdb_cmd(struct dsa_switch *ds, int port, - const unsigned char *addr, int state) +static int _mv88e6xxx_atu_load(struct dsa_switch *ds, + struct mv88e6xxx_atu_entry *entry) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - u8 fid = ps->fid[port]; + u16 reg = 0; int ret; ret = _mv88e6xxx_atu_wait(ds); if (ret < 0) return ret; - ret = __mv88e6xxx_write_addr(ds, addr); + ret = _mv88e6xxx_atu_mac_write(ds, entry->mac); if (ret < 0) return ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, - (0x10 << port) | state); - if (ret) + 
if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) { + unsigned int mask, shift; + + if (entry->trunk) { + reg |= GLOBAL_ATU_DATA_TRUNK; + mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK; + shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT; + } else { + mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK; + shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT; + } + + reg |= (entry->portv_trunkid << shift) & mask; + } + + reg |= entry->state & GLOBAL_ATU_DATA_STATE_MASK; + + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, reg); + if (ret < 0) return ret; - ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_LOAD_DB); + return _mv88e6xxx_atu_cmd(ds, entry->fid, GLOBAL_ATU_OP_LOAD_DB); +} - return ret; +static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry vlan; + int err; + + if (vid == 0) + return ps->fid[port]; + + err = _mv88e6xxx_port_vtu_getnext(ds, port, vid - 1, &vlan); + if (err) + return err; + + if (vlan.vid == vid) + return vlan.fid; + + return -ENOENT; +} + +static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + u8 state) +{ + struct mv88e6xxx_atu_entry entry = { 0 }; + int ret; + + ret = _mv88e6xxx_port_vid_to_fid(ds, port, vid); + if (ret < 0) + return ret; + + entry.fid = ret; + entry.state = state; + ether_addr_copy(entry.mac, addr); + if (state != GLOBAL_ATU_DATA_STATE_UNUSED) { + entry.trunk = false; + entry.portv_trunkid = BIT(port); + } + + return _mv88e6xxx_atu_load(ds, &entry); } int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, @@ -1241,7 +1774,7 @@ int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, int ret; mutex_lock(&ps->smi_mutex); - ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr, state); + ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid, state); mutex_unlock(&ps->smi_mutex); return ret; @@ -1254,61 +1787,105 @@ int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, int ret; mutex_lock(&ps->smi_mutex); - ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr, + ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid, GLOBAL_ATU_DATA_STATE_UNUSED); mutex_unlock(&ps->smi_mutex); return ret; } -static int __mv88e6xxx_port_getnext(struct dsa_switch *ds, int port, - unsigned char *addr, bool *is_static) +static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid, + const unsigned char *addr, + struct mv88e6xxx_atu_entry *entry) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - u8 fid = ps->fid[port]; - int ret, state; + struct mv88e6xxx_atu_entry next = { 0 }; + int ret; + + next.fid = fid; ret = _mv88e6xxx_atu_wait(ds); if (ret < 0) return ret; - ret = __mv88e6xxx_write_addr(ds, addr); + ret = _mv88e6xxx_atu_mac_write(ds, addr); if (ret < 0) return ret; - do { - ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB); - if (ret < 0) - return ret; - - ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA); - if (ret < 0) - return ret; - state = ret & GLOBAL_ATU_DATA_STATE_MASK; - if (state == GLOBAL_ATU_DATA_STATE_UNUSED) - return -ENOENT; - } while (!(((ret >> 4) & 0xff) & (1 << port))); - - ret = __mv88e6xxx_read_addr(ds, addr); + ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB); if (ret < 0) return ret; - *is_static = state == (is_multicast_ether_addr(addr) ? 
- GLOBAL_ATU_DATA_STATE_MC_STATIC : - GLOBAL_ATU_DATA_STATE_UC_STATIC); + ret = _mv88e6xxx_atu_mac_read(ds, next.mac); + if (ret < 0) + return ret; + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA); + if (ret < 0) + return ret; + + next.state = ret & GLOBAL_ATU_DATA_STATE_MASK; + if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) { + unsigned int mask, shift; + + if (ret & GLOBAL_ATU_DATA_TRUNK) { + next.trunk = true; + mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK; + shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT; + } else { + next.trunk = false; + mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK; + shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT; + } + + next.portv_trunkid = (ret & mask) >> shift; + } + + *entry = next; return 0; } /* get next entry for port */ int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port, - unsigned char *addr, bool *is_static) + unsigned char *addr, u16 *vid, bool *is_static) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_atu_entry next; + u16 fid; int ret; mutex_lock(&ps->smi_mutex); - ret = __mv88e6xxx_port_getnext(ds, port, addr, is_static); + + ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid); + if (ret < 0) + goto unlock; + fid = ret; + + do { + if (is_broadcast_ether_addr(addr)) { + struct mv88e6xxx_vtu_stu_entry vtu; + + ret = _mv88e6xxx_port_vtu_getnext(ds, port, *vid, &vtu); + if (ret < 0) + goto unlock; + + *vid = vtu.vid; + fid = vtu.fid; + } + + ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next); + if (ret < 0) + goto unlock; + + ether_addr_copy(addr, next.mac); + + if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED) + continue; + } while (next.trunk || (next.portv_trunkid & BIT(port)) == 0); + + *is_static = next.state == (is_multicast_ether_addr(addr) ? + GLOBAL_ATU_DATA_STATE_MC_STATIC : + GLOBAL_ATU_DATA_STATE_UC_STATIC); +unlock: mutex_unlock(&ps->smi_mutex); return ret; @@ -1349,8 +1926,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) * full duplex. */ reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL); - if (dsa_is_cpu_port(ds, port) || - ds->dsa_port_mask & (1 << port)) { + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { reg |= PORT_PCS_CTRL_FORCE_LINK | PORT_PCS_CTRL_LINK_UP | PORT_PCS_CTRL_DUPLEX_FULL | @@ -1411,12 +1987,15 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) reg |= PORT_CONTROL_EGRESS_ADD_TAG; } } - if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || - mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) || - mv88e6xxx_6320_family(ds)) { - if (ds->dsa_port_mask & (1 << port)) + if (dsa_is_dsa_port(ds, port)) { + if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) + reg |= PORT_CONTROL_DSA_TAG; + if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || + mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || + mv88e6xxx_6320_family(ds)) { reg |= PORT_CONTROL_FRAME_MODE_DSA; + } + if (port == dsa_upstream_port(ds)) reg |= PORT_CONTROL_FORWARD_UNKNOWN | PORT_CONTROL_FORWARD_UNKNOWN_MC; @@ -1428,13 +2007,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) goto abort; } - /* Port Control 2: don't force a good FCS, set the maximum - * frame size to 10240 bytes, don't let the switch add or - * strip 802.1q tags, don't discard tagged or untagged frames - * on this port, do a destination address lookup on all - * received packets as usual, disable ARP mirroring and don't - * send a copy of all transmitted/received frames on this port - * to the CPU. 
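
With the VID-aware getnext above, a full address-table dump is driven by starting from the broadcast MAC, which makes the helper advance to the next VLAN's FID once the current database is exhausted. A caller-side sketch of such a walk, using only functions from this patch (vid 0 starts at the port's default database; the loop ends when getnext returns -ENOENT):

	unsigned char addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	u16 vid = 0;
	bool is_static;

	while (mv88e6xxx_port_fdb_getnext(ds, port, addr, &vid, &is_static) == 0)
		netdev_info(ds->ports[port], "%pM vid %u%s\n",
			    addr, vid, is_static ? " (static)" : "");
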
+ /* Port Control 2: don't force a good FCS, set the maximum frame size to + * 10240 bytes, enable secure 802.1q tags, don't discard tagged or + * untagged frames on this port, do a destination address lookup on all + * received packets as usual, disable ARP mirroring and don't send a + * copy of all transmitted/received frames on this port to the CPU. */ reg = 0; if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || @@ -1456,6 +2033,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) reg |= PORT_CONTROL_2_FORWARD_UNKNOWN; } + reg |= PORT_CONTROL_2_8021Q_SECURE; + if (reg) { ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_2, reg); @@ -1552,9 +2131,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) * ports, and allow each of the 'real' ports to only talk to * the upstream port. */ - fid = __ffs(ps->fid_mask); + fid = port + 1; ps->fid[port] = fid; - ps->fid_mask &= ~(1 << fid); + set_bit(fid, ps->fid_bitmap); if (!dsa_is_cpu_port(ds, port)) ps->bridge_mask[fid] = 1 << port; @@ -1651,7 +2230,7 @@ static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds, unsigned char addr[6]; int ret, data, state; - ret = __mv88e6xxx_write_addr(ds, bcast); + ret = _mv88e6xxx_atu_mac_write(ds, bcast); if (ret < 0) return ret; @@ -1666,7 +2245,7 @@ static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds, state = data & GLOBAL_ATU_DATA_STATE_MASK; if (state == GLOBAL_ATU_DATA_STATE_UNUSED) break; - ret = __mv88e6xxx_read_addr(ds, addr); + ret = _mv88e6xxx_atu_mac_read(ds, addr); if (ret < 0) return ret; mv88e6xxx_atu_show_entry(s, dbnum, addr, data); @@ -1853,8 +2432,6 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds) ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0; - ps->fid_mask = (1 << DSA_MAX_PORTS) - 1; - INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work); name = kasprintf(GFP_KERNEL, "dsa%d", ds->index); @@ -1982,6 +2559,12 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds) /* Wait for the flush to complete. 
*/ mutex_lock(&ps->smi_mutex); ret = _mv88e6xxx_stats_wait(ds); + if (ret < 0) + goto unlock; + + /* Clear all the VTU and STU entries */ + ret = _mv88e6xxx_vtu_stu_flush(ds); +unlock: mutex_unlock(&ps->smi_mutex); return ret; diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 78e37226a37d..72ca887feb0d 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -11,6 +11,8 @@ #ifndef __MV88E6XXX_H #define __MV88E6XXX_H +#include + #ifndef UINT64_MAX #define UINT64_MAX (u64)(~((u64)0)) #endif @@ -129,6 +131,7 @@ #define PORT_CONTROL_1 0x05 #define PORT_BASE_VLAN 0x06 #define PORT_DEFAULT_VLAN 0x07 +#define PORT_DEFAULT_VLAN_MASK 0xfff #define PORT_CONTROL_2 0x08 #define PORT_CONTROL_2_IGNORE_FCS BIT(15) #define PORT_CONTROL_2_VTU_PRI_OVERRIDE BIT(14) @@ -137,6 +140,11 @@ #define PORT_CONTROL_2_JUMBO_1522 (0x00 << 12) #define PORT_CONTROL_2_JUMBO_2048 (0x01 << 12) #define PORT_CONTROL_2_JUMBO_10240 (0x02 << 12) +#define PORT_CONTROL_2_8021Q_MASK (0x03 << 10) +#define PORT_CONTROL_2_8021Q_DISABLED (0x00 << 10) +#define PORT_CONTROL_2_8021Q_FALLBACK (0x01 << 10) +#define PORT_CONTROL_2_8021Q_CHECK (0x02 << 10) +#define PORT_CONTROL_2_8021Q_SECURE (0x03 << 10) #define PORT_CONTROL_2_DISCARD_TAGGED BIT(9) #define PORT_CONTROL_2_DISCARD_UNTAGGED BIT(8) #define PORT_CONTROL_2_MAP_DA BIT(7) @@ -169,6 +177,11 @@ #define GLOBAL_MAC_01 0x01 #define GLOBAL_MAC_23 0x02 #define GLOBAL_MAC_45 0x03 +#define GLOBAL_ATU_FID 0x01 /* 6097 6165 6351 6352 */ +#define GLOBAL_VTU_FID 0x02 /* 6097 6165 6351 6352 */ +#define GLOBAL_VTU_FID_MASK 0xfff +#define GLOBAL_VTU_SID 0x03 /* 6097 6165 6351 6352 */ +#define GLOBAL_VTU_SID_MASK 0x3f #define GLOBAL_CONTROL 0x04 #define GLOBAL_CONTROL_SW_RESET BIT(15) #define GLOBAL_CONTROL_PPU_ENABLE BIT(14) @@ -185,10 +198,27 @@ #define GLOBAL_CONTROL_TCAM_EN BIT(1) #define GLOBAL_CONTROL_EEPROM_DONE_EN BIT(0) #define GLOBAL_VTU_OP 0x05 +#define GLOBAL_VTU_OP_BUSY BIT(15) +#define GLOBAL_VTU_OP_FLUSH_ALL ((0x01 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_OP_VTU_LOAD_PURGE ((0x03 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_OP_VTU_GET_NEXT ((0x04 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_OP_STU_LOAD_PURGE ((0x05 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_OP_STU_GET_NEXT ((0x06 << 12) | GLOBAL_VTU_OP_BUSY) #define GLOBAL_VTU_VID 0x06 +#define GLOBAL_VTU_VID_MASK 0xfff +#define GLOBAL_VTU_VID_VALID BIT(12) #define GLOBAL_VTU_DATA_0_3 0x07 #define GLOBAL_VTU_DATA_4_7 0x08 #define GLOBAL_VTU_DATA_8_11 0x09 +#define GLOBAL_VTU_STU_DATA_MASK 0x03 +#define GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED 0x00 +#define GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED 0x01 +#define GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED 0x02 +#define GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER 0x03 +#define GLOBAL_STU_DATA_PORT_STATE_DISABLED 0x00 +#define GLOBAL_STU_DATA_PORT_STATE_BLOCKING 0x01 +#define GLOBAL_STU_DATA_PORT_STATE_LEARNING 0x02 +#define GLOBAL_STU_DATA_PORT_STATE_FORWARDING 0x03 #define GLOBAL_ATU_CONTROL 0x0a #define GLOBAL_ATU_CONTROL_LEARN2ALL BIT(3) #define GLOBAL_ATU_OP 0x0b @@ -203,6 +233,8 @@ #define GLOBAL_ATU_OP_GET_CLR_VIOLATION ((7 << 12) | GLOBAL_ATU_OP_BUSY) #define GLOBAL_ATU_DATA 0x0c #define GLOBAL_ATU_DATA_TRUNK BIT(15) +#define GLOBAL_ATU_DATA_TRUNK_ID_MASK 0x00f0 +#define GLOBAL_ATU_DATA_TRUNK_ID_SHIFT 4 #define GLOBAL_ATU_DATA_PORT_VECTOR_MASK 0x3ff0 #define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT 4 #define GLOBAL_ATU_DATA_STATE_MASK 0x0f @@ -285,8 +317,12 @@ #define GLOBAL2_PRIO_OVERRIDE_FORCE_ARP BIT(3) #define 
GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT 0 #define GLOBAL2_EEPROM_OP 0x14 -#define GLOBAL2_EEPROM_OP_BUSY BIT(15) -#define GLOBAL2_EEPROM_OP_LOAD BIT(11) +#define GLOBAL2_EEPROM_OP_BUSY BIT(15) +#define GLOBAL2_EEPROM_OP_WRITE ((3 << 12) | GLOBAL2_EEPROM_OP_BUSY) +#define GLOBAL2_EEPROM_OP_READ ((4 << 12) | GLOBAL2_EEPROM_OP_BUSY) +#define GLOBAL2_EEPROM_OP_LOAD BIT(11) +#define GLOBAL2_EEPROM_OP_WRITE_EN BIT(10) +#define GLOBAL2_EEPROM_OP_ADDR_MASK 0xff #define GLOBAL2_EEPROM_DATA 0x15 #define GLOBAL2_PTP_AVB_OP 0x16 #define GLOBAL2_PTP_AVB_DATA 0x17 @@ -309,6 +345,25 @@ #define GLOBAL2_QOS_WEIGHT 0x1c #define GLOBAL2_MISC 0x1d +struct mv88e6xxx_atu_entry { + u16 fid; + u8 state; + bool trunk; + u16 portv_trunkid; + u8 mac[ETH_ALEN]; +}; + +struct mv88e6xxx_vtu_stu_entry { + /* VTU only */ + u16 vid; + u16 fid; + + /* VTU and STU */ + u8 sid; + bool valid; + u8 data[DSA_MAX_PORTS]; +}; + struct mv88e6xxx_priv_state { /* When using multi-chip addressing, this mutex protects * access to the indirect access registers. (In single-chip @@ -347,9 +402,9 @@ struct mv88e6xxx_priv_state { /* hw bridging */ - u32 fid_mask; - u8 fid[DSA_MAX_PORTS]; - u16 bridge_mask[DSA_MAX_PORTS]; + DECLARE_BITMAP(fid_bitmap, VLAN_N_VID); /* FIDs 1 to 4095 available */ + u16 fid[DSA_MAX_PORTS]; /* per (non-bridged) port FID */ + u16 bridge_mask[DSA_MAX_PORTS]; /* br groups (indexed by FID) */ unsigned long port_state_update_mask; u8 port_state[DSA_MAX_PORTS]; @@ -409,12 +464,19 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask); int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask); int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state); +int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *vid); +int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 vid); +int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid, + bool untagged); +int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid); +int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid, + unsigned long *ports, unsigned long *untagged); int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid); int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid); int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port, - unsigned char *addr, bool *is_static); + unsigned char *addr, u16 *vid, bool *is_static); int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg); int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page, int reg, int val); diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 49adbf1b7574..815eb94990f5 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -144,10 +144,9 @@ static void dummy_setup(struct net_device *dev) dev->destructor = free_netdev; /* Fill in device structure with ethernet-generic values. 
*/ - dev->tx_queue_len = 0; dev->flags |= IFF_NOARP; dev->flags &= ~IFF_MULTICAST; - dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO; dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX; eth_hw_addr_random(dev); diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 2d1ce3c5d0dd..753887d02b46 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1763,16 +1763,9 @@ vortex_open(struct net_device *dev) vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); } if (i != RX_RING_SIZE) { - int j; pr_emerg("%s: no memory for rx ring\n", dev->name); - for (j = 0; j < i; j++) { - if (vp->rx_skbuff[j]) { - dev_kfree_skb(vp->rx_skbuff[j]); - vp->rx_skbuff[j] = NULL; - } - } retval = -ENOMEM; - goto err_free_irq; + goto err_free_skb; } /* Wrap the ring. */ vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); @@ -1782,7 +1775,13 @@ vortex_open(struct net_device *dev) if (!retval) goto out; -err_free_irq: +err_free_skb: + for (i = 0; i < RX_RING_SIZE; i++) { + if (vp->rx_skbuff[i]) { + dev_kfree_skb(vp->rx_skbuff[i]); + vp->rx_skbuff[i] = NULL; + } + } free_irq(dev->irq, dev); err: if (vortex_debug > 1) diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c index 580553d42d34..88ef67a998b4 100644 --- a/drivers/net/ethernet/altera/altera_sgdma.c +++ b/drivers/net/ethernet/altera/altera_sgdma.c @@ -71,8 +71,6 @@ int sgdma_initialize(struct altera_tse_private *priv) SGDMA_CTRLREG_INTEN | SGDMA_CTRLREG_ILASTD; - priv->sgdmadesclen = sizeof(struct sgdma_descrip); - INIT_LIST_HEAD(&priv->txlisthd); INIT_LIST_HEAD(&priv->rxlisthd); @@ -254,7 +252,7 @@ u32 sgdma_rx_status(struct altera_tse_private *priv) unsigned int pktstatus = 0; dma_sync_single_for_cpu(priv->device, priv->rxdescphys, - priv->sgdmadesclen, + SGDMA_DESC_LEN, DMA_FROM_DEVICE); pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred)); @@ -374,7 +372,7 @@ static int sgdma_async_read(struct altera_tse_private *priv) dma_sync_single_for_device(priv->device, priv->rxdescphys, - priv->sgdmadesclen, + SGDMA_DESC_LEN, DMA_TO_DEVICE); csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), @@ -402,7 +400,7 @@ static int sgdma_async_write(struct altera_tse_private *priv, csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status)); dma_sync_single_for_device(priv->device, priv->txdescphys, - priv->sgdmadesclen, DMA_TO_DEVICE); + SGDMA_DESC_LEN, DMA_TO_DEVICE); csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)), priv->tx_dma_csr, diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h index 85bc33b218d9..bbd52f02330b 100644 --- a/drivers/net/ethernet/altera/altera_sgdmahw.h +++ b/drivers/net/ethernet/altera/altera_sgdmahw.h @@ -50,6 +50,7 @@ struct sgdma_descrip { u8 control; } __packed; +#define SGDMA_DESC_LEN sizeof(struct sgdma_descrip) #define SGDMA_STATUS_ERR BIT(0) #define SGDMA_STATUS_LENGTH_ERR BIT(1) diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h index 2adb24d4523c..103c30ddddf7 100644 --- a/drivers/net/ethernet/altera/altera_tse.h +++ b/drivers/net/ethernet/altera/altera_tse.h @@ -458,7 +458,6 @@ struct altera_tse_private { u32 rxctrlreg; dma_addr_t rxdescphys; dma_addr_t txdescphys; - size_t sgdmadesclen; struct list_head txlisthd; struct list_head rxlisthd; 
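The 3c59x hunk above replaces an open-coded partial unwind (freeing only the j < i buffers allocated so far) with a single err_free_skb label that walks the whole RX ring. A minimal sketch of that unwind pattern, with hypothetical names (struct ring, ring_fill, RING_SIZE and PKT_BUF_SZ are placeholders, not taken from the driver):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define RING_SIZE 32			/* hypothetical ring depth */
#define PKT_BUF_SZ 1536			/* hypothetical buffer size */

struct ring {				/* hypothetical driver state */
	struct net_device *dev;
	struct sk_buff *skb[RING_SIZE];
};

static int ring_fill(struct ring *r)
{
	int i;

	for (i = 0; i < RING_SIZE; i++) {
		r->skb[i] = netdev_alloc_skb(r->dev, PKT_BUF_SZ);
		if (!r->skb[i])
			goto err_free_skb;
	}
	return 0;

err_free_skb:
	/* Slots the loop never reached are still NULL, so a single
	 * full-ring walk is enough to undo any partial setup.
	 */
	for (i = 0; i < RING_SIZE; i++) {
		if (r->skb[i]) {
			dev_kfree_skb(r->skb[i]);
			r->skb[i] = NULL;
		}
	}
	return -ENOMEM;
}

Walking the full ring from one label keeps the error path identical to a normal teardown, which is why the patch can drop the extra cleanup loop from vortex_open().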
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 299eb4315fe6..4f68d19c45bd 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -905,40 +905,6 @@ static int xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pda return ret; } -static int xgene_get_mac_address(struct device *dev, - unsigned char *addr) -{ - int ret; - - ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6); - if (ret) - ret = device_property_read_u8_array(dev, "mac-address", - addr, 6); - if (ret) - return -ENODEV; - - return ETH_ALEN; -} - -static int xgene_get_phy_mode(struct device *dev) -{ - int i, ret; - char *modestr; - - ret = device_property_read_string(dev, "phy-connection-type", - (const char **)&modestr); - if (ret) - ret = device_property_read_string(dev, "phy-mode", - (const char **)&modestr); - if (ret) - return -ENODEV; - - for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) { - if (!strcasecmp(modestr, phy_modes(i))) - return i; - } - return -ENODEV; -} static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) { @@ -998,12 +964,12 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) if (ret) return ret; - if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN) + if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN)) eth_hw_addr_random(ndev); memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); - pdata->phy_mode = xgene_get_phy_mode(dev); + pdata->phy_mode = device_get_phy_mode(dev); if (pdata->phy_mode < 0) { dev_err(dev, "Unable to get phy-connection-type\n"); return pdata->phy_mode; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 932bd1862f7a..2795d6db10e1 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -874,6 +874,8 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter, atl1c_clean_buffer(pdev, buffer_info); } + netdev_reset_queue(adapter->netdev); + /* Zero out Tx-buffers */ memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) * ring_count); @@ -1551,6 +1553,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); u16 hw_next_to_clean; u16 reg; + unsigned int total_bytes = 0, total_packets = 0; reg = type == atl1c_trans_high ? 
REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX; @@ -1558,12 +1561,18 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, while (next_to_clean != hw_next_to_clean) { buffer_info = &tpd_ring->buffer_info[next_to_clean]; + if (buffer_info->skb) { + total_bytes += buffer_info->skb->len; + total_packets++; + } atl1c_clean_buffer(pdev, buffer_info); if (++next_to_clean == tpd_ring->count) next_to_clean = 0; atomic_set(&tpd_ring->next_to_clean, next_to_clean); } + netdev_completed_queue(adapter->netdev, total_packets, total_bytes); + if (netif_queue_stopped(adapter->netdev) && netif_carrier_ok(adapter->netdev)) { netif_wake_queue(adapter->netdev); @@ -2256,6 +2265,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, spin_unlock_irqrestore(&adapter->tx_lock, flags); dev_kfree_skb_any(skb); } else { + netdev_sent_queue(adapter->netdev, skb->len); atl1c_tx_queue(adapter, skb, tpd, type); spin_unlock_irqrestore(&adapter->tx_lock, flags); } diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 8be9eab73320..e930aa9a3cfb 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -139,6 +139,16 @@ config BNX2X_SRIOV Virtualization support in the 578xx and 57712 products. This allows for virtual function acceleration in virtual environments. +config BNX2X_VXLAN + bool "Virtual eXtensible Local Area Network support" + default n + depends on BNX2X && VXLAN && !(BNX2X=y && VXLAN=m) + ---help--- + This enables hardware offload support for VXLAN protocol over the + NetXtremeII series adapters. + Say Y here if you want to enable hardware offload support for + Virtual eXtensible Local Area Network (VXLAN) in the driver. + config BGMAC tristate "BCMA bus GBit core support" depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 5762c485ea06..ba936635322a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1392,6 +1392,8 @@ enum sp_rtnl_flag { BNX2X_SP_RTNL_HYPERVISOR_VLAN, BNX2X_SP_RTNL_TX_STOP, BNX2X_SP_RTNL_GET_DRV_VERSION, + BNX2X_SP_RTNL_ADD_VXLAN_PORT, + BNX2X_SP_RTNL_DEL_VXLAN_PORT, }; enum bnx2x_iov_flag { @@ -2571,6 +2573,10 @@ void bnx2x_notify_link_changed(struct bnx2x *bp); (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) || \ IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp)) +/* Determines whether BW configuration arrives in 100Mb units or in + * percentages from actual physical link speed. 
+ */ +#define IS_MF_PERCENT_BW(bp) (IS_MF_SI(bp) || IS_MF_UFP(bp) || IS_MF_BD(bp)) #define SET_FLAG(value, mask, flag) \ do {\ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 1637de6caf46..44173be5cbf0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -264,9 +264,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, if (likely(skb)) { (*pkts_compl)++; (*bytes_compl) += skb->len; + dev_kfree_skb_any(skb); } - dev_kfree_skb_any(skb); tx_buf->first_bd = 0; tx_buf->skb = NULL; @@ -1190,7 +1190,7 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp) /* Calculate the current MAX line speed limit for the MF * devices */ - if (IS_MF_SI(bp)) + if (IS_MF_PERCENT_BW(bp)) line_speed = (line_speed * maxCfg) / 100; else { /* SD mode */ u16 vn_max_rate = maxCfg * 100; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index e18a0e4d3ed1..b7d32e8412f1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -967,6 +967,8 @@ static inline int bnx2x_func_start(struct bnx2x *bp) else /* CHIP_IS_E1X */ start_params->network_cos_mode = FW_WRR; + start_params->vxlan_dst_port = bp->vxlan_dst_port; + start_params->inner_rss = 1; if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 6f909077b919..aeb7ce64452e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1723,6 +1723,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, offset += sizeof(u32); data_buf += sizeof(u32); written_so_far += sizeof(u32); + + /* At end of each 4Kb page, release nvram lock to allow MFW + * chance to take it for its own use. 
+ */ + if ((cmd_flags & MCPR_NVM_COMMAND_LAST) && + (written_so_far < buf_size)) { + DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, + "Releasing NVM lock after offset 0x%x\n", + (u32)(offset - sizeof(u32))); + bnx2x_release_nvram_lock(bp); + usleep_range(1000, 2000); + rc = bnx2x_acquire_nvram_lock(bp); + if (rc) + return rc; + } + cmd_flags = 0; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index ad73a60de333..26fbfcc6f7db 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -2494,7 +2494,7 @@ static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn, else { u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); - if (IS_MF_SI(bp)) { + if (IS_MF_PERCENT_BW(bp)) { /* maxCfg in percents of linkspeed */ vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; } else /* SD modes */ @@ -10075,6 +10075,81 @@ static void bnx2x_parity_recover(struct bnx2x *bp) } } +#ifdef CONFIG_BNX2X_VXLAN +static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port) +{ + struct bnx2x_func_switch_update_params *switch_update_params; + struct bnx2x_func_state_params func_params = {NULL}; + int rc; + + switch_update_params = &func_params.params.switch_update; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; + + /* Function parameters */ + __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, + &switch_update_params->changes); + switch_update_params->vxlan_dst_port = port; + rc = bnx2x_func_state_change(bp, &func_params); + if (rc) + BNX2X_ERR("failed to change vxlan dst port to %d (rc = 0x%x)\n", + port, rc); + return rc; +} + +static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port) +{ + if (!netif_running(bp->dev)) + return; + + if (bp->vxlan_dst_port || !IS_PF(bp)) { + DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n"); + return; + } + + bp->vxlan_dst_port = port; + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0); +} + +static void bnx2x_add_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct bnx2x *bp = netdev_priv(netdev); + u16 t_port = ntohs(port); + + __bnx2x_add_vxlan_port(bp, t_port); +} + +static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port) +{ + if (!bp->vxlan_dst_port || bp->vxlan_dst_port != port || !IS_PF(bp)) { + DP(BNX2X_MSG_SP, "Invalid vxlan port\n"); + return; + } + + if (netif_running(bp->dev)) { + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0); + } else { + bp->vxlan_dst_port = 0; + netdev_info(bp->dev, "Deleted vxlan dest port %d", port); + } +} + +static void bnx2x_del_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct bnx2x *bp = netdev_priv(netdev); + u16 t_port = ntohs(port); + + __bnx2x_del_vxlan_port(bp, t_port); +} +#endif + static int bnx2x_close(struct net_device *dev); /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is @@ -10083,6 +10158,9 @@ static int bnx2x_close(struct net_device *dev); static void bnx2x_sp_rtnl_task(struct work_struct *work) { struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work); +#ifdef CONFIG_BNX2X_VXLAN + u16 port; +#endif rtnl_lock(); @@ -10181,6 +10259,27 @@ sp_rtnl_not_reset: &bp->sp_rtnl_state)) bnx2x_update_mng_version(bp); +#ifdef CONFIG_BNX2X_VXLAN + port = bp->vxlan_dst_port; + if 
(test_and_clear_bit(BNX2X_SP_RTNL_ADD_VXLAN_PORT, + &bp->sp_rtnl_state)) { + if (!bnx2x_vxlan_port_update(bp, port)) + netdev_info(bp->dev, "Added vxlan dest port %d", port); + else + bp->vxlan_dst_port = 0; + } + + if (test_and_clear_bit(BNX2X_SP_RTNL_DEL_VXLAN_PORT, + &bp->sp_rtnl_state)) { + if (!bnx2x_vxlan_port_update(bp, 0)) { + netdev_info(bp->dev, + "Deleted vxlan dest port %d", port); + bp->vxlan_dst_port = 0; + vxlan_get_rx_port(bp->dev); + } + } +#endif + /* work which needs rtnl lock not-taken (as it takes the lock itself and * can be called from other contexts as well) */ @@ -12379,6 +12478,12 @@ static int bnx2x_open(struct net_device *dev) rc = bnx2x_nic_load(bp, LOAD_OPEN); if (rc) return rc; + +#ifdef CONFIG_BNX2X_VXLAN + if (IS_PF(bp)) + vxlan_get_rx_port(dev); +#endif + return 0; } @@ -12894,6 +12999,10 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_get_phys_port_id = bnx2x_get_phys_port_id, .ndo_set_vf_link_state = bnx2x_set_vf_link_state, .ndo_features_check = bnx2x_features_check, +#ifdef CONFIG_BNX2X_VXLAN + .ndo_add_vxlan_port = bnx2x_add_vxlan_port, + .ndo_del_vxlan_port = bnx2x_del_vxlan_port, +#endif }; static int bnx2x_set_coherency_mask(struct bnx2x *bp) diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 0612b19f6313..506047c38607 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -676,6 +676,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) if (!next_cmpl->valid) break; } + packets++; /* TODO: BNA_CQ_EF_LOCAL ? */ if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | @@ -692,7 +693,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) else bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); - packets++; rcb->rxq->rx_packets++; rcb->rxq->rx_bytes += totlen; ccb->bytes_per_intr += totlen; diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 358442087878..9b35d142f47a 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -16,7 +16,6 @@ if NET_VENDOR_CAVIUM config THUNDER_NIC_PF tristate "Thunder Physical function driver" depends on 64BIT - default ARCH_THUNDER select THUNDER_NIC_BGX ---help--- This driver supports Thunder's NIC physical function. @@ -29,14 +28,12 @@ config THUNDER_NIC_PF config THUNDER_NIC_VF tristate "Thunder Virtual function driver" depends on 64BIT - default ARCH_THUNDER ---help--- This driver supports Thunder's NIC virtual function config THUNDER_NIC_BGX tristate "Thunder MAC interface driver (BGX)" depends on 64BIT - default ARCH_THUNDER select PHYLIB select MDIO_OCTEON ---help--- diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index b961a89dc626..5e541862f65e 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -6,6 +6,7 @@ * as published by the Free Software Foundation. 
*/ +#include <linux/acpi.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/pci.h> @@ -26,7 +27,7 @@ struct lmac { struct bgx *bgx; int dmac; - unsigned char mac[ETH_ALEN]; + u8 mac[ETH_ALEN]; bool link_up; int lmacid; /* ID within BGX */ int lmacid_bd; /* ID on board */ @@ -835,18 +836,108 @@ static void bgx_get_qlm_mode(struct bgx *bgx) } } -static void bgx_init_of(struct bgx *bgx, struct device_node *np) +#ifdef CONFIG_ACPI + +static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst) { + u8 mac[ETH_ALEN]; + int ret; + + ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev), + "mac-address", mac, ETH_ALEN); + if (ret) + goto out; + + if (!is_valid_ether_addr(mac)) { + ret = -EINVAL; + goto out; + } + + memcpy(dst, mac, ETH_ALEN); +out: + return ret; +} + +/* Currently only sets the MAC address. */ +static acpi_status bgx_acpi_register_phy(acpi_handle handle, + u32 lvl, void *context, void **rv) +{ + struct bgx *bgx = context; + struct acpi_device *adev; + + if (acpi_bus_get_device(handle, &adev)) + goto out; + + acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac); + + SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev); + + bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count; +out: + bgx->lmac_count++; + return AE_OK; +} + +static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl, + void *context, void **ret_val) +{ + struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; + struct bgx *bgx = context; + char bgx_sel[5]; + + snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id); + if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) { + pr_warn("Invalid link device\n"); + return AE_OK; + } + + if (strncmp(string.pointer, bgx_sel, 4)) + return AE_OK; + + acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, + bgx_acpi_register_phy, NULL, bgx, NULL); + + kfree(string.pointer); + return AE_CTRL_TERMINATE; +} + +static int bgx_init_acpi_phy(struct bgx *bgx) +{ + acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL); + return 0; +} + +#else + +static int bgx_init_acpi_phy(struct bgx *bgx) +{ + return -ENODEV; +} + +#endif /* CONFIG_ACPI */ + +#if IS_ENABLED(CONFIG_OF_MDIO) + +static int bgx_init_of_phy(struct bgx *bgx) +{ + struct device_node *np; struct device_node *np_child; u8 lmac = 0; + char bgx_sel[5]; + const char *mac; + + /* Get BGX node from DT */ + snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id); + np = of_find_node_by_name(NULL, bgx_sel); + if (!np) + return -ENODEV; for_each_child_of_node(np, np_child) { - struct device_node *phy_np; - const char *mac; - - phy_np = of_parse_phandle(np_child, "phy-handle", 0); - if (phy_np) - bgx->lmac[lmac].phydev = of_phy_find_device(phy_np); + struct device_node *phy_np = of_parse_phandle(np_child, + "phy-handle", 0); + if (!phy_np) + continue; + bgx->lmac[lmac].phydev = of_phy_find_device(phy_np); mac = of_get_mac_address(np_child); if (mac) @@ -858,6 +949,24 @@ static void bgx_init_of(struct bgx *bgx, struct device_node *np) if (lmac == MAX_LMAC_PER_BGX) break; } + return 0; +} + +#else + +static int bgx_init_of_phy(struct bgx *bgx) +{ + return -ENODEV; +} + +#endif /* CONFIG_OF_MDIO */ + +static int bgx_init_phy(struct bgx *bgx) +{ + if (!acpi_disabled) + return bgx_init_acpi_phy(bgx); + + return bgx_init_of_phy(bgx); } static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -865,8 +974,6 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int err; struct device *dev = &pdev->dev; struct bgx *bgx = NULL; - struct device_node *np; - char bgx_sel[5]; u8 lmac; bgx = devm_kzalloc(dev, 
sizeof(*bgx), GFP_KERNEL); @@ -902,10 +1009,9 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) bgx_vnic[bgx->bgx_id] = bgx; bgx_get_qlm_mode(bgx); - snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id); - np = of_find_node_by_name(NULL, bgx_sel); - if (np) - bgx_init_of(bgx, np); + err = bgx_init_phy(bgx); + if (err) + goto err_enable; bgx_init_hw(bgx); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 58de4443eac0..3c99454aac0a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -768,6 +768,10 @@ struct adapter { struct dentry *debugfs_root; u32 use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */ + u32 trace_rss; /* 1 implies that a different RSS flit is used + * per filter; 0 means the default RSS flit is + * used for all 4 filters. + */ spinlock_t stats_lock; spinlock_t win0_lock ____cacheline_aligned_in_smp; @@ -1441,6 +1445,10 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox); int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); void t4_db_full(struct adapter *adapter); void t4_db_dropped(struct adapter *adapter); +int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp, + int filter_index, int enable); +void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp, + int filter_index, int *enabled); int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val); void t4_sge_decode_idma_state(struct adapter *adapter, int state); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index b6577349cf4e..0a87a3247464 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -346,11 +346,11 @@ static int cim_qcfg_show(struct seq_file *seq, void *v) if (is_t4(adap->params.chip)) { i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A, ARRAY_SIZE(obq_wr_t4), obq_wr_t4); - wr = obq_wr_t4; + wr = obq_wr_t4; } else { i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A, ARRAY_SIZE(obq_wr_t5), obq_wr_t5); - wr = obq_wr_t5; + wr = obq_wr_t5; } } if (i) @@ -1201,6 +1201,299 @@ static const struct file_operations mbox_debugfs_fops = { .write = mbox_write }; +static int mps_trc_show(struct seq_file *seq, void *v) +{ + int enabled, i; + struct trace_params tp; + unsigned int trcidx = (uintptr_t)seq->private & 3; + struct adapter *adap = seq->private - trcidx; + + t4_get_trace_filter(adap, &tp, trcidx, &enabled); + if (!enabled) { + seq_puts(seq, "tracer is disabled\n"); + return 0; + } + + if (tp.skip_ofst * 8 >= TRACE_LEN) { + dev_err(adap->pdev_dev, "illegal trace pattern skip offset\n"); + return -EINVAL; + } + if (tp.port < 8) { + i = adap->chan_map[tp.port & 3]; + if (i >= MAX_NPORTS) { + dev_err(adap->pdev_dev, "tracer %u is assigned " + "to non-existing port\n", trcidx); + return -EINVAL; + } + seq_printf(seq, "tracer is capturing %s %s, ", + adap->port[i]->name, tp.port < 4 ? "Rx" : "Tx"); + } else + seq_printf(seq, "tracer is capturing loopback %d, ", + tp.port - 8); + seq_printf(seq, "snap length: %u, min length: %u\n", tp.snap_len, + tp.min_len); + seq_printf(seq, "packets captured %smatch filter\n", + tp.invert ? 
"do not " : ""); + + if (tp.skip_ofst) { + seq_puts(seq, "filter pattern: "); + for (i = 0; i < tp.skip_ofst * 2; i += 2) + seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]); + seq_putc(seq, '/'); + for (i = 0; i < tp.skip_ofst * 2; i += 2) + seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]); + seq_puts(seq, "@0\n"); + } + + seq_puts(seq, "filter pattern: "); + for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2) + seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]); + seq_putc(seq, '/'); + for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2) + seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]); + seq_printf(seq, "@%u\n", (tp.skip_ofst + tp.skip_len) * 8); + return 0; +} + +static int mps_trc_open(struct inode *inode, struct file *file) +{ + return single_open(file, mps_trc_show, inode->i_private); +} + +static unsigned int xdigit2int(unsigned char c) +{ + return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10; +} + +#define TRC_PORT_NONE 0xff +#define TRC_RSS_ENABLE 0x33 +#define TRC_RSS_DISABLE 0x13 + +/* Set an MPS trace filter. Syntax is: + * + * disable + * + * to disable tracing, or + * + * interface qid= [snaplen=] [minlen=] [not] []... + * + * where interface is one of rxN, txN, or loopbackN, N = 0..3, qid can be one + * of the NIC's response qid obtained from sge_qinfo and pattern has the form + * + * [/][@] + * + * Up to 2 filter patterns can be specified. If 2 are supplied the first one + * must be anchored at 0. An omited mask is taken as a mask of 1s, an omitted + * anchor is taken as 0. + */ +static ssize_t mps_trc_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + int i, enable, ret; + u32 *data, *mask; + struct trace_params tp; + const struct inode *ino; + unsigned int trcidx; + char *s, *p, *word, *end; + struct adapter *adap; + u32 j; + + ino = file_inode(file); + trcidx = (uintptr_t)ino->i_private & 3; + adap = ino->i_private - trcidx; + + /* Don't accept input more than 1K, can't be anything valid except lots + * of whitespace. Well, use less. 
+ */ + if (count > 1024) + return -EFBIG; + p = s = kzalloc(count + 1, GFP_USER); + if (!s) + return -ENOMEM; + if (copy_from_user(s, buf, count)) { + count = -EFAULT; + goto out; + } + + if (s[count - 1] == '\n') + s[count - 1] = '\0'; + + enable = strcmp("disable", s) != 0; + if (!enable) + goto apply; + + /* enable or disable trace multi rss filter */ + if (adap->trace_rss) + t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_ENABLE); + else + t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_DISABLE); + + memset(&tp, 0, sizeof(tp)); + tp.port = TRC_PORT_NONE; + i = 0; /* counts pattern nibbles */ + + while (p) { + while (isspace(*p)) + p++; + word = strsep(&p, " "); + if (!*word) + break; + + if (!strncmp(word, "qid=", 4)) { + end = (char *)word + 4; + ret = kstrtouint(end, 10, &j); + if (ret) + goto out; + if (!adap->trace_rss) { + t4_write_reg(adap, MPS_T5_TRC_RSS_CONTROL_A, j); + continue; + } + + switch (trcidx) { + case 0: + t4_write_reg(adap, MPS_TRC_RSS_CONTROL_A, j); + break; + case 1: + t4_write_reg(adap, + MPS_TRC_FILTER1_RSS_CONTROL_A, j); + break; + case 2: + t4_write_reg(adap, + MPS_TRC_FILTER2_RSS_CONTROL_A, j); + break; + case 3: + t4_write_reg(adap, + MPS_TRC_FILTER3_RSS_CONTROL_A, j); + break; + } + continue; + } + if (!strncmp(word, "snaplen=", 8)) { + end = (char *)word + 8; + ret = kstrtouint(end, 10, &j); + if (ret || j > 9600) { +inval: count = -EINVAL; + goto out; + } + tp.snap_len = j; + continue; + } + if (!strncmp(word, "minlen=", 7)) { + end = (char *)word + 7; + ret = kstrtouint(end, 10, &j); + if (ret || j > TFMINPKTSIZE_M) + goto inval; + tp.min_len = j; + continue; + } + if (!strcmp(word, "not")) { + tp.invert = !tp.invert; + continue; + } + if (!strncmp(word, "loopback", 8) && tp.port == TRC_PORT_NONE) { + if (word[8] < '0' || word[8] > '3' || word[9]) + goto inval; + tp.port = word[8] - '0' + 8; + continue; + } + if (!strncmp(word, "tx", 2) && tp.port == TRC_PORT_NONE) { + if (word[2] < '0' || word[2] > '3' || word[3]) + goto inval; + tp.port = word[2] - '0' + 4; + if (adap->chan_map[tp.port & 3] >= MAX_NPORTS) + goto inval; + continue; + } + if (!strncmp(word, "rx", 2) && tp.port == TRC_PORT_NONE) { + if (word[2] < '0' || word[2] > '3' || word[3]) + goto inval; + tp.port = word[2] - '0'; + if (adap->chan_map[tp.port] >= MAX_NPORTS) + goto inval; + continue; + } + if (!isxdigit(*word)) + goto inval; + + /* we have found a trace pattern */ + if (i) { /* split pattern */ + if (tp.skip_len) /* too many splits */ + goto inval; + tp.skip_ofst = i / 16; + } + + data = &tp.data[i / 8]; + mask = &tp.mask[i / 8]; + j = i; + + while (isxdigit(*word)) { + if (i >= TRACE_LEN * 2) { + count = -EFBIG; + goto out; + } + *data = (*data << 4) + xdigit2int(*word++); + if (++i % 8 == 0) + data++; + } + if (*word == '/') { + word++; + while (isxdigit(*word)) { + if (j >= i) /* mask longer than data */ + goto inval; + *mask = (*mask << 4) + xdigit2int(*word++); + if (++j % 8 == 0) + mask++; + } + if (i != j) /* mask shorter than data */ + goto inval; + } else { /* no mask, use all 1s */ + for ( ; i - j >= 8; j += 8) + *mask++ = 0xffffffff; + if (i % 8) + *mask = (1 << (i % 8) * 4) - 1; + } + if (*word == '@') { + end = (char *)word + 1; + ret = kstrtouint(end, 10, &j); + if (*end && *end != '\n') + goto inval; + if (j & 7) /* doesn't start at multiple of 8 */ + goto inval; + j /= 8; + if (j < tp.skip_ofst) /* overlaps earlier pattern */ + goto inval; + if (j - tp.skip_ofst > 31) /* skip too big */ + goto inval; + tp.skip_len = j - tp.skip_ofst; + } + if (i % 8) { + *data <<= (8 - i % 8) * 
4; + *mask <<= (8 - i % 8) * 4; + i = (i + 15) & ~15; /* 8-byte align */ + } + } + + if (tp.port == TRC_PORT_NONE) + goto inval; + +apply: + i = t4_set_trace_filter(adap, &tp, trcidx, enable); + if (i) + count = i; +out: + kfree(s); + return count; +} + +static const struct file_operations mps_trc_debugfs_fops = { + .owner = THIS_MODULE, + .open = mps_trc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = mps_trc_write +}; + static ssize_t flash_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { @@ -1943,13 +2236,13 @@ static int sge_qinfo_show(struct seq_file *seq, void *v) { struct adapter *adap = seq->private; int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4); - int toe_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4); + int iscsi_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4); int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4); int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4); int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4); int i, r = (uintptr_t)v - 1; - int toe_idx = r - eth_entries; - int rdma_idx = toe_idx - toe_entries; + int iscsi_idx = r - eth_entries; + int rdma_idx = iscsi_idx - iscsi_entries; int ciq_idx = rdma_idx - rdma_entries; int ctrl_idx = ciq_idx - ciq_entries; int fq_idx = ctrl_idx - ctrl_entries; @@ -1965,8 +2258,12 @@ do { \ seq_putc(seq, '\n'); \ } while (0) #define S(s, v) S3("s", s, v) +#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v) #define T(s, v) S3("u", s, tx[i].v) +#define TL(s, v) T3("lu", s, v) +#define R3(fmt_spec, s, v) S3(fmt_spec, s, rx[i].v) #define R(s, v) S3("u", s, rx[i].v) +#define RL(s, v) R3("lu", s, v) if (r < eth_entries) { int base_qset = r * 4; @@ -2005,12 +2302,30 @@ do { \ R("FL avail:", fl.avail); R("FL PIDX:", fl.pidx); R("FL CIDX:", fl.cidx); - } else if (toe_idx < toe_entries) { - const struct sge_ofld_rxq *rx = &adap->sge.ofldrxq[toe_idx * 4]; - const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[toe_idx * 4]; - int n = min(4, adap->sge.ofldqsets - 4 * toe_idx); + RL("RxPackets:", stats.pkts); + RL("RxCSO:", stats.rx_cso); + RL("VLANxtract:", stats.vlan_ex); + RL("LROmerged:", stats.lro_merged); + RL("LROpackets:", stats.lro_pkts); + RL("RxDrops:", stats.rx_drops); + TL("TSO:", tso); + TL("TxCSO:", tx_cso); + TL("VLANins:", vlan_ins); + TL("TxQFull:", q.stops); + TL("TxQRestarts:", q.restarts); + TL("TxMapErr:", mapping_err); + RL("FLAllocErr:", fl.alloc_failed); + RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLStarving:", fl.starving); - S("QType:", "TOE"); + } else if (iscsi_idx < iscsi_entries) { + const struct sge_ofld_rxq *rx = + &adap->sge.ofldrxq[iscsi_idx * 4]; + const struct sge_ofld_txq *tx = + &adap->sge.ofldtxq[iscsi_idx * 4]; + int n = min(4, adap->sge.ofldqsets - 4 * iscsi_idx); + + S("QType:", "iSCSI"); T("TxQ ID:", q.cntxt_id); T("TxQ size:", q.size); T("TxQ inuse:", q.in_use); @@ -2030,6 +2345,13 @@ do { \ R("FL avail:", fl.avail); R("FL PIDX:", fl.pidx); R("FL CIDX:", fl.cidx); + RL("RxPackets:", stats.pkts); + RL("RxImmPkts:", stats.imm); + RL("RxNoMem:", stats.nomem); + RL("FLAllocErr:", fl.alloc_failed); + RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLStarving:", fl.starving); + } else if (rdma_idx < rdma_entries) { const struct sge_ofld_rxq *rx = &adap->sge.rdmarxq[rdma_idx * 4]; @@ -2052,6 +2374,13 @@ do { \ R("FL avail:", fl.avail); R("FL PIDX:", fl.pidx); R("FL CIDX:", fl.cidx); + RL("RxPackets:", stats.pkts); + RL("RxImmPkts:", stats.imm); + RL("RxNoMem:", stats.nomem); + RL("FLAllocErr:", fl.alloc_failed); + 
RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLStarving:", fl.starving); + } else if (ciq_idx < ciq_entries) { const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4]; int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx); @@ -2067,6 +2396,9 @@ do { \ S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); S3("u", "Intr pktcnt:", adap->sge.counter_val[rx[i].rspq.pktcnt_idx]); + RL("RxAN:", stats.an); + RL("RxNoMem:", stats.nomem); + } else if (ctrl_idx < ctrl_entries) { const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4]; int n = min(4, adap->params.nports - 4 * ctrl_idx); @@ -2077,6 +2409,8 @@ do { \ T("TxQ inuse:", q.in_use); T("TxQ CIDX:", q.cidx); T("TxQ PIDX:", q.pidx); + TL("TxQFull:", q.stops); + TL("TxQRestarts:", q.restarts); } else if (fq_idx == 0) { const struct sge_rspq *evtq = &adap->sge.fw_evtq; @@ -2092,10 +2426,14 @@ do { \ adap->sge.counter_val[evtq->pktcnt_idx]); } #undef R +#undef RL #undef T +#undef TL #undef S +#undef R3 +#undef T3 #undef S3 -return 0; + return 0; } static int sge_queue_entries(const struct adapter *adap) @@ -2212,6 +2550,73 @@ static const struct file_operations mem_debugfs_fops = { .llseek = default_llseek, }; +static int tid_info_show(struct seq_file *seq, void *v) +{ + struct adapter *adap = seq->private; + const struct tid_info *t = &adap->tids; + enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip); + + if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) { + unsigned int sb; + + if (chip <= CHELSIO_T5) + sb = t4_read_reg(adap, LE_DB_SERVER_INDEX_A) / 4; + else + sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A); + + if (sb) { + seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1, + adap->tids.hash_base, + t->ntids - 1); + seq_printf(seq, ", in use: %u/%u\n", + atomic_read(&t->tids_in_use), + atomic_read(&t->hash_tids_in_use)); + } else if (adap->flags & FW_OFLD_CONN) { + seq_printf(seq, "TID range: %u..%u/%u..%u", + t->aftid_base, + t->aftid_end, + adap->tids.hash_base, + t->ntids - 1); + seq_printf(seq, ", in use: %u/%u\n", + atomic_read(&t->tids_in_use), + atomic_read(&t->hash_tids_in_use)); + } else { + seq_printf(seq, "TID range: %u..%u", + adap->tids.hash_base, + t->ntids - 1); + seq_printf(seq, ", in use: %u\n", + atomic_read(&t->hash_tids_in_use)); + } + } else if (t->ntids) { + seq_printf(seq, "TID range: 0..%u", t->ntids - 1); + seq_printf(seq, ", in use: %u\n", + atomic_read(&t->tids_in_use)); + } + + if (t->nstids) + seq_printf(seq, "STID range: %u..%u, in use: %u\n", + (!t->stid_base && + (chip <= CHELSIO_T5)) ? 
+ t->stid_base + 1 : t->stid_base, + t->stid_base + t->nstids - 1, t->stids_in_use); + if (t->natids) + seq_printf(seq, "ATID range: 0..%u, in use: %u\n", + t->natids - 1, t->atids_in_use); + seq_printf(seq, "FTID range: %u..%u\n", t->ftid_base, + t->ftid_base + t->nftids - 1); + if (t->nsftids) + seq_printf(seq, "SFTID range: %u..%u in use: %u\n", + t->sftid_base, t->sftid_base + t->nsftids - 2, + t->sftids_in_use); + if (t->ntids) + seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n", + t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A), + t4_read_reg(adap, LE_DB_ACT_CNT_IPV6_A)); + return 0; +} + +DEFINE_SIMPLE_DEBUGFS_FILE(tid_info); + static void add_debugfs_mem(struct adapter *adap, const char *name, unsigned int idx, unsigned int size_mb) { @@ -2596,6 +3001,10 @@ int t4_setup_debugfs(struct adapter *adap) { "mbox5", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 5 }, { "mbox6", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 6 }, { "mbox7", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 7 }, + { "trace0", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 0 }, + { "trace1", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 1 }, + { "trace2", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 2 }, + { "trace3", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 3 }, { "l2t", &t4_l2t_fops, S_IRUSR, 0}, { "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 }, { "rss", &rss_debugfs_fops, S_IRUSR, 0 }, @@ -2625,6 +3034,7 @@ int t4_setup_debugfs(struct adapter *adap) #if IS_ENABLED(CONFIG_IPV6) { "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 }, #endif + { "tids", &tid_info_debugfs_fops, S_IRUSR, 0}, { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 }, { "meminfo", &meminfo_fops, S_IRUSR, 0 }, }; @@ -2665,16 +3075,19 @@ int t4_setup_debugfs(struct adapter *adap) EXT_MEM1_SIZE_G(size)); } } else { - if (i & EXT_MEM_ENABLE_F) + if (i & EXT_MEM_ENABLE_F) { size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); add_debugfs_mem(adap, "mc", MEM_MC, EXT_MEM_SIZE_G(size)); + } } de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, &flash_debugfs_fops, adap->params.sf_size); debugfs_create_bool("use_backdoor", S_IWUSR | S_IRUSR, adap->debugfs_root, &adap->use_bd); + debugfs_create_bool("trace_rss", S_IWUSR | S_IRUSR, + adap->debugfs_root, &adap->trace_rss); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 27e87b6baa45..f35dd2284d40 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -1548,7 +1548,7 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data) t->stid_tab[stid].data = data; stid -= t->nstids; stid += t->sftid_base; - t->stids_in_use++; + t->sftids_in_use++; } spin_unlock_bh(&t->stid_lock); return stid; @@ -1573,10 +1573,14 @@ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) else bitmap_release_region(t->stid_bmap, stid, 2); t->stid_tab[stid].data = NULL; - if (family == PF_INET) - t->stids_in_use--; - else - t->stids_in_use -= 4; + if (stid < t->nstids) { + if (family == PF_INET) + t->stids_in_use--; + else + t->stids_in_use -= 4; + } else { + t->sftids_in_use--; + } spin_unlock_bh(&t->stid_lock); } EXPORT_SYMBOL(cxgb4_free_stid); @@ -1654,20 +1658,25 @@ static void process_tid_release_list(struct work_struct *work) */ void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid) { - void *old; struct sk_buff *skb; struct adapter *adap = container_of(t, struct adapter, tids); - old = t->tid_tab[tid]; + WARN_ON(tid >= t->ntids); + + if 
(t->tid_tab[tid]) { + t->tid_tab[tid] = NULL; + if (t->hash_base && (tid >= t->hash_base)) + atomic_dec(&t->hash_tids_in_use); + else + atomic_dec(&t->tids_in_use); + } + skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); if (likely(skb)) { - t->tid_tab[tid] = NULL; mk_tid_release(skb, chan, tid); t4_ofld_send(adap, skb); } else cxgb4_queue_tid_release(t, chan, tid); - if (old) - atomic_dec(&t->tids_in_use); } EXPORT_SYMBOL(cxgb4_remove_tid); @@ -1702,9 +1711,11 @@ static int tid_init(struct tid_info *t) spin_lock_init(&t->atid_lock); t->stids_in_use = 0; + t->sftids_in_use = 0; t->afree = NULL; t->atids_in_use = 0; atomic_set(&t->tids_in_use, 0); + atomic_set(&t->hash_tids_in_use, 0); /* Setup the free list for atid_tab and clear the stid bitmap. */ if (natids) { @@ -4814,6 +4825,22 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->params.offload = 0; } + if (is_offload(adapter)) { + if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) { + u32 hash_base, hash_reg; + + if (chip <= CHELSIO_T5) { + hash_reg = LE_DB_TID_HASHBASE_A; + hash_base = t4_read_reg(adapter, hash_reg); + adapter->tids.hash_base = hash_base / 4; + } else { + hash_reg = T6_LE_DB_HASH_TID_BASE_A; + hash_base = t4_read_reg(adapter, hash_reg); + adapter->tids.hash_base = hash_base; + } + } + } + /* See what interrupts we'll be using */ if (msi > 1 && enable_msix(adapter) == 0) adapter->flags |= USING_MSIX; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index b27897d4f787..c3a8be5541e7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -96,6 +96,7 @@ struct tid_info { unsigned long *stid_bmap; unsigned int nstids; unsigned int stid_base; + unsigned int hash_base; union aopen_entry *atid_tab; unsigned int natids; @@ -116,8 +117,12 @@ struct tid_info { spinlock_t stid_lock; unsigned int stids_in_use; + unsigned int sftids_in_use; + /* TIDs in the TCAM */ atomic_t tids_in_use; + /* TIDs in the HASH */ + atomic_t hash_tids_in_use; }; static inline void *lookup_tid(const struct tid_info *t, unsigned int tid) @@ -147,7 +152,10 @@ static inline void cxgb4_insert_tid(struct tid_info *t, void *data, unsigned int tid) { t->tid_tab[tid] = data; - atomic_inc(&t->tids_in_use); + if (t->hash_base && (tid >= t->hash_base)) + atomic_inc(&t->hash_tids_in_use); + else + atomic_inc(&t->tids_in_use); } int cxgb4_alloc_atid(struct tid_info *t, void *data); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index d4248d74f560..78f446c58422 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1424,18 +1424,17 @@ static void restart_ctrlq(unsigned long data) struct fw_wr_hdr *wr; unsigned int ndesc = skb->priority; /* previously saved */ - /* - * Write descriptors and free skbs outside the lock to limit + written += ndesc; + /* Write descriptors and free skbs outside the lock to limit * wait times. q->full is still set so new skbs will be queued. 
*/ + wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; + txq_advance(&q->q, ndesc); spin_unlock(&q->sendq.lock); - wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; inline_tx_skb(skb, &q->q, wr); kfree_skb(skb); - written += ndesc; - txq_advance(&q->q, ndesc); if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { unsigned long old = q->q.stops; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 91750ad580ae..ac368efe2862 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -4264,6 +4264,119 @@ void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate) } } +/** + * t4_set_trace_filter - configure one of the tracing filters + * @adap: the adapter + * @tp: the desired trace filter parameters + * @idx: which filter to configure + * @enable: whether to enable or disable the filter + * + * Configures one of the tracing filters available in HW. If @enable is + * %0 @tp is not examined and may be %NULL. The user is responsible for + * setting the single/multiple trace mode by writing to MPS_TRC_CFG_A. + */ +int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, + int idx, int enable) +{ + int i, ofst = idx * 4; + u32 data_reg, mask_reg, cfg; + u32 multitrc = TRCMULTIFILTER_F; + + if (!enable) { + t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0); + return 0; + } + + cfg = t4_read_reg(adap, MPS_TRC_CFG_A); + if (cfg & TRCMULTIFILTER_F) { + /* If multiple tracers are enabled, then maximum + * capture size is 2.5KB (FIFO size of a single channel) + * minus 2 flits for CPL_TRACE_PKT header. + */ + if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8))) + return -EINVAL; + } else { + /* If multiple tracers are disabled, to avoid deadlocks + * maximum packet capture size of 9600 bytes is recommended. + * Also in this mode, only trace0 can be enabled and running. + */ + multitrc = 0; + if (tp->snap_len > 9600 || idx) + return -EINVAL; + } + + if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 || + tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M || + tp->min_len > TFMINPKTSIZE_M) + return -EINVAL; + + /* stop the tracer we'll be changing */ + t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0); + + idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A); + data_reg = MPS_TRC_FILTER0_MATCH_A + idx; + mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx; + + for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { + t4_write_reg(adap, data_reg, tp->data[i]); + t4_write_reg(adap, mask_reg, ~tp->mask[i]); + } + t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst, + TFCAPTUREMAX_V(tp->snap_len) | + TFMINPKTSIZE_V(tp->min_len)); + t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, + TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) | + (is_t4(adap->params.chip) ? + TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) : + T5_TFPORT_V(tp->port) | T5_TFEN_F | + T5_TFINVERTMATCH_V(tp->invert))); + + return 0; +} + +/** + * t4_get_trace_filter - query one of the tracing filters + * @adap: the adapter + * @tp: the current trace filter parameters + * @idx: which trace filter to query + * @enabled: non-zero if the filter is enabled + * + * Returns the current settings of one of the HW tracing filters. 
+ */ +void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx, + int *enabled) +{ + u32 ctla, ctlb; + int i, ofst = idx * 4; + u32 data_reg, mask_reg; + + ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst); + ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst); + + if (is_t4(adap->params.chip)) { + *enabled = !!(ctla & TFEN_F); + tp->port = TFPORT_G(ctla); + tp->invert = !!(ctla & TFINVERTMATCH_F); + } else { + *enabled = !!(ctla & T5_TFEN_F); + tp->port = T5_TFPORT_G(ctla); + tp->invert = !!(ctla & T5_TFINVERTMATCH_F); + } + tp->snap_len = TFCAPTUREMAX_G(ctlb); + tp->min_len = TFMINPKTSIZE_G(ctlb); + tp->skip_ofst = TFOFFSET_G(ctla); + tp->skip_len = TFLENGTH_G(ctla); + + ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx; + data_reg = MPS_TRC_FILTER0_MATCH_A + ofst; + mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst; + + for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { + tp->mask[i] = ~t4_read_reg(adap, mask_reg); + tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i]; + } +} + /** * t4_pmtx_get_stats - returns the HW stats from PMTX * @adap: the adapter diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index e444dc4ebbd8..fc3044c8ac1c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1886,6 +1886,9 @@ #define TRCMULTIFILTER_F TRCMULTIFILTER_V(1U) #define MPS_TRC_RSS_CONTROL_A 0x9808 +#define MPS_TRC_FILTER1_RSS_CONTROL_A 0x9ff4 +#define MPS_TRC_FILTER2_RSS_CONTROL_A 0x9ffc +#define MPS_TRC_FILTER3_RSS_CONTROL_A 0xa004 #define MPS_T5_TRC_RSS_CONTROL_A 0xa00c #define RSSCONTROL_S 16 @@ -1894,6 +1897,59 @@ #define QUEUENUMBER_S 0 #define QUEUENUMBER_V(x) ((x) << QUEUENUMBER_S) +#define TFINVERTMATCH_S 24 +#define TFINVERTMATCH_V(x) ((x) << TFINVERTMATCH_S) +#define TFINVERTMATCH_F TFINVERTMATCH_V(1U) + +#define TFEN_S 22 +#define TFEN_V(x) ((x) << TFEN_S) +#define TFEN_F TFEN_V(1U) + +#define TFPORT_S 18 +#define TFPORT_M 0xfU +#define TFPORT_V(x) ((x) << TFPORT_S) +#define TFPORT_G(x) (((x) >> TFPORT_S) & TFPORT_M) + +#define TFLENGTH_S 8 +#define TFLENGTH_M 0x1fU +#define TFLENGTH_V(x) ((x) << TFLENGTH_S) +#define TFLENGTH_G(x) (((x) >> TFLENGTH_S) & TFLENGTH_M) + +#define TFOFFSET_S 0 +#define TFOFFSET_M 0x1fU +#define TFOFFSET_V(x) ((x) << TFOFFSET_S) +#define TFOFFSET_G(x) (((x) >> TFOFFSET_S) & TFOFFSET_M) + +#define T5_TFINVERTMATCH_S 25 +#define T5_TFINVERTMATCH_V(x) ((x) << T5_TFINVERTMATCH_S) +#define T5_TFINVERTMATCH_F T5_TFINVERTMATCH_V(1U) + +#define T5_TFEN_S 23 +#define T5_TFEN_V(x) ((x) << T5_TFEN_S) +#define T5_TFEN_F T5_TFEN_V(1U) + +#define T5_TFPORT_S 18 +#define T5_TFPORT_M 0x1fU +#define T5_TFPORT_V(x) ((x) << T5_TFPORT_S) +#define T5_TFPORT_G(x) (((x) >> T5_TFPORT_S) & T5_TFPORT_M) + +#define MPS_TRC_FILTER_MATCH_CTL_A_A 0x9810 +#define MPS_TRC_FILTER_MATCH_CTL_B_A 0x9820 + +#define TFMINPKTSIZE_S 16 +#define TFMINPKTSIZE_M 0x1ffU +#define TFMINPKTSIZE_V(x) ((x) << TFMINPKTSIZE_S) +#define TFMINPKTSIZE_G(x) (((x) >> TFMINPKTSIZE_S) & TFMINPKTSIZE_M) + +#define TFCAPTUREMAX_S 0 +#define TFCAPTUREMAX_M 0x3fffU +#define TFCAPTUREMAX_V(x) ((x) << TFCAPTUREMAX_S) +#define TFCAPTUREMAX_G(x) (((x) >> TFCAPTUREMAX_S) & TFCAPTUREMAX_M) + +#define MPS_TRC_FILTER0_MATCH_A 0x9c00 +#define MPS_TRC_FILTER0_DONT_CARE_A 0x9c80 +#define MPS_TRC_FILTER1_MATCH_A 0x9d00 + #define TP_RSS_CONFIG_A 0x7df0 #define TNL4TUPENIPV6_S 31 @@ -2732,10 +2788,15 @@ #define T6_LIPMISS_F T6_LIPMISS_V(1U) 
#define LE_DB_CONFIG_A 0x19c04 +#define LE_DB_SERVER_INDEX_A 0x19c18 +#define LE_DB_SRVR_START_INDEX_A 0x19c18 +#define LE_DB_ACT_CNT_IPV4_A 0x19c20 +#define LE_DB_ACT_CNT_IPV6_A 0x19c24 #define LE_DB_HASH_TID_BASE_A 0x19c30 #define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30 #define LE_DB_INT_CAUSE_A 0x19c3c #define LE_DB_TID_HASHBASE_A 0x19df8 +#define T6_LE_DB_HASH_TID_BASE_A 0x19df8 #define HASHEN_S 20 #define HASHEN_V(x) ((x) << HASHEN_S) diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 84b6a2b46aec..8b53f7d4bebf 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -33,7 +33,7 @@ #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" -#define DRV_VERSION "2.1.1.83" +#define DRV_VERSION "2.3.0.12" #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" #define ENIC_BARS_MAX 6 @@ -191,6 +191,25 @@ struct enic { struct vnic_gen_stats gen_stats; }; +static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev) +{ + struct enic *enic = vdev->priv; + + return enic->netdev; +} + +/* wrapper functions for kernel logging + * Make sure the variable vdev of struct vnic_dev is available in the block + * where these macros are used + */ +#define vdev_info(args...) dev_info(&vdev->pdev->dev, args) +#define vdev_warn(args...) dev_warn(&vdev->pdev->dev, args) +#define vdev_err(args...) dev_err(&vdev->pdev->dev, args) + +#define vdev_netinfo(args...) netdev_info(vnic_get_netdev(vdev), args) +#define vdev_netwarn(args...) netdev_warn(vnic_get_netdev(vdev), args) +#define vdev_neterr(args...) netdev_err(vnic_get_netdev(vdev), args) + static inline struct device *enic_get_dev(struct enic *enic) { return &(enic->pdev->dev); diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 8f646e4e968b..cb1fdc350bb2 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2484,6 +2484,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_iounmap; } + err = vnic_devcmd_init(enic->vdev); + + if (err) + goto err_out_vnic_unregister; + #ifdef CONFIG_PCI_IOV /* Get number of subvnics */ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c index 0daa1c7073cb..abeda2a9ea27 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_cq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c @@ -24,6 +24,7 @@ #include "vnic_dev.h" #include "vnic_cq.h" +#include "enic.h" void vnic_cq_free(struct vnic_cq *cq) { @@ -42,7 +43,7 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); if (!cq->ctrl) { - pr_err("Failed to hook CQ[%d] resource\n", index); + vdev_err("Failed to hook CQ[%d] resource\n", index); return -EINVAL; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 62f7b7baf93c..19a49a6e3911 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -27,46 +27,9 @@ #include "vnic_resource.h" #include "vnic_devcmd.h" #include "vnic_dev.h" +#include "vnic_wq.h" #include "vnic_stats.h" - -enum vnic_proxy_type { - PROXY_NONE, - PROXY_BY_BDF, - PROXY_BY_INDEX, -}; - -struct vnic_res { - void __iomem *vaddr; - dma_addr_t bus_addr; - unsigned int count; -}; - -struct 
vnic_intr_coal_timer_info { - u32 mul; - u32 div; - u32 max_usec; -}; - -struct vnic_dev { - void *priv; - struct pci_dev *pdev; - struct vnic_res res[RES_TYPE_MAX]; - enum vnic_dev_intr_mode intr_mode; - struct vnic_devcmd __iomem *devcmd; - struct vnic_devcmd_notify *notify; - struct vnic_devcmd_notify notify_copy; - dma_addr_t notify_pa; - u32 notify_sz; - dma_addr_t linkstatus_pa; - struct vnic_stats *stats; - dma_addr_t stats_pa; - struct vnic_devcmd_fw_info *fw_info; - dma_addr_t fw_info_pa; - enum vnic_proxy_type proxy; - u32 proxy_index; - u64 args[VNIC_DEVCMD_NARGS]; - struct vnic_intr_coal_timer_info intr_coal_timer_info; -}; +#include "enic.h" #define VNIC_MAX_RES_HDR_SIZE \ (sizeof(struct vnic_resource_header) + \ @@ -90,14 +53,14 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, return -EINVAL; if (bar->len < VNIC_MAX_RES_HDR_SIZE) { - pr_err("vNIC BAR0 res hdr length error\n"); + vdev_err("vNIC BAR0 res hdr length error\n"); return -EINVAL; } rh = bar->vaddr; mrh = bar->vaddr; if (!rh) { - pr_err("vNIC BAR0 res hdr not mem-mapped\n"); + vdev_err("vNIC BAR0 res hdr not mem-mapped\n"); return -EINVAL; } @@ -106,11 +69,10 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, (ioread32(&rh->version) != VNIC_RES_VERSION)) { if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) || (ioread32(&mrh->version) != MGMTVNIC_VERSION)) { - pr_err("vNIC BAR0 res magic/version error " - "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n", - VNIC_RES_MAGIC, VNIC_RES_VERSION, - MGMTVNIC_MAGIC, MGMTVNIC_VERSION, - ioread32(&rh->magic), ioread32(&rh->version)); + vdev_err("vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n", + VNIC_RES_MAGIC, VNIC_RES_VERSION, + MGMTVNIC_MAGIC, MGMTVNIC_VERSION, + ioread32(&rh->magic), ioread32(&rh->version)); return -EINVAL; } } @@ -144,17 +106,15 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, /* each count is stride bytes long */ len = count * VNIC_RES_STRIDE; if (len + bar_offset > bar[bar_num].len) { - pr_err("vNIC BAR0 resource %d " - "out-of-bounds, offset 0x%x + " - "size 0x%x > bar len 0x%lx\n", - type, bar_offset, - len, - bar[bar_num].len); + vdev_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n", + type, bar_offset, len, + bar[bar_num].len); return -EINVAL; } break; case RES_TYPE_INTR_PBA_LEGACY: case RES_TYPE_DEVCMD: + case RES_TYPE_DEVCMD2: len = count; break; default: @@ -238,8 +198,8 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, &ring->base_addr_unaligned); if (!ring->descs_unaligned) { - pr_err("Failed to allocate ring (size=%d), aborting\n", - (int)ring->size); + vdev_err("Failed to allocate ring (size=%d), aborting\n", + (int)ring->size); return -ENOMEM; } @@ -281,7 +241,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, return -ENODEV; } if (status & STAT_BUSY) { - pr_err("Busy devcmd %d\n", _CMD_N(cmd)); + vdev_neterr("Busy devcmd %d\n", _CMD_N(cmd)); return -EBUSY; } @@ -315,8 +275,8 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, return -err; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) - pr_err("Error %d devcmd %d\n", - err, _CMD_N(cmd)); + vdev_neterr("Error %d devcmd %d\n", + err, _CMD_N(cmd)); return -err; } @@ -330,10 +290,160 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, } } - pr_err("Timedout devcmd %d\n", _CMD_N(cmd)); + vdev_neterr("Timedout devcmd %d\n", _CMD_N(cmd)); return -ETIMEDOUT; } +static int _vnic_dev_cmd2(struct vnic_dev 
*vdev, enum vnic_devcmd_cmd cmd, + int wait) +{ + struct devcmd2_controller *dc2c = vdev->devcmd2; + struct devcmd2_result *result = dc2c->result + dc2c->next_result; + unsigned int i; + int delay, err; + u32 fetch_index, posted, new_posted; + + posted = ioread32(&dc2c->wq_ctrl->posted_index); + fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index); + + if (posted == 0xFFFFFFFF || fetch_index == 0xFFFFFFFF) + return -ENODEV; + + new_posted = (posted + 1) % DEVCMD2_RING_SIZE; + + if (new_posted == fetch_index) { + vdev_neterr("devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n", + _CMD_N(cmd), fetch_index, posted); + return -EBUSY; + } + dc2c->cmd_ring[posted].cmd = cmd; + dc2c->cmd_ring[posted].flags = 0; + + if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) + dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT; + if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + dc2c->cmd_ring[posted].args[i] = vdev->args[i]; + + /* Adding write memory barrier prevents compiler and/or CPU reordering, + * thus avoiding descriptor posting before descriptor is initialized. + * Otherwise, hardware can read stale descriptor fields. + */ + wmb(); + iowrite32(new_posted, &dc2c->wq_ctrl->posted_index); + + if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) + return 0; + + for (delay = 0; delay < wait; delay++) { + if (result->color == dc2c->color) { + dc2c->next_result++; + if (dc2c->next_result == dc2c->result_size) { + dc2c->next_result = 0; + dc2c->color = dc2c->color ? 0 : 1; + } + if (result->error) { + err = result->error; + if (err != ERR_ECMDUNKNOWN || + cmd != CMD_CAPABILITY) + vdev_neterr("Error %d devcmd %d\n", + err, _CMD_N(cmd)); + return -err; + } + if (_CMD_DIR(cmd) & _CMD_DIR_READ) + for (i = 0; i < VNIC_DEVCMD2_NARGS; i++) + vdev->args[i] = result->results[i]; + + return 0; + } + udelay(100); + } + + vdev_neterr("devcmd %d timed out\n", _CMD_N(cmd)); + + return -ETIMEDOUT; +} + +static int vnic_dev_init_devcmd1(struct vnic_dev *vdev) +{ + vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); + if (!vdev->devcmd) + return -ENODEV; + vdev->devcmd_rtn = _vnic_dev_cmd; + + return 0; +} + +static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) +{ + int err; + unsigned int fetch_index; + + if (vdev->devcmd2) + return 0; + + vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL); + if (!vdev->devcmd2) + return -ENOMEM; + + vdev->devcmd2->color = 1; + vdev->devcmd2->result_size = DEVCMD2_RING_SIZE; + err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE, + DEVCMD2_DESC_SIZE); + if (err) + goto err_free_devcmd2; + + fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index); + if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ + vdev_err("Fatal error in devcmd2 init - hardware surprise removal"); + + return -ENODEV; + } + + enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0, + 0); + vnic_wq_enable(&vdev->devcmd2->wq); + + err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring, + DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE); + if (err) + goto err_free_wq; + + vdev->devcmd2->result = vdev->devcmd2->results_ring.descs; + vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs; + vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl; + vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr | + VNIC_PADDR_TARGET; + vdev->args[1] = DEVCMD2_RING_SIZE; + + err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000); + if (err) + goto err_free_desc_ring; + + vdev->devcmd_rtn = _vnic_dev_cmd2; + + return 0; + 
+err_free_desc_ring: + vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); +err_free_wq: + vnic_wq_disable(&vdev->devcmd2->wq); + vnic_wq_free(&vdev->devcmd2->wq); +err_free_devcmd2: + kfree(vdev->devcmd2); + vdev->devcmd2 = NULL; + + return err; +} + +static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev) +{ + vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); + vnic_wq_disable(&vdev->devcmd2->wq); + vnic_wq_free(&vdev->devcmd2->wq); + kfree(vdev->devcmd2); +} + static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait) @@ -348,7 +458,7 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, vdev->args[2] = *a0; vdev->args[3] = *a1; - err = _vnic_dev_cmd(vdev, proxy_cmd, wait); + err = vdev->devcmd_rtn(vdev, proxy_cmd, wait); if (err) return err; @@ -357,7 +467,8 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, err = (int)vdev->args[1]; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) - pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd)); + vdev_neterr("Error %d proxy devcmd %d\n", err, + _CMD_N(cmd)); return err; } @@ -375,7 +486,7 @@ static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev, vdev->args[0] = *a0; vdev->args[1] = *a1; - err = _vnic_dev_cmd(vdev, cmd, wait); + err = vdev->devcmd_rtn(vdev, cmd, wait); *a0 = vdev->args[0]; *a1 = vdev->args[1]; @@ -650,7 +761,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); if (err) - pr_err("Can't set packet filter\n"); + vdev_neterr("Can't set packet filter\n"); return err; } @@ -667,7 +778,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr) err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); if (err) - pr_err("Can't add addr [%pM], %d\n", addr, err); + vdev_neterr("Can't add addr [%pM], %d\n", addr, err); return err; } @@ -684,7 +795,7 @@ int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr) err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); if (err) - pr_err("Can't del addr [%pM], %d\n", addr, err); + vdev_neterr("Can't del addr [%pM], %d\n", addr, err); return err; } @@ -728,7 +839,7 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) dma_addr_t notify_pa; if (vdev->notify || vdev->notify_pa) { - pr_err("notify block %p still allocated", vdev->notify); + vdev_neterr("notify block %p still allocated", vdev->notify); return -EINVAL; } @@ -838,7 +949,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev) memset(vdev->args, 0, sizeof(vdev->args)); if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT)) - err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait); + err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait); else err = ERR_ECMDUNKNOWN; @@ -847,7 +958,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev) */ if ((err == ERR_ECMDUNKNOWN) || (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) { - pr_warn("Using default conversion factor for interrupt coalesce timer\n"); + vdev_netwarn("Using default conversion factor for interrupt coalesce timer\n"); vnic_dev_intr_coal_timer_info_default(vdev); return 0; } @@ -938,6 +1049,9 @@ void vnic_dev_unregister(struct vnic_dev *vdev) pci_free_consistent(vdev->pdev, sizeof(struct vnic_devcmd_fw_info), vdev->fw_info, vdev->fw_info_pa); + if (vdev->devcmd2) + vnic_dev_deinit_devcmd2(vdev); + kfree(vdev); } } @@ -959,10 +1073,6 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, if (vnic_dev_discover_res(vdev, bar, num_bars)) 
goto err_out; - vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); - if (!vdev->devcmd) - goto err_out; - return vdev; err_out: @@ -977,6 +1087,29 @@ struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev) } EXPORT_SYMBOL(vnic_dev_get_pdev); +int vnic_devcmd_init(struct vnic_dev *vdev) +{ + void __iomem *res; + int err; + + res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0); + if (res) { + err = vnic_dev_init_devcmd2(vdev); + if (err) + vdev_warn("DEVCMD2 init failed: %d, using DEVCMD1\n", + err); + else + return 0; + } else { + vdev_warn("DEVCMD2 resource not found (old firmware?), using DEVCMD1\n"); + } + err = vnic_dev_init_devcmd1(vdev); + if (err) + vdev_err("DEVCMD1 initialization failed: %d\n", err); + + return err; +} + int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len) { u64 a0, a1 = len; diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h index 1fb214efceba..b013b6a78e87 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.h +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h @@ -70,7 +70,48 @@ struct vnic_dev_ring { unsigned int desc_avail; }; -struct vnic_dev; +enum vnic_proxy_type { + PROXY_NONE, + PROXY_BY_BDF, + PROXY_BY_INDEX, +}; + +struct vnic_res { + void __iomem *vaddr; + dma_addr_t bus_addr; + unsigned int count; +}; + +struct vnic_intr_coal_timer_info { + u32 mul; + u32 div; + u32 max_usec; +}; + +struct vnic_dev { + void *priv; + struct pci_dev *pdev; + struct vnic_res res[RES_TYPE_MAX]; + enum vnic_dev_intr_mode intr_mode; + struct vnic_devcmd __iomem *devcmd; + struct vnic_devcmd_notify *notify; + struct vnic_devcmd_notify notify_copy; + dma_addr_t notify_pa; + u32 notify_sz; + dma_addr_t linkstatus_pa; + struct vnic_stats *stats; + dma_addr_t stats_pa; + struct vnic_devcmd_fw_info *fw_info; + dma_addr_t fw_info_pa; + enum vnic_proxy_type proxy; + u32 proxy_index; + u64 args[VNIC_DEVCMD_NARGS]; + struct vnic_intr_coal_timer_info intr_coal_timer_info; + struct devcmd2_controller *devcmd2; + int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait); +}; + struct vnic_stats; void *vnic_dev_priv(struct vnic_dev *vdev); @@ -135,5 +176,6 @@ int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status); int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, struct filter *data); +int vnic_devcmd_init(struct vnic_dev *vdev); #endif /* _VNIC_DEV_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h index 435d0cd96c22..2a812880b884 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h +++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h @@ -365,6 +365,12 @@ enum vnic_devcmd_cmd { */ CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56), + /* Initialization for the devcmd2 interface. + * in: (u64) a0 = host result buffer physical address + * in: (u16) a1 = number of entries in result buffer + */ + CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57), + /* Add a filter.
* in: (u64) a0= filter address * (u32) a1= size of filter @@ -629,4 +635,26 @@ struct vnic_devcmd { u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */ }; +#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */ + +#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS +struct vnic_devcmd2 { + u16 pad; + u16 flags; + u32 cmd; + u64 args[VNIC_DEVCMD2_NARGS]; +}; + +#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS +struct devcmd2_result { + u64 results[VNIC_DEVCMD2_NRESULTS]; + u32 pad; + u16 completed_index; + u8 error; + u8 color; +}; + +#define DEVCMD2_RING_SIZE 32 +#define DEVCMD2_DESC_SIZE 128 + #endif /* _VNIC_DEVCMD_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.c b/drivers/net/ethernet/cisco/enic/vnic_intr.c index 0ca107f7bc8c..942759d9cb3c 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_intr.c +++ b/drivers/net/ethernet/cisco/enic/vnic_intr.c @@ -25,6 +25,7 @@ #include "vnic_dev.h" #include "vnic_intr.h" +#include "enic.h" void vnic_intr_free(struct vnic_intr *intr) { @@ -39,7 +40,7 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); if (!intr->ctrl) { - pr_err("Failed to hook INTR[%d].ctrl resource\n", index); + vdev_err("Failed to hook INTR[%d].ctrl resource\n", index); return -EINVAL; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_resource.h b/drivers/net/ethernet/cisco/enic/vnic_resource.h index e0a73f1ca6f4..4e45f88ac1d4 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_resource.h +++ b/drivers/net/ethernet/cisco/enic/vnic_resource.h @@ -48,6 +48,13 @@ enum vnic_res_type { RES_TYPE_RSVD7, RES_TYPE_DEVCMD, /* Device command region */ RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */ + RES_TYPE_SUBVNIC, /* subvnic resource type */ + RES_TYPE_MQ_WQ, /* MQ Work queues */ + RES_TYPE_MQ_RQ, /* MQ Receive queues */ + RES_TYPE_MQ_CQ, /* MQ Completion queues */ + RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */ + RES_TYPE_DEPRECATED2, /* Old version of devcmd 2 */ + RES_TYPE_DEVCMD2, /* Device control region */ RES_TYPE_MAX, /* Count of resource types */ }; diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c index c4b2183bf352..cce2777dfc41 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c @@ -26,6 +26,7 @@ #include "vnic_dev.h" #include "vnic_rq.h" +#include "enic.h" static int vnic_rq_alloc_bufs(struct vnic_rq *rq) { @@ -91,7 +92,7 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index); if (!rq->ctrl) { - pr_err("Failed to hook RQ[%d] resource\n", index); + vdev_err("Failed to hook RQ[%d] resource\n", index); return -EINVAL; } @@ -167,6 +168,7 @@ void vnic_rq_enable(struct vnic_rq *rq) int vnic_rq_disable(struct vnic_rq *rq) { unsigned int wait; + struct vnic_dev *vdev = rq->vdev; iowrite32(0, &rq->ctrl->enable); @@ -177,7 +179,7 @@ int vnic_rq_disable(struct vnic_rq *rq) udelay(10); } - pr_err("Failed to disable RQ[%d]\n", rq->index); + vdev_neterr("Failed to disable RQ[%d]\n", rq->index); return -ETIMEDOUT; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c index b5a1c937fad2..05ad16a7e872 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_wq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c @@ -26,6 +26,7 @@ #include "vnic_dev.h" #include "vnic_wq.h" +#include "enic.h" static int vnic_wq_alloc_bufs(struct vnic_wq *wq) { @@ -94,7 +95,7 @@ int 
vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); if (!wq->ctrl) { - pr_err("Failed to hook WQ[%d] resource\n", index); + vdev_err("Failed to hook WQ[%d] resource\n", index); return -EINVAL; } @@ -113,10 +114,27 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, return 0; } -static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, - unsigned int fetch_index, unsigned int posted_index, - unsigned int error_interrupt_enable, - unsigned int error_interrupt_offset) +int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + wq->index = 0; + wq->vdev = vdev; + + wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0); + if (!wq->ctrl) + return -EINVAL; + vnic_wq_disable(wq); + err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); + + return err; +} + +void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) { u64 paddr; unsigned int count = wq->ring.desc_count; @@ -140,7 +158,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) { - vnic_wq_init_start(wq, cq_index, 0, 0, + enic_wq_init_start(wq, cq_index, 0, 0, error_interrupt_enable, error_interrupt_offset); } @@ -158,6 +176,7 @@ void vnic_wq_enable(struct vnic_wq *wq) int vnic_wq_disable(struct vnic_wq *wq) { unsigned int wait; + struct vnic_dev *vdev = wq->vdev; iowrite32(0, &wq->ctrl->enable); @@ -168,7 +187,7 @@ int vnic_wq_disable(struct vnic_wq *wq) udelay(10); } - pr_err("Failed to disable WQ[%d]\n", wq->index); + vdev_neterr("Failed to disable WQ[%d]\n", wq->index); return -ETIMEDOUT; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h index 296154351823..8944af935a60 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_wq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h @@ -88,6 +88,17 @@ struct vnic_wq { unsigned int pkts_outstanding; }; +struct devcmd2_controller { + struct vnic_wq_ctrl __iomem *wq_ctrl; + struct vnic_devcmd2 *cmd_ring; + struct devcmd2_result *result; + u16 next_result; + u16 result_size; + int color; + struct vnic_dev_ring results_ring; + struct vnic_wq wq; +}; + static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) { /* how many does SW own? 
*/ @@ -174,5 +185,11 @@ void vnic_wq_enable(struct vnic_wq *wq); int vnic_wq_disable(struct vnic_wq *wq); void vnic_wq_clean(struct vnic_wq *wq, void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)); +int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size); +void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); #endif /* _VNIC_WQ_H_ */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 36d835bd5f3c..7d178bdb112e 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -620,6 +620,11 @@ enum be_if_flags { BE_IF_FLAGS_VLAN_PROMISCUOUS |\ BE_IF_FLAGS_MCAST_PROMISCUOUS) +#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\ + BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED) + +#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS) + /* An RX interface is an object with one or more MAC addresses and * filtering capabilities. */ struct be_cmd_req_if_create { diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index d86bc5d52246..15cc3a1f12ff 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -273,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) return 0; + /* if device is not running, copy MAC to netdev->dev_addr */ + if (!netif_running(netdev)) + goto done; + /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT * privilege or if PF did not provision the new MAC address. 
* On BE3, this cmd will always fail if the VF doesn't have the @@ -307,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) status = -EPERM; goto err; } - - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - dev_info(dev, "MAC address changed to %pM\n", mac); +done: + ether_addr_copy(netdev->dev_addr, addr->sa_data); + dev_info(dev, "MAC address changed to %pM\n", addr->sa_data); return 0; err: dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data); @@ -2452,10 +2456,24 @@ static void be_eq_clean(struct be_eq_obj *eqo) be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0); } +/* Free posted rx buffers that were not used */ +static void be_rxq_clean(struct be_rx_obj *rxo) +{ + struct be_queue_info *rxq = &rxo->q; + struct be_rx_page_info *page_info; + + while (atomic_read(&rxq->used) > 0) { + page_info = get_rx_page_info(rxo); + put_page(page_info->page); + memset(page_info, 0, sizeof(*page_info)); + } + BUG_ON(atomic_read(&rxq->used)); + rxq->tail = 0; + rxq->head = 0; +} + static void be_rx_cq_clean(struct be_rx_obj *rxo) { - struct be_rx_page_info *page_info; - struct be_queue_info *rxq = &rxo->q; struct be_queue_info *rx_cq = &rxo->cq; struct be_rx_compl_info *rxcp; struct be_adapter *adapter = rxo->adapter; @@ -2492,16 +2510,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo) /* After cleanup, leave the CQ in unarmed state */ be_cq_notify(adapter, rx_cq->id, false, 0); - - /* Then free posted rx buffers that were not used */ - while (atomic_read(&rxq->used) > 0) { - page_info = get_rx_page_info(rxo); - put_page(page_info->page); - memset(page_info, 0, sizeof(*page_info)); - } - BUG_ON(atomic_read(&rxq->used)); - rxq->tail = 0; - rxq->head = 0; } static void be_tx_compl_clean(struct be_adapter *adapter) @@ -2581,8 +2589,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter) be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); napi_hash_del(&eqo->napi); netif_napi_del(&eqo->napi); + free_cpumask_var(eqo->affinity_mask); } - free_cpumask_var(eqo->affinity_mask); be_queue_free(adapter, &eqo->q); } } @@ -2599,13 +2607,7 @@ static int be_evt_queues_create(struct be_adapter *adapter) for_all_evt_queues(adapter, eqo, i) { int numa_node = dev_to_node(&adapter->pdev->dev); - if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) - return -ENOMEM; - cpumask_set_cpu(cpumask_local_spread(i, numa_node), - eqo->affinity_mask); - netif_napi_add(adapter->netdev, &eqo->napi, be_poll, - BE_NAPI_WEIGHT); - napi_hash_add(&eqo->napi); + aic = &adapter->aic_obj[i]; eqo->adapter = adapter; eqo->idx = i; @@ -2621,6 +2623,14 @@ static int be_evt_queues_create(struct be_adapter *adapter) rc = be_cmd_eq_create(adapter, eqo); if (rc) return rc; + + if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) + return -ENOMEM; + cpumask_set_cpu(cpumask_local_spread(i, numa_node), + eqo->affinity_mask); + netif_napi_add(adapter->netdev, &eqo->napi, be_poll, + BE_NAPI_WEIGHT); + napi_hash_add(&eqo->napi); } return 0; } @@ -3359,13 +3369,54 @@ static void be_rx_qs_destroy(struct be_adapter *adapter) for_all_rx_queues(adapter, rxo, i) { q = &rxo->q; if (q->created) { + /* If RXQs are destroyed while in an "out of buffer" + * state, there is a possibility of an HW stall on + * Lancer. So, post 64 buffers to each queue to relieve + * the "out of buffer" condition. + * Make sure there's space in the RXQ before posting. 
+ */ + if (lancer_chip(adapter)) { + be_rx_cq_clean(rxo); + if (atomic_read(&q->used) == 0) + be_post_rx_frags(rxo, GFP_KERNEL, + MAX_RX_POST); + } + + be_cmd_rxq_destroy(adapter, q); + be_rx_cq_clean(rxo); + be_rxq_clean(rxo); } be_queue_free(adapter, q); } } +static void be_disable_if_filters(struct be_adapter *adapter) +{ + be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id[0], 0); + + be_clear_uc_list(adapter); + + /* The IFACE flags are enabled in the open path and cleared + * in the close path. When a VF gets detached from the host and + * assigned to a VM the following happens: + * - VF's IFACE flags get cleared in the detach path + * - IFACE create is issued by the VF in the attach path + * Due to a bug in the BE3/Skyhawk-R FW + * (Lancer FW doesn't have the bug), the IFACE capability flags + * specified along with the IFACE create cmd issued by a VF are not + * honoured by FW. As a consequence, if a *new* driver + * (that enables/disables IFACE flags in open/close) + * is loaded in the host and an *old* driver is + * used by a VM/VF, + * the IFACE gets created *without* the needed flags. + * To avoid this, disable RX-filter flags only for Lancer. + */ + if (lancer_chip(adapter)) { + be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF); + adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS; + } +} + static int be_close(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); @@ -3378,6 +3429,8 @@ static int be_close(struct net_device *netdev) if (!(adapter->flags & BE_FLAGS_SETUP_DONE)) return 0; + be_disable_if_filters(adapter); + be_roce_dev_close(adapter); if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { @@ -3397,7 +3450,6 @@ static int be_close(struct net_device *netdev) be_tx_compl_clean(adapter); be_rx_qs_destroy(adapter); - be_clear_uc_list(adapter); for_all_evt_queues(adapter, eqo, i) { if (msix_enabled(adapter))
adapter->if_handle, 0); @@ -3781,25 +3852,11 @@ static int be_clear(struct be_adapter *adapter) return 0; } -static int be_if_create(struct be_adapter *adapter, u32 *if_handle, - u32 cap_flags, u32 vf) -{ - u32 en_flags; - - en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | - BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | - BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS; - - en_flags &= cap_flags; - - return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf); -} - static int be_vfs_if_create(struct be_adapter *adapter) { struct be_resources res = {0}; + u32 cap_flags, en_flags, vf; struct be_vf_cfg *vf_cfg; - u32 cap_flags, vf; int status; /* If a FW profile exists, then cap_flags are updated */ @@ -3820,8 +3877,12 @@ static int be_vfs_if_create(struct be_adapter *adapter) } } - status = be_if_create(adapter, &vf_cfg->if_handle, - cap_flags, vf + 1); + en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | + BE_IF_FLAGS_BROADCAST | + BE_IF_FLAGS_MULTICAST | + BE_IF_FLAGS_PASS_L3L4_ERRORS); + status = be_cmd_if_create(adapter, cap_flags, en_flags, + &vf_cfg->if_handle, vf + 1); if (status) return status; } @@ -4193,15 +4254,8 @@ static int be_mac_setup(struct be_adapter *adapter) memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); - } else { - /* Maybe the HW was reset; dev_addr must be re-programmed */ - memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN); } - /* For BE3-R VFs, the PF programs the initial MAC address */ - if (!(BEx_chip(adapter) && be_virtfn(adapter))) - be_cmd_pmac_add(adapter, mac, adapter->if_handle, - &adapter->pmac_id[0], 0); return 0; } @@ -4341,6 +4395,7 @@ static int be_func_init(struct be_adapter *adapter) static int be_setup(struct be_adapter *adapter) { struct device *dev = &adapter->pdev->dev; + u32 en_flags; int status; status = be_func_init(adapter); @@ -4363,8 +4418,11 @@ static int be_setup(struct be_adapter *adapter) if (status) goto err; - status = be_if_create(adapter, &adapter->if_handle, - be_if_cap_flags(adapter), 0); + /* will enable all the needed filter flags in be_open() */ + en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS; + en_flags = en_flags & be_if_cap_flags(adapter); + status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags, + &adapter->if_handle, 0); if (status) goto err; @@ -4390,11 +4448,6 @@ static int be_setup(struct be_adapter *adapter) dev_err(dev, "Please upgrade firmware to version >= 4.0\n"); } - if (adapter->vlans_added) - be_vid_config(adapter); - - be_set_rx_mode(adapter->netdev); - status = be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); if (status) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 32e3807c650e..787da8e54e99 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -364,7 +364,7 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) return 0; } -static int +static struct bufdesc * fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, struct net_device *ndev) @@ -439,10 +439,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, bdp->cbd_sc = status; } - txq->cur_tx = bdp; - - return 0; - + return bdp; dma_mapping_error: bdp = txq->cur_tx; for (i = 0; i < frag; i++) { @@ -450,7 +447,7 @@ dma_mapping_error: dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, bdp->cbd_datlen, DMA_TO_DEVICE); } - return NETDEV_TX_OK; + return ERR_PTR(-ENOMEM); } static int 
fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, @@ -467,7 +464,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, unsigned int estatus = 0; unsigned int index; int entries_free; - int ret; entries_free = fec_enet_get_free_txdesc_num(fep, txq); if (entries_free < MAX_SKB_FRAGS + 1) { @@ -485,6 +481,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, /* Fill in a Tx ring entry */ bdp = txq->cur_tx; + last_bdp = bdp; status = bdp->cbd_sc; status &= ~BD_ENET_TX_STATS; @@ -513,9 +510,9 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, } if (nr_frags) { - ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev); - if (ret) - return ret; + last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); + if (IS_ERR(last_bdp)) + return NETDEV_TX_OK; } else { status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); if (fep->bufdesc_ex) { @@ -544,7 +541,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, ebdp->cbd_esc = estatus; } - last_bdp = txq->cur_tx; index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); /* Save skb pointer */ txq->tx_skbuff[index] = skb; @@ -563,6 +559,10 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, skb_tx_timestamp(skb); + /* Make sure the update to bdp and tx_skbuff are performed before + * cur_tx. + */ + wmb(); txq->cur_tx = bdp; /* Trigger transmission start */ @@ -1218,10 +1218,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) /* get next bdp of dirty_tx */ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); - while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { - - /* current queue is empty */ - if (bdp == txq->cur_tx) + while (bdp != READ_ONCE(txq->cur_tx)) { + /* Order the load of cur_tx and cbd_sc */ + rmb(); + status = READ_ONCE(bdp->cbd_sc); + if (status & BD_ENET_TX_READY) break; index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); @@ -1275,6 +1276,10 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) /* Free the sk buffer associated with this last transmit */ dev_kfree_skb_any(skb); + /* Make sure the update to bdp and tx_skbuff are performed + * before dirty_tx + */ + wmb(); txq->dirty_tx = bdp; /* Update pointer to next buffer descriptor to be transmitted */ @@ -1774,7 +1779,7 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) int ret = 0; ret = pm_runtime_get_sync(dev); - if (IS_ERR_VALUE(ret)) + if (ret < 0) return ret; fep->mii_timeout = 0; @@ -1813,7 +1818,7 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, int ret = 0; ret = pm_runtime_get_sync(dev); - if (IS_ERR_VALUE(ret)) + if (ret < 0) return ret; fep->mii_timeout = 0; @@ -2865,7 +2870,7 @@ fec_enet_open(struct net_device *ndev) int ret; ret = pm_runtime_get_sync(&fep->pdev->dev); - if (IS_ERR_VALUE(ret)) + if (ret < 0) return ret; pinctrl_pm_select_default_state(&fep->pdev->dev); @@ -3433,6 +3438,7 @@ fec_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 56316db6c5a6..cf8e54652df9 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -586,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 
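The fec_main.c hunks above add a wmb() before publishing cur_tx/dirty_tx and an rmb() after the reader samples the index, so the descriptor and tx_skbuff updates become visible before the index that advertises them. A generic sketch of that publish/consume ordering follows; it uses the kernel's barrier primitives (wmb, rmb, READ_ONCE) but otherwise illustrative names (demo_txring, free_slot):

#define DEMO_RING_SIZE 64

struct demo_desc { unsigned long addr; unsigned int len; };

struct demo_txring {
	struct demo_desc desc[DEMO_RING_SIZE];
	void *skb[DEMO_RING_SIZE];	/* per-slot bookkeeping */
	unsigned int head;		/* producer index, published last */
	unsigned int tail;		/* consumer-private index */
};

/* Producer: fill the slot completely, then publish the new index. */
static void demo_publish(struct demo_txring *r, struct demo_desc *d, void *skb)
{
	unsigned int i = r->head;

	r->desc[i] = *d;
	r->skb[i] = skb;
	/* Slot contents must be globally visible before the index update,
	 * exactly like the wmb() ahead of txq->cur_tx = bdp above.
	 */
	wmb();
	r->head = (i + 1) % DEMO_RING_SIZE;
}

/* Consumer: sample the index once, then touch only slots behind it. */
static void demo_reap(struct demo_txring *r, void (*free_slot)(void *skb))
{
	unsigned int head = READ_ONCE(r->head);

	/* Order the index load against the slot loads, like the rmb()
	 * after READ_ONCE(txq->cur_tx) in fec_enet_tx_queue().
	 */
	rmb();
	while (r->tail != head) {
		free_slot(r->skb[r->tail]);
		r->tail = (r->tail + 1) % DEMO_RING_SIZE;
	}
}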
frag = skb_shinfo(skb)->frags; while (nr_frags) { CBDC_SC(bdp, - BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC); + BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST | + BD_ENET_TX_TC); CBDS_SC(bdp, BD_ENET_TX_READY); if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index b34214e2df5f..016743e355de 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep) } #define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) -#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB) +#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF) #define FEC_RX_EVENT (FEC_ENET_RXF) #define FEC_TX_EVENT (FEC_ENET_TXF) #define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \ diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 555e461b0cfe..6bdc89179b72 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -902,27 +902,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv) return 0; } -static int gfar_comp_asc(const void *a, const void *b) -{ - return memcmp(a, b, 4); -} - -static int gfar_comp_desc(const void *a, const void *b) -{ - return -memcmp(a, b, 4); -} - -static void gfar_swap(void *a, void *b, int size) -{ - u32 *_a = a; - u32 *_b = b; - - swap(_a[0], _b[0]); - swap(_a[1], _b[1]); - swap(_a[2], _b[2]); - swap(_a[3], _b[3]); -} - /* Write a mask to filer cache */ static void gfar_set_mask(u32 mask, struct filer_table *tab) { @@ -1272,310 +1251,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule, return 0; } -/* Copy size filer entries */ -static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0], - struct gfar_filer_entry src[0], s32 size) -{ - while (size > 0) { - size--; - dst[size].ctrl = src[size].ctrl; - dst[size].prop = src[size].prop; - } -} - -/* Delete the contents of the filer-table between start and end - * and collapse them - */ -static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab) -{ - int length; - - if (end > MAX_FILER_CACHE_IDX || end < begin) - return -EINVAL; - - end++; - length = end - begin; - - /* Copy */ - while (end < tab->index) { - tab->fe[begin].ctrl = tab->fe[end].ctrl; - tab->fe[begin++].prop = tab->fe[end++].prop; - - } - /* Fill up with don't cares */ - while (begin < tab->index) { - tab->fe[begin].ctrl = 0x60; - tab->fe[begin].prop = 0xFFFFFFFF; - begin++; - } - - tab->index -= length; - return 0; -} - -/* Make space on the wanted location */ -static int gfar_expand_filer_entries(u32 begin, u32 length, - struct filer_table *tab) -{ - if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || - begin > MAX_FILER_CACHE_IDX) - return -EINVAL; - - gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]), - tab->index - length + 1); - - tab->index += length; - return 0; -} - -static int gfar_get_next_cluster_start(int start, struct filer_table *tab) -{ - for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); - start++) { - if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) == - (RQFCR_AND | RQFCR_CLE)) - return start; - } - return -1; -} - -static int gfar_get_next_cluster_end(int start, struct filer_table *tab) -{ - for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); - start++) { - if 
((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) == - (RQFCR_CLE)) - return start; - } - return -1; -} - -/* Uses hardwares clustering option to reduce - * the number of filer table entries - */ -static void gfar_cluster_filer(struct filer_table *tab) -{ - s32 i = -1, j, iend, jend; - - while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) { - j = i; - while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) { - /* The cluster entries self and the previous one - * (a mask) must be identical! - */ - if (tab->fe[i].ctrl != tab->fe[j].ctrl) - break; - if (tab->fe[i].prop != tab->fe[j].prop) - break; - if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl) - break; - if (tab->fe[i - 1].prop != tab->fe[j - 1].prop) - break; - iend = gfar_get_next_cluster_end(i, tab); - jend = gfar_get_next_cluster_end(j, tab); - if (jend == -1 || iend == -1) - break; - - /* First we make some free space, where our cluster - * element should be. Then we copy it there and finally - * delete in from its old location. - */ - if (gfar_expand_filer_entries(iend, (jend - j), tab) == - -EINVAL) - break; - - gfar_copy_filer_entries(&(tab->fe[iend + 1]), - &(tab->fe[jend + 1]), jend - j); - - if (gfar_trim_filer_entries(jend - 1, - jend + (jend - j), - tab) == -EINVAL) - return; - - /* Mask out cluster bit */ - tab->fe[iend].ctrl &= ~(RQFCR_CLE); - } - } -} - -/* Swaps the masked bits of a1<>a2 and b1<>b2 */ -static void gfar_swap_bits(struct gfar_filer_entry *a1, - struct gfar_filer_entry *a2, - struct gfar_filer_entry *b1, - struct gfar_filer_entry *b2, u32 mask) -{ - u32 temp[4]; - temp[0] = a1->ctrl & mask; - temp[1] = a2->ctrl & mask; - temp[2] = b1->ctrl & mask; - temp[3] = b2->ctrl & mask; - - a1->ctrl &= ~mask; - a2->ctrl &= ~mask; - b1->ctrl &= ~mask; - b2->ctrl &= ~mask; - - a1->ctrl |= temp[1]; - a2->ctrl |= temp[0]; - b1->ctrl |= temp[3]; - b2->ctrl |= temp[2]; -} - -/* Generate a list consisting of masks values with their start and - * end of validity and block as indicator for parts belonging - * together (glued by ANDs) in mask_table - */ -static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table, - struct filer_table *tab) -{ - u32 i, and_index = 0, block_index = 1; - - for (i = 0; i < tab->index; i++) { - - /* LSByte of control = 0 sets a mask */ - if (!(tab->fe[i].ctrl & 0xF)) { - mask_table[and_index].mask = tab->fe[i].prop; - mask_table[and_index].start = i; - mask_table[and_index].block = block_index; - if (and_index >= 1) - mask_table[and_index - 1].end = i - 1; - and_index++; - } - /* cluster starts and ends will be separated because they should - * hold their position - */ - if (tab->fe[i].ctrl & RQFCR_CLE) - block_index++; - /* A not set AND indicates the end of a depended block */ - if (!(tab->fe[i].ctrl & RQFCR_AND)) - block_index++; - } - - mask_table[and_index - 1].end = i - 1; - - return and_index; -} - -/* Sorts the entries of mask_table by the values of the masks. 
- * Important: The 0xFF80 flags of the first and last entry of a - * block must hold their position (which queue, CLusterEnable, ReJEct, - * AND) - */ -static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table, - struct filer_table *temp_table, u32 and_index) -{ - /* Pointer to compare function (_asc or _desc) */ - int (*gfar_comp)(const void *, const void *); - - u32 i, size = 0, start = 0, prev = 1; - u32 old_first, old_last, new_first, new_last; - - gfar_comp = &gfar_comp_desc; - - for (i = 0; i < and_index; i++) { - if (prev != mask_table[i].block) { - old_first = mask_table[start].start + 1; - old_last = mask_table[i - 1].end; - sort(mask_table + start, size, - sizeof(struct gfar_mask_entry), - gfar_comp, &gfar_swap); - - /* Toggle order for every block. This makes the - * thing more efficient! - */ - if (gfar_comp == gfar_comp_desc) - gfar_comp = &gfar_comp_asc; - else - gfar_comp = &gfar_comp_desc; - - new_first = mask_table[start].start + 1; - new_last = mask_table[i - 1].end; - - gfar_swap_bits(&temp_table->fe[new_first], - &temp_table->fe[old_first], - &temp_table->fe[new_last], - &temp_table->fe[old_last], - RQFCR_QUEUE | RQFCR_CLE | - RQFCR_RJE | RQFCR_AND); - - start = i; - size = 0; - } - size++; - prev = mask_table[i].block; - } -} - -/* Reduces the number of masks needed in the filer table to save entries - * This is done by sorting the masks of a depended block. A depended block is - * identified by gluing ANDs or CLE. The sorting order toggles after every - * block. Of course entries in scope of a mask must change their location with - * it. - */ -static int gfar_optimize_filer_masks(struct filer_table *tab) -{ - struct filer_table *temp_table; - struct gfar_mask_entry *mask_table; - - u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0; - s32 ret = 0; - - /* We need a copy of the filer table because - * we want to change its order - */ - temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL); - if (temp_table == NULL) - return -ENOMEM; - - mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1, - sizeof(struct gfar_mask_entry), GFP_KERNEL); - - if (mask_table == NULL) { - ret = -ENOMEM; - goto end; - } - - and_index = gfar_generate_mask_table(mask_table, tab); - - gfar_sort_mask_table(mask_table, temp_table, and_index); - - /* Now we can copy the data from our duplicated filer table to - * the real one in the order the mask table says - */ - for (i = 0; i < and_index; i++) { - size = mask_table[i].end - mask_table[i].start + 1; - gfar_copy_filer_entries(&(tab->fe[j]), - &(temp_table->fe[mask_table[i].start]), size); - j += size; - } - - /* And finally we just have to check for duplicated masks and drop the - * second ones - */ - for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { - if (tab->fe[i].ctrl == 0x80) { - previous_mask = i++; - break; - } - } - for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) { - if (tab->fe[i].ctrl == 0x80) { - if (tab->fe[i].prop == tab->fe[previous_mask].prop) { - /* Two identical ones found! - * So drop the second one! - */ - gfar_trim_filer_entries(i, i, tab); - } else - /* Not identical! 
*/ - previous_mask = i; - } - } - - kfree(mask_table); -end: kfree(temp_table); - return ret; -} - /* Write the bit-pattern from software's buffer to hardware registers */ static int gfar_write_filer_table(struct gfar_private *priv, struct filer_table *tab) @@ -1585,11 +1260,10 @@ static int gfar_write_filer_table(struct gfar_private *priv, return -EBUSY; /* Fill regular entries */ - for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); - i++) + for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++) gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); /* Fill the rest with fall-troughs */ - for (; i < MAX_FILER_IDX - 1; i++) + for (; i < MAX_FILER_IDX; i++) gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF); /* Last entry must be default accept * because that's what people expect @@ -1623,7 +1297,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv) { struct ethtool_flow_spec_container *j; struct filer_table *tab; - s32 i = 0; s32 ret = 0; /* So index is set to zero, too! */ @@ -1648,17 +1321,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv) } } - i = tab->index; - - /* Optimizations to save entries */ - gfar_cluster_filer(tab); - gfar_optimize_filer_masks(tab); - - pr_debug("\tSummary:\n" - "\tData on hardware: %d\n" - "\tCompression rate: %d%%\n", - tab->index, 100 - (100 * tab->index) / i); - /* Write everything to hardware */ ret = gfar_write_filer_table(priv, tab); if (ret == -EBUSY) { @@ -1724,13 +1386,14 @@ static int gfar_add_cls(struct gfar_private *priv, } process: + priv->rx_list.count++; ret = gfar_process_filer_changes(priv); if (ret) goto clean_list; - priv->rx_list.count++; return ret; clean_list: + priv->rx_list.count--; list_del(&temp->list); clean_mem: kfree(temp); diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index d2657a412768..068789e694c9 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1770,8 +1770,11 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb, dma_addr = pci_map_single(nic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); /* If we can't map the skb, have the upper layer try later */ - if (pci_dma_mapping_error(nic->pdev, dma_addr)) + if (pci_dma_mapping_error(nic->pdev, dma_addr)) { + dev_kfree_skb_any(skb); + skb = NULL; return -ENOMEM; + } /* * Use the last 4 bytes of the SKB payload packet as the CRC, used for @@ -2967,6 +2970,11 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) nic->params.cbs.max * sizeof(struct cb), sizeof(u32), 0); + if (!nic->cbs_pool) { + netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n"); + err = -ENOMEM; + goto err_out_pool; + } netif_info(nic, probe, nic->netdev, "addr 0x%llx, irq %d, MAC addr %pM\n", (unsigned long long)pci_resource_start(pdev, use_io ? 
1 : 0), @@ -2974,6 +2982,8 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; +err_out_pool: + unregister_netdev(netdev); err_out_free: e100_free(nic); err_out_iounmap: diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 26459853c6be..34c551e322eb 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -106,14 +106,14 @@ #define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000 /* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */ -#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000 +#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000 #define K1_ENTRY_LATENCY 0 #define K1_MIN_TIME 1 #define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */ #define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */ #define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ - +#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index fea1601f32a3..faf4b3f3d0b5 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -48,7 +48,7 @@ #define DRV_EXTRAVERSION "-k" -#define DRV_VERSION "3.2.5" DRV_EXTRAVERSION +#define DRV_VERSION "3.2.6" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -1737,12 +1737,6 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; adapter->flags2 &= ~FLAG2_IS_DISCARDING; - - writel(0, rx_ring->head); - if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) - e1000e_update_rdt_wa(rx_ring, 0); - else - writel(0, rx_ring->tail); } static void e1000e_downshift_workaround(struct work_struct *work) @@ -2447,12 +2441,6 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - - writel(0, tx_ring->head); - if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) - e1000e_update_tdt_wa(tx_ring, 0); - else - writel(0, tx_ring->tail); } /** @@ -2954,6 +2942,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); + writel(0, tx_ring->head); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_tdt_wa(tx_ring, 0); + else + writel(0, tx_ring->tail); + /* Set the Tx Interrupt Delay register */ ew32(TIDV, adapter->tx_int_delay); /* Tx irq moderation */ @@ -3275,6 +3269,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); + writel(0, rx_ring->head); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(rx_ring, 0); + else + writel(0, rx_ring->tail); + /* Enable Receive Checksum Offload for TCP and UDP */ rxcsum = er32(RXCSUM); if (adapter->netdev->features & NETIF_F_RXCSUM) @@ -4280,18 +4280,29 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, cc); struct e1000_hw *hw = &adapter->hw; + u32 systimel_1, systimel_2, systimeh; cycle_t systim, systim_next; - /* SYSTIMH latching upon SYSTIML read does not work well. 
To fix that - * we don't want to allow overflow of SYSTIML and a change to SYSTIMH - * to occur between reads, so if we read a vale close to overflow, we - * wait for overflow to occur and read both registers when its safe. + /* SYSTIMH latching upon SYSTIML read does not work well. + * This means that if SYSTIML overflows after we read it but before + * we read SYSTIMH, the value of SYSTIMH has been incremented and we + * will experience a huge non-linear increment in the systime value. + * To fix that, we test for overflow and, if true, re-read systime. */ - u32 systim_overflow_latch_fix = 0x3FFFFFFF; - - do { - systim = (cycle_t)er32(SYSTIML); - } while (systim > systim_overflow_latch_fix); - systim |= (cycle_t)er32(SYSTIMH) << 32; + systimel_1 = er32(SYSTIML); + systimeh = er32(SYSTIMH); + systimel_2 = er32(SYSTIML); + /* Check for overflow. If there was no overflow, use the values */ + if (systimel_1 < systimel_2) { + systim = (cycle_t)systimel_1; + systim |= (cycle_t)systimeh << 32; + } else { + /* There was an overflow, read again SYSTIMH, and use + * systimel_2 + */ + systimeh = er32(SYSTIMH); + systim = (cycle_t)systimel_2; + systim |= (cycle_t)systimeh << 32; + } if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { u64 incvalue, time_delta, rem, temp; @@ -6317,6 +6328,33 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) return retval; } + /* Ensure that the appropriate bits are set in LPI_CTRL + * for EEE in Sx + */ + if ((hw->phy.type >= e1000_phy_i217) && + adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) { + u16 lpi_ctrl = 0; + + retval = hw->phy.ops.acquire(hw); + if (!retval) { + retval = e1e_rphy_locked(hw, I82579_LPI_CTRL, + &lpi_ctrl); + if (!retval) { + if (adapter->eee_advert & + hw->dev_spec.ich8lan.eee_lp_ability & + I82579_EEE_100_SUPPORTED) + lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; + if (adapter->eee_advert & + hw->dev_spec.ich8lan.eee_lp_ability & + I82579_EEE_1000_SUPPORTED) + lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; + + retval = e1e_wphy_locked(hw, I82579_LPI_CTRL, + lpi_ctrl); + } + } + hw->phy.ops.release(hw); + } /* Release control of h/w to f/w. If f/w is AMT enabled, this * would have already happened in close and is redundant.
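The new cyclecounter read above is the standard low/high/low sequence for sampling a 64-bit counter split across two 32-bit registers that do not latch atomically. A generic sketch of the same logic, where read_lo()/read_hi() stand in for the er32(SYSTIML)/er32(SYSTIMH) accessors:

#include <stdint.h>

extern uint32_t read_lo(void);	/* e.g. er32(SYSTIML) */
extern uint32_t read_hi(void);	/* e.g. er32(SYSTIMH) */

static uint64_t read_counter64(void)
{
	uint32_t lo1 = read_lo();
	uint32_t hi = read_hi();
	uint32_t lo2 = read_lo();

	/* If the low word did not wrap between the two reads, hi was
	 * sampled in the same epoch as lo1 and the pair is coherent.
	 */
	if (lo1 < lo2)
		return ((uint64_t)hi << 32) | lo1;

	/* The low word wrapped: re-read the high word, which now
	 * belongs to the same epoch as lo2.
	 */
	hi = read_hi();
	return ((uint64_t)hi << 32) | lo2;
}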
@@ -6466,7 +6504,7 @@ static int __e1000_resume(struct pci_dev *pdev) if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) aspm_disable_flag |= PCIE_LINK_STATE_L1; if (aspm_disable_flag) - e1000e_disable_aspm_locked(pdev, aspm_disable_flag); + e1000e_disable_aspm(pdev, aspm_disable_flag); pci_set_master(pdev); @@ -6744,7 +6782,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) aspm_disable_flag |= PCIE_LINK_STATE_L1; if (aspm_disable_flag) - e1000e_disable_aspm(pdev, aspm_disable_flag); + e1000e_disable_aspm_locked(pdev, aspm_disable_flag); err = pci_enable_device_mem(pdev); if (err) { diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index b24e5fee17f2..1d5e0b77062a 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -38,8 +38,8 @@ #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ -#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */ -#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */ +#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */ +#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */ #define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ @@ -125,7 +125,6 @@ (0x054E4 + ((_i - 16) * 8))) #define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) #define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) -#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) #define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ #define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ #define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 281fd8456146..0f97883c1493 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -79,10 +79,13 @@ #define I40E_MIN_MSIX 2 #define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */ #define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */ -#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */ +/* max 16 qps */ +#define i40e_default_queues_per_vmdq(pf) \ + (((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1) #define I40E_DEFAULT_QUEUES_PER_VF 4 #define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ -#define I40E_MAX_QUEUES_PER_TC 64 /* should be a power of 2 */ +#define i40e_pf_get_max_q_per_tc(pf) \ + (((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 
128 : 64) #define I40E_FDIR_RING 0 #define I40E_FDIR_RING_COUNT 32 #ifdef I40E_FCOE @@ -298,6 +301,7 @@ struct i40e_pf { #define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) #define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8) #define I40E_FLAG_NEED_LINK_UPDATE BIT_ULL(9) +#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10) #ifdef I40E_FCOE #define I40E_FLAG_FCOE_ENABLED BIT_ULL(11) #endif /* I40E_FCOE */ @@ -318,6 +322,12 @@ struct i40e_pf { #endif #define I40E_FLAG_PORT_ID_VALID BIT_ULL(28) #define I40E_FLAG_DCB_CAPABLE BIT_ULL(29) +#define I40E_FLAG_RSS_AQ_CAPABLE BIT_ULL(31) +#define I40E_FLAG_HW_ATR_EVICT_CAPABLE BIT_ULL(32) +#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE BIT_ULL(33) +#define I40E_FLAG_128_QP_RSS_CAPABLE BIT_ULL(34) +#define I40E_FLAG_WB_ON_ITR_CAPABLE BIT_ULL(35) +#define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(38) #define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) /* tracks features that get auto disabled by errors */ @@ -550,6 +560,7 @@ struct i40e_q_vector { cpumask_t affinity_mask; struct rcu_head rcu; /* to avoid race with update stats on free */ char name[I40E_INT_NAME_STR_LEN]; + bool arm_wb_state; } ____cacheline_internodealigned_in_smp; /* lan device */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 9101f5c00f37..95d23bfbcbf1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -257,6 +257,10 @@ enum i40e_admin_queue_opc { /* Tunnel commands */ i40e_aqc_opc_add_udp_tunnel = 0x0B00, i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_set_rss_key = 0x0B02, + i40e_aqc_opc_set_rss_lut = 0x0B03, + i40e_aqc_opc_get_rss_key = 0x0B04, + i40e_aqc_opc_get_rss_lut = 0x0B05, /* Async Events */ i40e_aqc_opc_event_lan_overflow = 0x1001, @@ -821,8 +825,12 @@ struct i40e_aqc_vsi_properties_data { I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) /* queueing option section */ u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 +#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 u8 queueing_opt_reserved[3]; /* scheduler section */ u8 up_enable_bits; @@ -2179,6 +2187,46 @@ struct i40e_aqc_del_udp_tunnel_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); +struct i40e_aqc_get_set_rss_key { +#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) + __le16 vsi_id; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); + +struct i40e_aqc_get_set_rss_key_data { + u8 standard_rss_key[0x28]; + u8 extended_hash_key[0xc]; +}; + +I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); + +struct i40e_aqc_get_set_rss_lut { +#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) + __le16 vsi_id; +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \ + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) + +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 + __le16 flags; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut); + 
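The get/set RSS LUT command defined above carries an indirection table that the driver code later in this patch (i40e_config_rss_aq() and i40e_config_rss_reg()) fills round-robin across the active queues, packing four one-byte entries per 32-bit register write on the register path. A small illustrative sketch of both steps; fill_rss_lut and pack_lut_word are hypothetical helper names, not driver API:

#include <stdint.h>
#include <stdlib.h>

/* Slot i steers to queue i % rss_size, so each queue appears
 * table_size / rss_size times in the table (assumes rss_size <= 256,
 * since hardware LUT entries are one byte).
 */
static uint8_t *fill_rss_lut(size_t table_size, unsigned int rss_size)
{
	uint8_t *lut = malloc(table_size);
	size_t i;

	if (!lut)
		return NULL;
	for (i = 0; i < table_size; i++)
		lut[i] = i % rss_size;
	return lut;
}

/* Pack four consecutive one-byte LUT entries into one register word,
 * mirroring the lut |= queue << (8 * j) loop over I40E_PFQF_HLUT.
 */
static uint32_t pack_lut_word(const uint8_t *lut, size_t i)
{
	return (uint32_t)lut[i] |
	       (uint32_t)lut[i + 1] << 8 |
	       (uint32_t)lut[i + 2] << 16 |
	       (uint32_t)lut[i + 3] << 24;
}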
/* tunnel key structure 0x0B10 */ struct i40e_aqc_tunnel_key_structure { diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 167ca0d752ea..114dc6450183 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -54,6 +54,15 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_20G_KR2: hw->mac.type = I40E_MAC_XL710; break; + case I40E_DEV_ID_SFP_X722: + case I40E_DEV_ID_1G_BASE_T_X722: + case I40E_DEV_ID_10G_BASE_T_X722: + hw->mac.type = I40E_MAC_X722; + break; + case I40E_DEV_ID_X722_VF: + case I40E_DEV_ID_X722_VF_HV: + hw->mac.type = I40E_MAC_X722_VF; + break; case I40E_DEV_ID_VF: case I40E_DEV_ID_VF_HV: hw->mac.type = I40E_MAC_VF; @@ -383,6 +392,169 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, return status; } +/** + * i40e_aq_get_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * @set: set true to set the table, false to get the table + * + * Internal function to get or set RSS look up table + **/ +static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, + u16 vsi_id, bool pf_lut, + u8 *lut, u16 lut_size, + bool set) +{ + i40e_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_lut *cmd_resp = + (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; + + if (set) + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_lut); + else + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_lut); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + cpu_to_le16((u16)((vsi_id << + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); + + if (pf_lut) + cmd_resp->flags |= cpu_to_le16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + else + cmd_resp->flags |= cpu_to_le16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + + cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut)); + cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut)); + + status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL); + + return status; +} + +/** + * i40e_aq_get_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * get the RSS lookup table, PF or VSI type + **/ +i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, + false); +} + +/** + * i40e_aq_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * set the RSS lookup table, PF or VSI type + **/ +i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return 
i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); +} + +/** + * i40e_aq_get_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * @set: set true to set the key, false to get the key + * + * get the RSS key per VSI + **/ +static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key, + bool set) +{ + i40e_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_key *cmd_resp = + (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; + u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); + + if (set) + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_key); + else + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_key); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + cpu_to_le16((u16)((vsi_id << + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); + cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key)); + cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key)); + + status = i40e_asq_send_command(hw, &desc, key, key_size, NULL); + + return status; +} + +/** + * i40e_aq_get_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + **/ +i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); +} + +/** + * i40e_aq_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + * set the RSS key per VSI + **/ +i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); +} + /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the * hardware to a bit-field that can be used by SW to more easily determine the * packet type. 
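i40e_aq_get_set_rss_key() above follows the common shape for paired get/set admin-queue commands: one internal routine does the descriptor setup and field packing, a bool selects the opcode and direction, and two thin public wrappers pin the bool. A minimal sketch of that shape; all demo_* names and opcode values are illustrative:

#include <stdbool.h>
#include <stddef.h>

enum demo_opc { DEMO_OPC_SET_KEY = 0x0B02, DEMO_OPC_GET_KEY = 0x0B04 };

struct demo_hw;	/* opaque device handle */

/* Stand-in for i40e_asq_send_command(): posts one admin-queue descriptor. */
int demo_send(struct demo_hw *hw, enum demo_opc opc, void *buf, size_t len);

/* One builder for both directions keeps the VSI id, flag and buffer
 * address packing in a single place, as the driver helper does.
 */
static int demo_get_set_key(struct demo_hw *hw, void *key, size_t len,
			    bool set)
{
	return demo_send(hw, set ? DEMO_OPC_SET_KEY : DEMO_OPC_GET_KEY,
			 key, len);
}

int demo_get_key(struct demo_hw *hw, void *key, size_t len)
{
	return demo_get_set_key(hw, key, len, false);
}

int demo_set_key(struct demo_hw *hw, void *key, size_t len)
{
	return demo_get_set_key(hw, key, len, true);
}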
@@ -769,6 +941,7 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw) switch (hw->mac.type) { case I40E_MAC_XL710: + case I40E_MAC_X722: break; default: return I40E_ERR_DEVICE_NOT_SUPPORTED; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 857d294d2a45..3bb832a2ec51 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -76,6 +76,9 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, /* required last entry */ {0, } }; @@ -1547,7 +1550,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, */ qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); num_tc_qps = qcount / numtc; - num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); + num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf)); /* Setup queue offset/count for all TCs for given VSI */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { @@ -2905,6 +2908,9 @@ static void i40e_enable_misc_int_causes(struct i40e_pf *pf) I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK; + if (pf->flags & I40E_FLAG_IWARP_ENABLED) + val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; + if (pf->flags & I40E_FLAG_PTP) val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; @@ -3195,6 +3201,13 @@ static irqreturn_t i40e_intr(int irq, void *data) (icr0 & I40E_PFINT_ICR0_SWINT_MASK)) pf->sw_int_count++; + if ((pf->flags & I40E_FLAG_IWARP_ENABLED) && + (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) { + ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; + icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK; + dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n"); + } + /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { @@ -7068,6 +7081,10 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) tx_ring->count = vsi->num_desc; tx_ring->size = 0; tx_ring->dcb_tc = 0; + if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) + tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; + if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) + tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM; vsi->tx_rings[i] = tx_ring; rx_ring = &tx_ring[1]; @@ -7465,63 +7482,140 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) return err; } +/** + * i40e_config_rss_aq - Prepare for RSS using AQ commands + * @vsi: vsi structure + * @seed: RSS hash seed + **/ +static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed) +{ + struct i40e_aqc_get_set_rss_key_data rss_key; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + bool pf_lut = false; + u8 *rss_lut; + int ret, i; + + memset(&rss_key, 0, sizeof(rss_key)); + memcpy(&rss_key, seed, sizeof(rss_key)); + + rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL); + if (!rss_lut) + return -ENOMEM; + + /* Populate the LUT with max no. 
of queues in round robin fashion */
+	for (i = 0; i < vsi->rss_table_size; i++)
+		rss_lut[i] = i % vsi->rss_size;
+
+	ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "Cannot set RSS key, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+		kfree(rss_lut);
+		return ret;
+	}
+
+	if (vsi->type == I40E_VSI_MAIN)
+		pf_lut = true;
+
+	ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
+				  vsi->rss_table_size);
+	if (ret)
+		dev_info(&pf->pdev->dev,
+			 "Cannot set RSS lut, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+	kfree(rss_lut);
+	return ret;
+}
+
+/**
+ * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
+ * @vsi: VSI structure
+ **/
+static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
+{
+	u8 seed[I40E_HKEY_ARRAY_SIZE];
+	struct i40e_pf *pf = vsi->back;
+
+	netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+	vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
+
+	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+		return i40e_config_rss_aq(vsi, seed);
+
+	return 0;
+}
+
+/**
+ * i40e_config_rss_reg - Prepare for RSS if used
+ * @pf: board private structure
+ * @seed: RSS hash seed
+ **/
+static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
+{
+	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+	struct i40e_hw *hw = &pf->hw;
+	u32 *seed_dw = (u32 *)seed;
+	u32 current_queue = 0;
+	u32 lut = 0;
+	int i, j;
+
+	/* Fill out hash function seed */
+	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+		wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+
+	for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+		lut = 0;
+		for (j = 0; j < 4; j++) {
+			if (current_queue == vsi->rss_size)
+				current_queue = 0;
+			lut |= ((current_queue) << (8 * j));
+			current_queue++;
+		}
+		wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
+	}
+	i40e_flush(hw);
+
+	return 0;
+}
+
 /**
  * i40e_config_rss - Prepare for RSS if used
  * @pf: board private structure
  **/
 static int i40e_config_rss(struct i40e_pf *pf)
 {
-	u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+	u8 seed[I40E_HKEY_ARRAY_SIZE];
 	struct i40e_hw *hw = &pf->hw;
-	u32 lut = 0;
-	int i, j;
-	u64 hena;
 	u32 reg_val;
+	u64 hena;
 
-	netdev_rss_key_fill(rss_key, sizeof(rss_key));
-	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-		wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]);
+	netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
 
 	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
 	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
 	       ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
-	hena |= I40E_DEFAULT_RSS_HENA;
+	hena |= i40e_pf_get_default_rss_hena(pf);
+
 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
 
 	vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
 
-	/* Check capability and Set table size and register per hw expectation*/
+	/* Determine the RSS table size based on the hardware capabilities */
 	reg_val = rd32(hw, I40E_PFQF_CTL_0);
-	if (pf->rss_table_size == 512)
-		reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
-	else
-		reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
+	reg_val = (pf->rss_table_size == 512) ?
+			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
+			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
 	wr32(hw, I40E_PFQF_CTL_0, reg_val);
 
-	/* Populate the LUT with max no. of queues in round robin fashion */
-	for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {
-
-		/* The assumption is that lan qp count will be the highest
-		 * qp count for any PF VSI that needs RSS.
-		 * If multiple VSIs need RSS support, all the qp counts
-		 * for those VSIs should be a power of 2 for RSS to work.
-		 * If LAN VSI is the only consumer for RSS then this requirement
-		 * is not necessary.
-		 */
-		if (j == vsi->rss_size)
-			j = 0;
-		/* lut = 4-byte sliding window of 4 lut entries */
-		lut = (lut << 8) | (j &
-			  (BIT(pf->hw.func_caps.rss_table_entry_width) - 1));
-		/* On i = 3, we have 4 entries in lut; write to the register */
-		if ((i & 3) == 3)
-			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
-	}
-	i40e_flush(hw);
-
-	return 0;
+	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+		return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed);
+	else
+		return i40e_config_rss_reg(pf, seed);
 }
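For reference, the register path packs four 8-bit queue indices into each 32-bit HLUT word, least-significant byte first. The helper below is a standalone restatement of that inner loop; the function name is illustrative, not part of the driver.

/* Illustrative only: builds one HLUT word the way i40e_config_rss_reg()
 * does.  With first_queue = 0 and rss_size = 4 this returns 0x03020100,
 * i.e. queues 0..3 occupy the four byte lanes of the register.
 */
static u32 i40e_hlut_word_example(u32 first_queue, u32 rss_size)
{
	u32 lut = 0;
	int j;

	for (j = 0; j < 4; j++) {
		if (first_queue == rss_size)
			first_queue = 0;
		lut |= first_queue << (8 * j);
		first_queue++;
	}
	return lut;
}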
 
 /**
@@ -7762,9 +7856,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
 	}
 
 	if (pf->hw.func_caps.vmdq) {
-		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
 		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
-		pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
+		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
 	}
 
 #ifdef I40E_FCOE
@@ -7782,6 +7875,14 @@ static int i40e_sw_init(struct i40e_pf *pf)
 			       I40E_MAX_VF_COUNT);
 	}
 #endif /* CONFIG_PCI_IOV */
+	if (pf->hw.mac.type == I40E_MAC_X722) {
+		pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
+			     I40E_FLAG_128_QP_RSS_CAPABLE |
+			     I40E_FLAG_HW_ATR_EVICT_CAPABLE |
+			     I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
+			     I40E_FLAG_WB_ON_ITR_CAPABLE |
+			     I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE;
+	}
 	pf->eeprom_version = 0xDEAD;
 	pf->lan_veb = I40E_NO_VEB;
 	pf->lan_vsi = I40E_NO_VSI;
@@ -8937,6 +9038,10 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 		break;
 	}
 
+	if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
+	    (vsi->type == I40E_VSI_VMDQ2)) {
+		ret = i40e_vsi_config_rss(vsi);
+	}
 	return vsi;
 
 err_rings:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index ce986af213d2..9b83abc0e774 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -211,6 +211,74 @@ read_nvm_exit:
 	return ret_code;
 }
 
+/**
+ * i40e_read_nvm_aq - Read Shadow RAM via the AdminQ.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to read
+ * @data: buffer to store the words read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
+ **/
+static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+				    u32 offset, u16 words, void *data,
+				    bool last_command)
+{
+	i40e_status ret_code = I40E_ERR_NVM;
+	struct i40e_asq_cmd_details cmd_details;
+
+	memset(&cmd_details, 0, sizeof(cmd_details));
+
+	/* Here we are checking the SR limit only for the flat memory model.
+	 * We cannot do it for the module-based model, as we did not acquire
+	 * the NVM resource yet (we cannot get the module pointer value).
+	 * Firmware will check the module-based model.
+	 */
+	if ((offset + words) > hw->nvm.sr_size)
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+			   (offset + words), hw->nvm.sr_size);
+	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+		/* We can read only up to 4KB (one sector), in one AQ read */
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM read fail error: tried to read %d words, limit is %d.\n",
+			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+		/* A single read cannot spread over two sectors */
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
+			   offset, words);
+	else
+		ret_code = i40e_aq_read_nvm(hw, module_pointer,
+					    2 * offset,  /*bytes*/
+					    2 * words,   /*bytes*/
+					    data, last_command, &cmd_details);
+
+	return ret_code;
+}
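Taken together, the three checks above enforce a single rule: one AQ read must stay inside one Shadow RAM sector. A compact restatement, assuming words >= 1; the helper name is illustrative, not driver code.

/* Illustrative only: equivalent form of the bounds checks in
 * i40e_read_nvm_aq().  A request is acceptable when it stays below the
 * Shadow RAM size, fits in one sector, and its first and last words
 * land in the same I40E_SR_SECTOR_SIZE_IN_WORDS-sized sector.
 */
static bool i40e_nvm_read_fits_example(struct i40e_hw *hw,
				       u32 offset, u16 words)
{
	u32 first_sector = offset / I40E_SR_SECTOR_SIZE_IN_WORDS;
	u32 last_sector = (offset + words - 1) / I40E_SR_SECTOR_SIZE_IN_WORDS;

	return (offset + words) <= hw->nvm.sr_size &&
	       words <= I40E_SR_SECTOR_SIZE_IN_WORDS &&
	       first_sector == last_sector;
}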
+
+/**
+ * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the AdminQ.
+ **/
+static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+					 u16 *data)
+{
+	i40e_status ret_code;
+
+	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
+	*data = le16_to_cpu(*(__le16 *)data);
+
+	return ret_code;
+}
+
 /**
  * i40e_read_nvm_word - Reads Shadow RAM
  * @hw: pointer to the HW structure
@@ -222,6 +290,8 @@ read_nvm_exit:
 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
 			       u16 *data)
 {
+	if (hw->mac.type == I40E_MAC_X722)
+		return i40e_read_nvm_word_aq(hw, offset, data);
 	return i40e_read_nvm_word_srctl(hw, offset, data);
 }
 
@@ -256,6 +326,63 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
 	return ret_code;
 }
 
+/**
+ * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+					   u16 *words, u16 *data)
+{
+	i40e_status ret_code;
+	u16 read_size = *words;
+	bool last_cmd = false;
+	u16 words_read = 0;
+	u16 i = 0;
+
+	do {
+		/* Calculate the number of words to read in this step.
+		 * The AQ does not allow us to read more than one page at a time or
+		 * to cross page boundaries.
+ */ + if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS) + read_size = min(*words, + (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS - + (offset % I40E_SR_SECTOR_SIZE_IN_WORDS))); + else + read_size = min((*words - words_read), + I40E_SR_SECTOR_SIZE_IN_WORDS); + + /* Check if this is last command, if so set proper flag */ + if ((words_read + read_size) >= *words) + last_cmd = true; + + ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size, + data + words_read, last_cmd); + if (ret_code) + goto read_nvm_buffer_aq_exit; + + /* Increment counter for words already read and move offset to + * new read location + */ + words_read += read_size; + offset += read_size; + } while (words_read < *words); + + for (i = 0; i < *words; i++) + data[i] = le16_to_cpu(((__le16 *)data)[i]); + +read_nvm_buffer_aq_exit: + *words = words_read; + return ret_code; +} + /** * i40e_read_nvm_buffer - Reads Shadow RAM buffer * @hw: pointer to the HW structure @@ -270,6 +397,8 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { + if (hw->mac.type == I40E_MAC_X722) + return i40e_read_nvm_buffer_aq(hw, offset, words, data); return i40e_read_nvm_buffer_srctl(hw, offset, words, data); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index d52a9f7873b0..dcb72a8ee8e5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -61,6 +61,17 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err); +i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); +i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); + u32 i40e_led_get(struct i40e_hw *hw); void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 522d6df51330..acae6c744bc2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -3366,4 +3366,1933 @@ #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) #define I40E_VFQF_HREGION_REGION_7_SHIFT 29 #define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) -#endif + +#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */ +#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0 +#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT) +#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */ +#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2 +#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT) +#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3 +#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT) +#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4 +#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT) +#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8 +#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, 
I40E_MNGSB_FDCS_CRC_SEED_SHIFT) +#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */ +#define I40E_MNGSB_FDS_START_BC_SHIFT 0 +#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT) +#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16 +#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT) + +#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127 +#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0 +#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT) +#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ +#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127 +#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0 +#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT) + +#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */ +#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12 +#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT) +#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16 +#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT) +#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */ +#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12 +#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT) +#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16 +#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT) +#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */ +#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 +#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT) +#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 +#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT) +#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8 +#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT) +#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */ +#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 +#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT) +#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 +#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT) +#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 +#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) +#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 +#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) +#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 +#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) + +#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7 +#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0 +#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT) +#define I40E_GL_FWSTS_FWROWD_SHIFT 8 +#define I40E_GL_FWSTS_FWROWD_MASK 
I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT) +#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */ +#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT) +#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15 +#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 +#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) +#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_CEQPART_MAX_INDEX 15 +#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0 +#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT) +#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16 +#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT) +#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */ +#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0 +#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT) +#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_DBCQPART_MAX_INDEX 15 +#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0 +#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT) +#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16 +#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT) +#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */ +#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0 +#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT) +#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_DBQPPART_MAX_INDEX 15 +#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0 +#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT) +#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16 +#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT) +#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0 +#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT) +#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0 +#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT) +#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */ +#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0 +#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT) +#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */ +#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, 
I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT) +#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PECQBASE_MAX_INDEX 15 +#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0 +#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT) +#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PECQCNT_MAX_INDEX 15 +#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0 +#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT) +#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */ +#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0 +#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT) +#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0 +#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT) +#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0 +#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT) +#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */ +#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT) +#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */ +#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0 +#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT) +#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0 +#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT) +#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0 +#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT) +#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */ +#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0 +#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT) +#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */ +#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0 +#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT) +#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0 +#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT) +#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0 +#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT) +#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */ +#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0 +#define 
I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT) +#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */ +#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0 +#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT) +#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0 +#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT) +#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0 +#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT) +#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 +#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) +#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */ +#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0 +#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT) +#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */ +#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0 +#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT) +#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */ +#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0 +#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT) +#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0 +#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT) +#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0 +#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT) +#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */ +#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT) +#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15 +#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0 +#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT) +#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15 +#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0 +#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT) +#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */ +#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0 +#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT) +#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */ +#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0 
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT) +#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15 +#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0 +#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT) +#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15 +#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0 +#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT) +#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */ +#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0 +#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT) +#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */ +#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0 +#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT) +#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0 +#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT) +#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15 +#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0 +#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT) +#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15 +#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 +#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT) +#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */ +#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0 +#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT) +#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */ +#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0 +#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT) +#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */ +#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0 +#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT) +#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15 +#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0 +#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT) +#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16 +#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT) +#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 +#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) +#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 
*/ /* Reset: CORER */ +#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31 +#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0 +#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT) +#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16 +#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT) +#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31 +#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0 +#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT) +#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16 +#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT) +#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31 +#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0 +#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT) +#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16 +#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT) +#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0 +#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT) +#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0 +#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT) +#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPDINV_MAX_INDEX 31 +#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0 +#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT) +#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15 +#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT) +#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16 +#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT) +#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0 +#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT) +#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0 +#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT) +#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0 +#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT) +#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0 +#define 
I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT) +#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0 +#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT) +#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0 +#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT) +#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0 +#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT) +#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0 +#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT) +#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT) +#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0 +#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT) +#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0 +#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT) +#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0 +#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT) +#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) +#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0 +#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT) +#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0 +#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT) +#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31 
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0 +#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT) +#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0 +#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT) +#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0 +#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT) +#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0 +#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT) +#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0 +#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT) +#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0 +#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT) +#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31 +#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 +#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT) +#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLHMC_VFSDPART_MAX_INDEX 31 +#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0 +#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT) +#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16 +#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT) +#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */ +#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT) +#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */ +#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT) +#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */ +#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0 +#define 
I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT) +#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8 +#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT) +#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20 +#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT) +#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15 +#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) +#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15 +#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT) +#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */ +#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0 +#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT) +#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */ +#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0 +#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT) +#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */ +#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0 +#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT) +#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */ +#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0 +#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2 +#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4 +#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8 +#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9 +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10 +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11 +#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT) +#define 
I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12 +#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13 +#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT) +#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14 +#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT) +#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */ +#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0 +#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT) +#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */ +#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0 +#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT) +#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2 +#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT) +#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3 +#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT) +#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 +#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT) +#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 +#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT) +#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VPLAN_QBASE_MAX_INDEX 127 +#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0 +#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT) +#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11 +#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT) +#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31 +#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT) +#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */ +#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0 +#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT) +#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */ +#define I40E_GLNVM_AL_REQ_POR_SHIFT 0 +#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT) +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1 +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT) +#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2 +#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT) +#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3 +#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT) +#define I40E_GLNVM_AL_REQ_PE_SHIFT 4 +#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT) +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5 +#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT) +#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */ +#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0 +#define 
I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
+#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8
+#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
+#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
+#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10
+#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
+#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2
+#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
+#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
+#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
+#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6
+#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7
+#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
+#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
+#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10
+#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
+#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
+#define I40E_MNGSB_DADD_ADDR_SHIFT 0
+#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
+#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
+#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
+#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
+#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
+#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
+#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
+#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
+#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
+#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
+#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
+#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
+#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
+#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
+#define I40E_MNGSB_RDATA_DATA_SHIFT 0
+#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
+#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
+#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
+#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
+#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
+#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
+#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
+#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
+#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
+#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
+#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
+#define I40E_MNGSB_RHDR0_EH_SHIFT 31
+#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
+#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
+#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
+#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
+#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
+#define I40E_MNGSB_WDATA_DATA_SHIFT 0
+#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
+#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
+#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
+#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
+#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
+#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
+#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
+#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
+#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
+#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
+#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
+#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
+
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
+
+#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
+
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
+#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
+#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
+#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
+#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT) +#define
I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 +#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT) +#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, 
I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 +#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT) +#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 +#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) +#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 +#define 
I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 +#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT) +#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31 +#define 
I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 +#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT) +#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 +#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) +#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define 
I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) +#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 
31 +#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 +#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT) +#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 +#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT) +#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0 +#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT) +#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0 +#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT) +#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31 +#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0 +#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT) +#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0 +#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT) +#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 +#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT) +#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 +#define 
I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) +#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 +#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) +#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 +#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) +#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31 +#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 +#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT) +#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31 +#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 +#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT) +#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) +#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) +#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) +#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) +#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */ +#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0 +#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT) +#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */ +#define I40E_GLQF_APBVT_MAX_INDEX 2047 +#define I40E_GLQF_APBVT_APBVT_SHIFT 0 +#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT) +#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */ +#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63 +#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0 +#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT) +#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1 +#define 
I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0 +#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT) +#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */ +#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0 +#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT) +#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8 +#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT) +#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */ +#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0 +#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT) +#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5 +#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT) +/* Redefined for X722 family */ +#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */ +#define I40E_X722_PFQF_HLUT_MAX_INDEX 127 +#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0 +#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT) +#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8 +#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT) +#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16 +#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT) +#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24 +#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT) +#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */ +#define I40E_PFQF_HREGION_MAX_INDEX 7 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT) +#define I40E_PFQF_HREGION_REGION_0_SHIFT 1 +#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT) +#define I40E_PFQF_HREGION_REGION_1_SHIFT 5 +#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT) +#define I40E_PFQF_HREGION_REGION_2_SHIFT 9 +#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT) +#define I40E_PFQF_HREGION_REGION_3_SHIFT 13 +#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT) +#define I40E_PFQF_HREGION_REGION_4_SHIFT 17 +#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT) +#define I40E_PFQF_HREGION_REGION_5_SHIFT 21 +#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT) +#define 
I40E_PFQF_HREGION_REGION_6_SHIFT 25 +#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT) +#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 +#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT) +#define I40E_PFQF_HREGION_REGION_7_SHIFT 29 +#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT) +#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8 +#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT) +#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */ +#define I40E_VSIQF_HKEY_MAX_INDEX 12 +#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0 +#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT) +#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8 +#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT) +#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16 +#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT) +#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24 +#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT) +#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */ +#define I40E_VSIQF_HLUT_MAX_INDEX 15 +#define I40E_VSIQF_HLUT_LUT0_SHIFT 0 +#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT) +#define I40E_VSIQF_HLUT_LUT1_SHIFT 8 +#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT) +#define I40E_VSIQF_HLUT_LUT2_SHIFT 16 +#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT) +#define I40E_VSIQF_HLUT_LUT3_SHIFT 24 +#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT) +#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */ +#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0 +#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT) +#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */ +#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0 +#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT) +#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) +#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ +#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 +#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) +#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) +#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) +#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) +#define 
I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) +#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ +#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 +#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT) +#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ +#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 +#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT) +#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ +#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 +#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT) +#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) +#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ +#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 +#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) +#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ +#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 +#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) +#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) +#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) +#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ +#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 +#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) +#endif /* _I40E_REGISTER_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 330e4ef43cd8..738aca68f665 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -853,15 +853,40 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) **/ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { - u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set 
noitr */ - I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | - I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; - /* allow 00 to be written to the index */ + u16 flags = q_vector->tx.ring[0].flags; - wr32(&vsi->back->hw, - I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1), - val); + if (flags & I40E_TXR_FLAGS_WB_ON_ITR) { + u32 val; + + if (q_vector->arm_wb_state) + return; + + val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK; + + wr32(&vsi->back->hw, + I40E_PFINT_DYN_CTLN(q_vector->v_idx + + vsi->base_vector - 1), + val); + q_vector->arm_wb_state = true; + } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */ + I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; + /* allow 00 to be written to the index */ + + wr32(&vsi->back->hw, + I40E_PFINT_DYN_CTLN(q_vector->v_idx + + vsi->base_vector - 1), val); + } else { + u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */ + I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK; + /* allow 00 to be written to the index */ + + wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); + } } /** @@ -1404,7 +1429,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, * so the total length of IPv4 header is IHL*4 bytes * The UDP_0 bit *may* be set if the *inner* header is UDP */ - if (ipv4_tunnel) { + if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) && + (ipv4_tunnel)) { skb->transport_header = skb->mac_header + sizeof(struct ethhdr) + (ip_hdr(skb)->ihl * 4); @@ -1918,6 +1944,9 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) return budget; } + if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR) + q_vector->arm_wb_state = false; + /* Work is done so exit the polling mode and re-enable the interrupt */ napi_complete(napi); if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { @@ -2011,6 +2040,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, /* Due to lack of space, no more new filters can be programmed */ if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) return; + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) { + /* HW ATR eviction will take care of removing filters on FIN + * and RST packets. 
+ */ + if (th->fin || th->rst) + return; + } tx_ring->atr_count++; @@ -2066,6 +2102,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & I40E_TXD_FLTR_QW1_CNTINDEX_MASK; + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) + dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; + fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); fdir_desc->rsvd = cpu_to_le32(0); fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); @@ -2273,11 +2312,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, struct iphdr *this_ip_hdr; u32 network_hdr_len; u8 l4_hdr = 0; + struct udphdr *oudph; + struct iphdr *oiph; u32 l4_tunnel = 0; if (skb->encapsulation) { switch (ip_hdr(skb)->protocol) { case IPPROTO_UDP: + oudph = udp_hdr(skb); + oiph = ip_hdr(skb); l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; @@ -2314,6 +2357,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, *tx_flags &= ~I40E_TX_FLAGS_IPV4; *tx_flags |= I40E_TX_FLAGS_IPV6; } + if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) && + (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) && + (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) { + oudph->check = ~csum_tcpudp_magic(oiph->saddr, + oiph->daddr, + (skb->len - skb_transport_offset(skb)), + IPPROTO_UDP, 0); + *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK; + } } else { network_hdr_len = skb_network_header_len(skb); this_ip_hdr = ip_hdr(skb); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 429833c47245..f1385a1989fa 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -78,6 +78,18 @@ enum i40e_dyn_idx_t { BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) +#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) + +#define i40e_pf_get_default_rss_hena(pf) \ + (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? 
\ + I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) + /* Supported Rx Buffer Sizes */ #define I40E_RXBUFFER_512 512 /* Used for packet split */ #define I40E_RXBUFFER_2048 2048 @@ -253,6 +265,10 @@ struct i40e_ring { bool ring_active; /* is ring online or not */ bool arm_wb; /* do something to arm write back */ + u16 flags; +#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) +#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1) + /* stats structs */ struct i40e_queue_stats stats; struct u64_stats_sync syncp; diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index a20128b82b62..61b6b114b4bc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -47,6 +47,11 @@ #define I40E_DEV_ID_20G_KR2 0x1587 #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 +#define I40E_DEV_ID_SFP_X722 0x37D0 +#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 +#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#define I40E_DEV_ID_X722_VF 0x37CD +#define I40E_DEV_ID_X722_VF_HV 0x37D9 #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ (d) == I40E_DEV_ID_QSFP_B || \ @@ -120,6 +125,8 @@ enum i40e_mac_type { I40E_MAC_X710, I40E_MAC_XL710, I40E_MAC_VF, + I40E_MAC_X722, + I40E_MAC_X722_VF, I40E_MAC_GENERIC, }; @@ -502,7 +509,8 @@ struct i40e_hw { static inline bool i40e_is_vf(struct i40e_hw *hw) { - return hw->mac.type == I40E_MAC_VF; + return (hw->mac.type == I40E_MAC_VF || + hw->mac.type == I40E_MAC_X722_VF); } struct i40e_driver_version { @@ -599,14 +607,18 @@ enum i40e_rx_desc_status_bits { I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, - I40E_RX_DESC_STATUS_PIF_SHIFT = 8, + /* Note: Bit 8 is reserved in X710 and XL710 */ + I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ I40E_RX_DESC_STATUS_FLM_SHIFT = 11, I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ - I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18, + /* Note: For non-tunnel packets INT_UDP_0 is the right status for + * UDP header + */ + I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */ }; @@ -947,6 +959,8 @@ enum i40e_tx_ctx_desc_eipt_offload { #define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ I40E_TXD_CTX_QW0_DECTTL_SHIFT) +#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 +#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) struct i40e_filter_program_desc { __le32 qindex_flex_ptype_vsi; __le32 rsvd; @@ -965,15 +979,24 @@ struct i40e_filter_program_desc { /* Packet Classifier Types for filters */ enum i40e_filter_pctype { - /* Note: Values 0-30 are reserved for future use */ + /* Note: Values 0-28 are reserved for future use. + * Values 29, 30, and 32 are not supported on XL710 and X710. + */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, - /* Note: Value 32 is reserved for future use */ + I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, - /* Note: Values 37-40 are reserved for future use */ + /* Note: Values 37-38 are reserved for future use. + * Values 39, 40, and 42 are not supported on XL710 and X710. 
+ */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, @@ -1026,6 +1049,10 @@ enum i40e_filter_program_desc_pcmd { #define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) +#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT) + #define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 #define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index d29d4062addf..8a7607c6e142 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1177,9 +1177,14 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2; vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi->info.pvid) - vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN | - I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG; - + vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ) + vfres->vf_offload_flags |= + I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ; + } else { + vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG; + } vfres->num_vsis = num_vsis; vfres->num_queue_pairs = vf->num_queue_pairs; vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index d5bd6f066921..c8022092d369 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -35,7 +35,6 @@ #define I40E_FW_API_VERSION_MAJOR 0x0001 #define I40E_FW_API_VERSION_MINOR 0x0004 -#define I40E_FW_API_VERSION_A0_MINOR 0x0000 struct i40e_aq_desc { __le16 flags; @@ -255,6 +254,10 @@ enum i40e_admin_queue_opc { /* Tunnel commands */ i40e_aqc_opc_add_udp_tunnel = 0x0B00, i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_set_rss_key = 0x0B02, + i40e_aqc_opc_set_rss_lut = 0x0B03, + i40e_aqc_opc_get_rss_key = 0x0B04, + i40e_aqc_opc_get_rss_lut = 0x0B05, /* Async Events */ i40e_aqc_opc_event_lan_overflow = 0x1001, @@ -819,8 +822,12 @@ struct i40e_aqc_vsi_properties_data { I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) /* queueing option section */ u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 +#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 u8 queueing_opt_reserved[3]; /* scheduler section */ u8 up_enable_bits; @@ -2089,6 +2096,46 @@ struct i40e_aqc_del_udp_tunnel_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); +struct i40e_aqc_get_set_rss_key { +#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) + __le16 vsi_id; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); + +struct 
i40e_aqc_get_set_rss_key_data { + u8 standard_rss_key[0x28]; + u8 extended_hash_key[0xc]; +}; + +I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); + +struct i40e_aqc_get_set_rss_lut { +#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) + __le16 vsi_id; +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \ + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) + +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 + __le16 flags; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut); + /* tunnel key structure 0x0B10 */ struct i40e_aqc_tunnel_key_structure_A0 { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 56c7e751149b..023d32d090ce 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -54,6 +54,15 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_20G_KR2: hw->mac.type = I40E_MAC_XL710; break; + case I40E_DEV_ID_SFP_X722: + case I40E_DEV_ID_1G_BASE_T_X722: + case I40E_DEV_ID_10G_BASE_T_X722: + hw->mac.type = I40E_MAC_X722; + break; + case I40E_DEV_ID_X722_VF: + case I40E_DEV_ID_X722_VF_HV: + hw->mac.type = I40E_MAC_X722_VF; + break; case I40E_DEV_ID_VF: case I40E_DEV_ID_VF_HV: hw->mac.type = I40E_MAC_VF; @@ -383,6 +392,169 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, return status; } +/** + * i40e_aq_get_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * @set: set true to set the table, false to get the table + * + * Internal function to get or set RSS look up table + **/ +static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw, + u16 vsi_id, bool pf_lut, + u8 *lut, u16 lut_size, + bool set) +{ + i40e_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_lut *cmd_resp = + (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw; + + if (set) + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_lut); + else + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_lut); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + cpu_to_le16((u16)((vsi_id << + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_LUT_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID); + + if (pf_lut) + cmd_resp->flags |= cpu_to_le16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + else + cmd_resp->flags |= cpu_to_le16((u16) + ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI << + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) & + I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK)); + + cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut)); + cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut)); + + status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL); + + return status; +} + +/** + * i40evf_aq_get_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + 
* @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * get the RSS lookup table, PF or VSI type + **/ +i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, + false); +} + +/** + * i40evf_aq_set_rss_lut + * @hw: pointer to the hardware structure + * @vsi_id: vsi fw index + * @pf_lut: for PF table set true, for VSI table set false + * @lut: pointer to the lut buffer provided by the caller + * @lut_size: size of the lut buffer + * + * set the RSS lookup table, PF or VSI type + **/ +i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id, + bool pf_lut, u8 *lut, u16 lut_size) +{ + return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true); +} + +/** + * i40e_aq_get_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * @set: set true to set the key, false to get the key + * + * get the RSS key per VSI + **/ +static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key, + bool set) +{ + i40e_status status; + struct i40e_aq_desc desc; + struct i40e_aqc_get_set_rss_key *cmd_resp = + (struct i40e_aqc_get_set_rss_key *)&desc.params.raw; + u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data); + + if (set) + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_rss_key); + else + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_rss_key); + + /* Indirect command */ + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); + + cmd_resp->vsi_id = + cpu_to_le16((u16)((vsi_id << + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) & + I40E_AQC_SET_RSS_KEY_VSI_ID_MASK)); + cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID); + cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key)); + cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key)); + + status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL); + + return status; +} + +/** + * i40evf_aq_get_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + **/ +i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return i40e_aq_get_set_rss_key(hw, vsi_id, key, false); +} + +/** + * i40evf_aq_set_rss_key + * @hw: pointer to the hw struct + * @vsi_id: vsi fw index + * @key: pointer to key info struct + * + * set the RSS key per VSI + **/ +i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw, + u16 vsi_id, + struct i40e_aqc_get_set_rss_key_data *key) +{ + return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); +} + /* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the * hardware to a bit-field that can be used by SW to more easily determine the diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 856eb9d06595..55ae4b0f8192 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -63,6 +63,17 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err); +i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 
lut_size); +i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); +i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); + i40e_status i40e_set_mac_type(struct i40e_hw *hw); extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[]; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h index 3cc737629bf7..2e2ccc1719b6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_register.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h @@ -3366,4 +3366,64 @@ #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) #define I40E_VFQF_HREGION_REGION_7_SHIFT 29 #define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) -#endif +#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT) +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) +#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ +#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 +#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) +#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) +#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) +#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) +#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ +#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 +#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT) +#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ +#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 +#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT) +#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ +#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 +#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT) +#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) +#define I40E_VFPE_CQPTAIL1 0x0000A000 /* 
Reset: VFR */ +#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 +#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) +#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ +#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 +#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) +#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) +#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) +#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ +#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 +#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) +#endif /* _I40E_REGISTER_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 60f88e4ad065..7309479a0764 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -366,15 +366,32 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) **/ static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) { - u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ - I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK | - I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK; - /* allow 00 to be written to the index */ + u16 flags = q_vector->tx.ring[0].flags; - wr32(&vsi->back->hw, - I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1), - val); + if (flags & I40E_TXR_FLAGS_WB_ON_ITR) { + u32 val; + + if (q_vector->arm_wb_state) + return; + + val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK; + + wr32(&vsi->back->hw, + I40E_VFINT_DYN_CTLN1(q_vector->v_idx + + vsi->base_vector - 1), + val); + q_vector->arm_wb_state = true; + } else { + u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ + I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK; + /* allow 00 to be written to the index */ + + wr32(&vsi->back->hw, + I40E_VFINT_DYN_CTLN1(q_vector->v_idx + + vsi->base_vector - 1), val); + } } /** @@ -1372,6 +1389,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) return budget; } + if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) + q_vector->arm_wb_state = false; + /* Work is done so exit the polling mode and re-enable the interrupt */ napi_complete(napi); i40e_update_enable_itr(vsi, q_vector); @@ -1508,11 +1528,15 @@ static void i40e_tx_enable_csum(struct 
sk_buff *skb, u32 *tx_flags, struct iphdr *this_ip_hdr; u32 network_hdr_len; u8 l4_hdr = 0; + struct udphdr *oudph; + struct iphdr *oiph; u32 l4_tunnel = 0; if (skb->encapsulation) { switch (ip_hdr(skb)->protocol) { case IPPROTO_UDP: + oudph = udp_hdr(skb); + oiph = ip_hdr(skb); l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; @@ -1551,6 +1575,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, } + if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) && + (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) && + (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) { + oudph->check = ~csum_tcpudp_magic(oiph->saddr, + oiph->daddr, + (skb->len - skb_transport_offset(skb)), + IPPROTO_UDP, 0); + *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK; + } } else { network_hdr_len = skb_network_header_len(skb); this_ip_hdr = ip_hdr(skb); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 6b47c818d1f0..9a30f5d8c089 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -78,6 +78,18 @@ enum i40e_dyn_idx_t { BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) +#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \ + BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) + +#define i40e_pf_get_default_rss_hena(pf) \ + (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \ + I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) + /* Supported Rx Buffer Sizes */ #define I40E_RXBUFFER_512 512 /* Used for packet split */ #define I40E_RXBUFFER_2048 2048 @@ -250,6 +262,10 @@ struct i40e_ring { bool ring_active; /* is ring online or not */ bool arm_wb; /* do something to arm write back */ + u16 flags; +#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) +#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1) + /* stats structs */ struct i40e_queue_stats stats; struct u64_stats_sync syncp; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 4ba9a012dcba..e32dc0b3616d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -47,6 +47,11 @@ #define I40E_DEV_ID_20G_KR2 0x1587 #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 +#define I40E_DEV_ID_SFP_X722 0x37D0 +#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 +#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#define I40E_DEV_ID_X722_VF 0x37CD +#define I40E_DEV_ID_X722_VF_HV 0x37D9 #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ (d) == I40E_DEV_ID_QSFP_B || \ @@ -120,6 +125,8 @@ enum i40e_mac_type { I40E_MAC_X710, I40E_MAC_XL710, I40E_MAC_VF, + I40E_MAC_X722, + I40E_MAC_X722_VF, I40E_MAC_GENERIC, }; @@ -496,7 +503,8 @@ struct i40e_hw { static inline bool i40e_is_vf(struct i40e_hw *hw) { - return hw->mac.type == I40E_MAC_VF; + return (hw->mac.type == I40E_MAC_VF || + hw->mac.type == I40E_MAC_X722_VF); } struct i40e_driver_version { @@ -593,14 +601,18 @@ enum i40e_rx_desc_status_bits { I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, - I40E_RX_DESC_STATUS_PIF_SHIFT = 8, + /* Note: Bit 8 is reserved in X710 and XL710 
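+	 * and is used on X722 as EXT_UDP_0 (see the enum entry below)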
+	 */
+	I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT	= 8,
 	I40E_RX_DESC_STATUS_UMBCAST_SHIFT	= 9, /* 2 BITS */
 	I40E_RX_DESC_STATUS_FLM_SHIFT		= 11,
 	I40E_RX_DESC_STATUS_FLTSTAT_SHIFT	= 12, /* 2 BITS */
 	I40E_RX_DESC_STATUS_LPBK_SHIFT		= 14,
 	I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT	= 15,
 	I40E_RX_DESC_STATUS_RESERVED_SHIFT	= 16, /* 2 BITS */
-	I40E_RX_DESC_STATUS_UDP_0_SHIFT		= 18,
+	/* Note: For non-tunnel packets, INT_UDP_0 is the correct status bit
+	 * for the UDP header.
+	 */
+	I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT	= 18,
 	I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
 };
@@ -941,6 +953,8 @@ enum i40e_tx_ctx_desc_eipt_offload {
 #define I40E_TXD_CTX_QW0_DECTTL_MASK	(0xFULL << \
 					 I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT	23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK	BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
 
 struct i40e_filter_program_desc {
 	__le32 qindex_flex_ptype_vsi;
 	__le32 rsvd;
@@ -959,15 +973,24 @@ struct i40e_filter_program_desc {
 /* Packet Classifier Types for filters */
 enum i40e_filter_pctype {
-	/* Note: Values 0-30 are reserved for future use */
+	/* Note: Values 0-28 are reserved for future use.
+	 * Values 29, 30 and 32 are not supported on XL710 and X710.
+	 */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP	= 29,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP	= 30,
 	I40E_FILTER_PCTYPE_NONF_IPV4_UDP		= 31,
-	/* Note: Value 32 is reserved for future use */
+	I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK	= 32,
 	I40E_FILTER_PCTYPE_NONF_IPV4_TCP		= 33,
 	I40E_FILTER_PCTYPE_NONF_IPV4_SCTP		= 34,
 	I40E_FILTER_PCTYPE_NONF_IPV4_OTHER		= 35,
 	I40E_FILTER_PCTYPE_FRAG_IPV4			= 36,
-	/* Note: Values 37-40 are reserved for future use */
+	/* Note: Values 37-38 are reserved for future use.
+	 * Values 39, 40 and 42 are not supported on XL710 and X710.
+	 */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP	= 39,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP	= 40,
 	I40E_FILTER_PCTYPE_NONF_IPV6_UDP		= 41,
+	I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK	= 42,
 	I40E_FILTER_PCTYPE_NONF_IPV6_TCP		= 43,
 	I40E_FILTER_PCTYPE_NONF_IPV6_SCTP		= 44,
 	I40E_FILTER_PCTYPE_NONF_IPV6_OTHER		= 45,
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index c33c7cce52fe..3817cbbf45e6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -101,6 +101,8 @@ struct i40e_vsi {
 #define MAX_RX_QUEUES 8
 #define MAX_TX_QUEUES MAX_RX_QUEUES
 
+#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
+
 /* MAX_MSIX_Q_VECTORS of these are allocated,
  * but we only use one per queue-specific vector.
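 * The arm_wb_state flag added below tracks whether a WB_ON_ITR
 * write-back has been armed for this vector: i40e_force_wb() sets it,
 * and the NAPI poll loop clears it once the vector has been serviced.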
 */
@@ -115,6 +117,7 @@ struct i40e_q_vector {
 	u8 num_ringpairs;	/* total number of ring pairs in vector */
 	int v_idx;		/* vector index in list */
 	char name[IFNAMSIZ + 9];
+	bool arm_wb_state;
 	cpumask_var_t affinity_mask;
 };
@@ -218,11 +221,15 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_PF_COMMS_FAILED		BIT(8)
 #define I40EVF_FLAG_RESET_PENDING		BIT(9)
 #define I40EVF_FLAG_RESET_NEEDED		BIT(10)
-/* duplcates for common code */
+#define I40EVF_FLAG_WB_ON_ITR_CAPABLE		BIT(11)
+#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE	BIT(12)
+/* duplicates for common code */
 #define I40E_FLAG_FDIR_ATR_ENABLED		0
 #define I40E_FLAG_DCB_ENABLED			0
 #define I40E_FLAG_IN_NETPOLL			I40EVF_FLAG_IN_NETPOLL
 #define I40E_FLAG_RX_CSUM_ENABLED		I40EVF_FLAG_RX_CSUM_ENABLED
+#define I40E_FLAG_WB_ON_ITR_CAPABLE		I40EVF_FLAG_WB_ON_ITR_CAPABLE
+#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE	I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
 	/* flags for admin queue service task */
 	u32 aq_required;
 #define I40EVF_FLAG_AQ_ENABLE_QUEUES		BIT(0)
@@ -234,6 +241,7 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_AQ_CONFIGURE_QUEUES		BIT(6)
 #define I40EVF_FLAG_AQ_MAP_VECTORS		BIT(7)
 #define I40EVF_FLAG_AQ_HANDLE_RESET		BIT(8)
+#define I40EVF_FLAG_AQ_CONFIGURE_RSS		BIT(9)
 #define I40EVF_FLAG_AQ_GET_CONFIG		BIT(10)
 
 	/* OS defined structs */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 1503cad918d8..2a6063a3a14d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -49,6 +49,7 @@ static const char i40evf_copyright[] =
  */
 static const struct pci_device_id i40evf_pci_tbl[] = {
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
 	/* required last entry */
 	{0, }
 };
@@ -1171,6 +1172,113 @@ out:
 	return err;
 }
 
+/**
+ * i40evf_configure_rss_aq - Prepare for RSS using AQ commands
+ * @vsi: vsi structure
+ * @seed: RSS hash seed
+ **/
+static void i40evf_configure_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
+{
+	struct i40e_aqc_get_set_rss_key_data rss_key;
+	struct i40evf_adapter *adapter = vsi->back;
+	struct i40e_hw *hw = &adapter->hw;
+	int ret = 0, i;
+	u8 *rss_lut;
+
+	if (!vsi->id)
+		return;
+
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+
+	memset(&rss_key, 0, sizeof(rss_key));
+	memcpy(&rss_key, seed, sizeof(rss_key));
+
+	rss_lut = kzalloc(((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4), GFP_KERNEL);
+	if (!rss_lut)
+		return;
+
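+	/* The LUT is sized in HLUT register units: (I40E_VFQF_HLUT_MAX_INDEX
+	 * + 1) registers of four one-byte queue indexes each.  The key and
+	 * the LUT are then handed to firmware through the new indirect AQ
+	 * commands added earlier in this series.
+	 */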
+	/* Populate the LUT with max no. of PF queues in round robin fashion */
+	for (i = 0; i <= (I40E_VFQF_HLUT_MAX_INDEX * 4); i++)
+		rss_lut[i] = i % adapter->num_active_queues;
+
+	ret = i40evf_aq_set_rss_key(hw, vsi->id, &rss_key);
+	if (ret) {
+		dev_err(&adapter->pdev->dev,
+			"Cannot set RSS key, err %s aq_err %s\n",
+			i40evf_stat_str(hw, ret),
+			i40evf_aq_str(hw, hw->aq.asq_last_status));
+		kfree(rss_lut);	/* don't leak the LUT buffer on the error path */
+		return;
+	}
+
+	ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, rss_lut,
+				    (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4);
+	if (ret)
+		dev_err(&adapter->pdev->dev,
+			"Cannot set RSS lut, err %s aq_err %s\n",
+			i40evf_stat_str(hw, ret),
+			i40evf_aq_str(hw, hw->aq.asq_last_status));
+	kfree(rss_lut);
+}
+
+/**
+ * i40evf_configure_rss_reg - Prepare for RSS if used
+ * @adapter: board private structure
+ * @seed: RSS hash seed
+ **/
+static void i40evf_configure_rss_reg(struct i40evf_adapter *adapter,
+				     const u8 *seed)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	u32 *seed_dw = (u32 *)seed;
+	u32 cqueue = 0;
+	u32 lut = 0;
+	int i, j;
+
+	/* Fill out hash function seed */
+	for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+		wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]);
+
+	/* Populate the LUT with max no. of PF queues in round robin fashion */
+	for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+		lut = 0;
+		for (j = 0; j < 4; j++) {
+			if (cqueue == adapter->num_active_queues)
+				cqueue = 0;
+			lut |= ((cqueue) << (8 * j));
+			cqueue++;
+		}
+		wr32(hw, I40E_VFQF_HLUT(i), lut);
+	}
+	i40e_flush(hw);
+}
+
+/**
+ * i40evf_configure_rss - Prepare for RSS
+ * @adapter: board private structure
+ **/
+static void i40evf_configure_rss(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	u8 seed[I40EVF_HKEY_ARRAY_SIZE];
+	u64 hena;
+
+	netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE);
+
+	/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+	hena = I40E_DEFAULT_RSS_HENA;
+	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+
+	if (RSS_AQ(adapter))
+		i40evf_configure_rss_aq(&adapter->vsi, seed);
+	else
+		i40evf_configure_rss_reg(adapter, seed);
+}
+
 /**
  * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
  * @adapter: board private structure to initialize
@@ -1416,6 +1524,16 @@ static void i40evf_watchdog_task(struct work_struct *work)
 		goto watchdog_done;
 	}
 
+	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
+		/* This message goes straight to the firmware, not the
+		 * PF, so we don't have to set current_op as we will
+		 * not get a response through the ARQ.
+		 */
+		i40evf_configure_rss(adapter);
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
+		goto watchdog_done;
+	}
+
 	if (adapter->state == __I40EVF_RUNNING)
 		i40evf_request_stats(adapter);
 watchdog_done:
@@ -1438,45 +1556,6 @@ restart_watchdog:
 	schedule_work(&adapter->adminq_task);
 }
 
-/**
- * i40evf_configure_rss - Prepare for RSS
- * @adapter: board private structure
- **/
-static void i40evf_configure_rss(struct i40evf_adapter *adapter)
-{
-	u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1];
-	struct i40e_hw *hw = &adapter->hw;
-	u32 cqueue = 0;
-	u32 lut = 0;
-	int i, j;
-	u64 hena;
-
-	/* Hash type is configured by the PF - we just supply the key */
-	netdev_rss_key_fill(rss_key, sizeof(rss_key));
-
-	/* Fill out hash function seed */
-	for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
-		wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]);
-
-	/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
-	hena = I40E_DEFAULT_RSS_HENA;
-	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
-	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
-
-	/* Populate the LUT with max no.
of queues in round robin fashion */ - for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { - lut = 0; - for (j = 0; j < 4; j++) { - if (cqueue == adapter->num_active_queues) - cqueue = 0; - lut |= ((cqueue) << (8 * j)); - cqueue++; - } - wr32(hw, I40E_VFQF_HLUT(i), lut); - } - i40e_flush(hw); -} - #define I40EVF_RESET_WAIT_MS 10 #define I40EVF_RESET_WAIT_COUNT 500 /** @@ -2186,7 +2265,8 @@ static void i40evf_init_task(struct work_struct *work) if (err) goto err_sw_init; i40evf_map_rings_to_vectors(adapter); - i40evf_configure_rss(adapter); + if (!RSS_AQ(adapter)) + i40evf_configure_rss(adapter); err = i40evf_request_misc_irq(adapter); if (err) goto err_sw_init; @@ -2211,6 +2291,13 @@ static void i40evf_init_task(struct work_struct *work) adapter->state = __I40EVF_DOWN; set_bit(__I40E_DOWN, &adapter->vsi.state); i40evf_misc_irq_enable(adapter); + + if (RSS_AQ(adapter)) { + adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; + mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); + } else { + i40evf_configure_rss(adapter); + } return; restart: schedule_delayed_work(&adapter->init_task, diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index d19256994e5c..7a73510e547c 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -231,6 +231,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) /* Verify phy id and set remaining function pointers */ switch (phy->id) { case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: case I347AT4_E_PHY_ID: case M88E1112_E_PHY_ID: case M88E1111_I_PHY_ID: @@ -243,7 +244,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) else phy->ops.get_cable_length = igb_get_cable_length_m88; phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; - /* Check if this PHY is confgured for media swap. */ + /* Check if this PHY is configured for media swap. */ if (phy->id == M88E1112_E_PHY_ID) { u16 data; @@ -266,6 +267,11 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) hw->mac.ops.check_for_link = igb_check_for_link_media_swap; } + if (phy->id == M88E1512_E_PHY_ID) { + ret_val = igb_initialize_M88E1512_phy(hw); + if (ret_val) + goto out; + } break; case IGP03E1000_E_PHY_ID: phy->type = e1000_phy_igp_3; @@ -897,6 +903,7 @@ out: **/ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) { + struct e1000_phy_info *phy = &hw->phy; s32 ret_val; /* This isn't a true "hard" reset, but is the only reset @@ -913,7 +920,11 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) goto out; ret_val = igb_phy_sw_reset(hw); + if (ret_val) + goto out; + if (phy->id == M88E1512_E_PHY_ID) + ret_val = igb_initialize_M88E1512_phy(hw); out: return ret_val; } @@ -1587,6 +1598,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) case I347AT4_E_PHY_ID: case M88E1112_E_PHY_ID: case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: case I210_I_PHY_ID: ret_val = igb_copper_link_setup_m88_gen2(hw); break; @@ -2629,7 +2641,8 @@ s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) u16 phy_data; if ((hw->phy.media_type != e1000_media_type_copper) || - (phy->id != M88E1543_E_PHY_ID)) + ((phy->id != M88E1543_E_PHY_ID) && + (phy->id != M88E1512_E_PHY_ID))) goto out; if (!hw->dev_spec._82575.eee_disable) { @@ -2709,7 +2722,8 @@ s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status) /* Check if EEE is supported on this device. 
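 * (i354 EEE requires a copper link and one of the Marvell M88E1543 or
 * M88E1512 PHYs, which is what the check below enforces)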
 */
 	if ((hw->phy.media_type != e1000_media_type_copper) ||
-	    (phy->id != M88E1543_E_PHY_ID))
+	    ((phy->id != M88E1543_E_PHY_ID) &&
+	     (phy->id != M88E1512_E_PHY_ID)))
 		goto out;
 
 	ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index f8684aa285be..b1915043bc0c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -604,6 +604,10 @@
 #define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT	7
 #define E1000_M88E1112_PAGE_ADDR	0x16
 #define E1000_M88E1112_STATUS	0x01
+#define E1000_M88E1512_CFG_REG_1	0x0010
+#define E1000_M88E1512_CFG_REG_2	0x0011
+#define E1000_M88E1512_CFG_REG_3	0x0007
+#define E1000_M88E1512_MODE	0x0014
 
 /* PCI Express Control */
 #define E1000_GCR_CMPL_TMOUT_MASK	0x0000F000
@@ -861,6 +865,7 @@
 #define M88_VENDOR	0x0141
 #define I210_I_PHY_ID	0x01410C00
 #define M88E1543_E_PHY_ID	0x01410EA0
+#define M88E1512_E_PHY_ID	0x01410DD0
 
 /* M88E1000 Specific Registers */
 #define M88E1000_PHY_SPEC_CTRL	0x10 /* PHY Specific Control Register */
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 987c9de24764..23ec28f43f6d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1262,6 +1262,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 		switch (hw->phy.id) {
 		case I347AT4_E_PHY_ID:
 		case M88E1112_E_PHY_ID:
+		case M88E1543_E_PHY_ID:
+		case M88E1512_E_PHY_ID:
 		case I210_I_PHY_ID:
 			reset_dsp = false;
 			break;
@@ -1270,9 +1272,9 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 			reset_dsp = false;
 			break;
 		}
-		if (!reset_dsp)
+		if (!reset_dsp) {
 			hw_dbg("Link taking longer than expected.\n");
-		else {
+		} else {
 			/* We didn't get link.
 			 * Reset the DSP and cross our fingers.
 			 */
@@ -1297,6 +1299,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 	if (hw->phy.type != e1000_phy_m88 ||
 	    hw->phy.id == I347AT4_E_PHY_ID ||
 	    hw->phy.id == M88E1112_E_PHY_ID ||
+	    hw->phy.id == M88E1543_E_PHY_ID ||
+	    hw->phy.id == M88E1512_E_PHY_ID ||
 	    hw->phy.id == I210_I_PHY_ID)
 		goto out;
@@ -1737,6 +1741,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
 		phy->cable_length = phy_data / (is_cm ? 100 : 1);
 		break;
 	case M88E1543_E_PHY_ID:
+	case M88E1512_E_PHY_ID:
 	case I347AT4_E_PHY_ID:
 		/* Remember the original page select and set it to 7 */
 		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
@@ -2188,6 +2193,90 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
 	return 0;
 }
 
+/**
+ * igb_initialize_M88E1512_phy - Initialize M88E1512 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initialize Marvell 1512 to work correctly with Avoton.
+ **/
+s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+
+	/* Switch to PHY page 0xFF.
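+	 * Pages 0xFF and 0xFB expose vendor configuration registers; the
+	 * register/value pairs written below are opaque initialization
+	 * magic for this PHY.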
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = igb_phy_sw_reset(hw); + if (ret_val) { + hw_dbg("Error committing the PHY changes\n"); + return ret_val; + } + + /* msec_delay(1000); */ + usleep_range(1000, 2000); +out: + return ret_val; +} + /** * igb_power_up_phy_copper - Restore copper link in case of PHY power down * @hw: pointer to the HW structure diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 7af4ffab0285..24d55edbb0e3 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -61,6 +61,7 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, void igb_power_up_phy_copper(struct e1000_hw *hw); void igb_power_down_phy_copper(struct e1000_hw *hw); s32 igb_phy_init_script_igp3(struct e1000_hw *hw); +s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw); s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 6f0490d0e981..4af2870e49f8 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -104,6 +104,8 @@ #define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ #define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ #define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define E1000_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define E1000_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ #define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ #define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ #define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 
c2bd4f98a837..212d668dabb3 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -540,6 +540,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, struct sk_buff *skb); int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); +void igb_set_flag_queue_pairs(struct igb_adapter *, const u32); #ifdef CONFIG_IGB_HWMON void igb_sysfs_exit(struct igb_adapter *adapter); int igb_sysfs_init(struct igb_adapter *adapter); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index b7b9c670bb3c..74262768b09b 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -3008,6 +3008,7 @@ static int igb_set_channels(struct net_device *netdev, { struct igb_adapter *adapter = netdev_priv(netdev); unsigned int count = ch->combined_count; + unsigned int max_combined = 0; /* Verify they are not requesting separate vectors */ if (!count || ch->rx_count || ch->tx_count) @@ -3018,11 +3019,13 @@ static int igb_set_channels(struct net_device *netdev, return -EINVAL; /* Verify the number of channels doesn't exceed hw limits */ - if (count > igb_max_channels(adapter)) + max_combined = igb_max_channels(adapter); + if (count > max_combined) return -EINVAL; if (count != adapter->rss_queues) { adapter->rss_queues = count; + igb_set_flag_queue_pairs(adapter, max_combined); /* Hardware has to reinitialize queues and interrupts to * match the new configuration. diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 41e274046896..1902ef8f4a0b 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -179,6 +179,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *); #ifdef CONFIG_PCI_IOV static int igb_vf_configure(struct igb_adapter *adapter, int vf); static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs); +static int igb_disable_sriov(struct pci_dev *dev); +static int igb_pci_disable_sriov(struct pci_dev *dev); #endif #ifdef CONFIG_PM @@ -1205,10 +1207,14 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, /* allocate q_vector and rings */ q_vector = adapter->q_vector[v_idx]; - if (!q_vector) + if (!q_vector) { q_vector = kzalloc(size, GFP_KERNEL); - else + } else if (size > ksize(q_vector)) { + kfree_rcu(q_vector, rcu); + q_vector = kzalloc(size, GFP_KERNEL); + } else { memset(q_vector, 0, size); + } if (!q_vector) return -ENOMEM; @@ -2645,7 +2651,11 @@ err_eeprom: if (hw->flash_address) iounmap(hw->flash_address); err_sw_init: + kfree(adapter->shadow_vfta); igb_clear_interrupt_scheme(adapter); +#ifdef CONFIG_PCI_IOV + igb_disable_sriov(pdev); +#endif pci_iounmap(pdev, hw->hw_addr); err_ioremap: free_netdev(netdev); @@ -2805,14 +2815,14 @@ static void igb_remove(struct pci_dev *pdev) */ igb_release_hw_control(adapter); - unregister_netdev(netdev); - - igb_clear_interrupt_scheme(adapter); - #ifdef CONFIG_PCI_IOV igb_disable_sriov(pdev); #endif + unregister_netdev(netdev); + + igb_clear_interrupt_scheme(adapter); + pci_iounmap(pdev, hw->hw_addr); if (hw->flash_address) iounmap(hw->flash_address); @@ -2847,7 +2857,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter) return; pci_sriov_set_totalvfs(pdev, 7); - igb_pci_enable_sriov(pdev, max_vfs); + igb_enable_sriov(pdev, max_vfs); #endif /* CONFIG_PCI_IOV */ } @@ -2888,6 +2898,14 @@ static 
void igb_init_queue_configuration(struct igb_adapter *adapter) adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); + igb_set_flag_queue_pairs(adapter, max_rss_queues); +} + +void igb_set_flag_queue_pairs(struct igb_adapter *adapter, + const u32 max_rss_queues) +{ + struct e1000_hw *hw = &adapter->hw; + /* Determine if we need to pair queues. */ switch (hw->mac.type) { case e1000_82575: @@ -2968,6 +2986,8 @@ static int igb_sw_init(struct igb_adapter *adapter) } #endif /* CONFIG_PCI_IOV */ + igb_probe_vfs(adapter); + igb_init_queue_configuration(adapter); /* Setup and initialize a copy of the hw vlan table array */ @@ -2980,8 +3000,6 @@ static int igb_sw_init(struct igb_adapter *adapter) return -ENOMEM; } - igb_probe_vfs(adapter); - /* Explicitly disable IRQ since the NIC can be in any state. */ igb_irq_disable(adapter); @@ -7401,6 +7419,7 @@ static int igb_resume(struct device *dev) if (igb_init_interrupt_scheme(adapter, true)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + rtnl_unlock(); return -ENOMEM; } @@ -7494,6 +7513,7 @@ static int igb_sriov_reinit(struct pci_dev *dev) igb_init_queue_configuration(adapter); if (igb_init_interrupt_scheme(adapter, true)) { + rtnl_unlock(); dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); return -ENOMEM; } diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index c3a9392cbc19..5982f28d521a 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -405,7 +405,7 @@ static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin) wr32(E1000_CTRL_EXT, ctrl_ext); } -static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin) +static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq) { static const u32 aux0_sel_sdp[IGB_N_SDP] = { AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3, @@ -424,6 +424,14 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin) TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1, TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1, }; + static const u32 ts_sdp_sel_fc0[IGB_N_SDP] = { + TS_SDP0_SEL_FC0, TS_SDP1_SEL_FC0, + TS_SDP2_SEL_FC0, TS_SDP3_SEL_FC0, + }; + static const u32 ts_sdp_sel_fc1[IGB_N_SDP] = { + TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, + TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, + }; static const u32 ts_sdp_sel_clr[IGB_N_SDP] = { TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1, TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1, @@ -445,11 +453,17 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin) tssdp &= ~AUX1_TS_SDP_EN; tssdp &= ~ts_sdp_sel_clr[pin]; - if (chan == 1) - tssdp |= ts_sdp_sel_tt1[pin]; - else - tssdp |= ts_sdp_sel_tt0[pin]; - + if (freq) { + if (chan == 1) + tssdp |= ts_sdp_sel_fc1[pin]; + else + tssdp |= ts_sdp_sel_fc0[pin]; + } else { + if (chan == 1) + tssdp |= ts_sdp_sel_tt1[pin]; + else + tssdp |= ts_sdp_sel_tt0[pin]; + } tssdp |= ts_sdp_en[pin]; wr32(E1000_TSSDP, tssdp); @@ -463,10 +477,10 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ptp_caps); struct e1000_hw *hw = &igb->hw; - u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh; + u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout; unsigned long flags; struct timespec ts; - int pin = -1; + int use_freq = 0, pin = -1; s64 ns; switch (rq->type) { @@ -511,40 +525,58 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, ts.tv_nsec = rq->perout.period.nsec; ns = 
timespec_to_ns(&ts); ns = ns >> 1; - if (on && ns < 500000LL) { - /* 2k interrupts per second is an awful lot. */ - return -EINVAL; + if (on && ns <= 70000000LL) { + if (ns < 8LL) + return -EINVAL; + use_freq = 1; } ts = ns_to_timespec(ns); if (rq->perout.index == 1) { - tsauxc_mask = TSAUXC_EN_TT1; - tsim_mask = TSINTR_TT1; + if (use_freq) { + tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1; + tsim_mask = 0; + } else { + tsauxc_mask = TSAUXC_EN_TT1; + tsim_mask = TSINTR_TT1; + } trgttiml = E1000_TRGTTIML1; trgttimh = E1000_TRGTTIMH1; + freqout = E1000_FREQOUT1; } else { - tsauxc_mask = TSAUXC_EN_TT0; - tsim_mask = TSINTR_TT0; + if (use_freq) { + tsauxc_mask = TSAUXC_EN_CLK0 | TSAUXC_ST0; + tsim_mask = 0; + } else { + tsauxc_mask = TSAUXC_EN_TT0; + tsim_mask = TSINTR_TT0; + } trgttiml = E1000_TRGTTIML0; trgttimh = E1000_TRGTTIMH0; + freqout = E1000_FREQOUT0; } spin_lock_irqsave(&igb->tmreg_lock, flags); tsauxc = rd32(E1000_TSAUXC); tsim = rd32(E1000_TSIM); + if (rq->perout.index == 1) { + tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1); + tsim &= ~TSINTR_TT1; + } else { + tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0); + tsim &= ~TSINTR_TT0; + } if (on) { int i = rq->perout.index; - - igb_pin_perout(igb, i, pin); + igb_pin_perout(igb, i, pin, use_freq); igb->perout[i].start.tv_sec = rq->perout.start.sec; igb->perout[i].start.tv_nsec = rq->perout.start.nsec; igb->perout[i].period.tv_sec = ts.tv_sec; igb->perout[i].period.tv_nsec = ts.tv_nsec; wr32(trgttimh, rq->perout.start.sec); wr32(trgttiml, rq->perout.start.nsec); + if (use_freq) + wr32(freqout, ns); tsauxc |= tsauxc_mask; tsim |= tsim_mask; - } else { - tsauxc &= ~tsauxc_mask; - tsim &= ~tsim_mask; } wr32(E1000_TSAUXC, tsauxc); wr32(E1000_TSIM, tsim); diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 95af14e139d7..686fa7184179 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -319,6 +319,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_ps_hdr_size, DMA_FROM_DEVICE); + buffer_info->dma = 0; skb_put(skb, hlen); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3e6a9319c718..7906234c5164 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -248,8 +248,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; struct pci_dev *pdev; - /* determine whether to use the the parent device - */ + /* determine whether to use the parent device */ if (ixgbe_pcie_from_parent(&adapter->hw)) pdev = adapter->pdev->bus->parent->self; else diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index b6f424f3b1a8..4615a949381d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2015 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -3462,14 +3462,14 @@ struct ixgbe_info { #define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF -#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P == 0) ? (0x4010) : (0x8010)) -#define IXGBE_KRM_LINK_CTRL_1(P) ((P == 0) ? (0x420C) : (0x820C)) -#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P == 0) ? (0x4634) : (0x8634)) -#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P == 0) ? (0x4638) : (0x8638)) -#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P == 0) ? (0x4B00) : (0x8B00)) -#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P == 0) ? (0x4E00) : (0x8E00)) -#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P == 0) ? (0x5520) : (0x9520)) -#define IXGBE_KRM_RX_ANA_CTL(P) ((P == 0) ? (0x5A00) : (0x9A00)) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) +#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) +#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) +#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00) +#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00) +#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520) +#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00) #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 3e8b1bfb1f2e..d9884fd15b45 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -27,6 +27,8 @@ #include #include #include +#include +#include #include #include #include @@ -299,6 +301,7 @@ /* Coalescing */ #define MVPP2_TXDONE_COAL_PKTS_THRESH 15 +#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL #define MVPP2_RX_COAL_PKTS 32 #define MVPP2_RX_COAL_USEC 100 @@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats { u64 tx_bytes; }; +/* Per-CPU port control */ +struct mvpp2_port_pcpu { + struct hrtimer tx_done_timer; + bool timer_scheduled; + /* Tasklet for egress finalization */ + struct tasklet_struct tx_done_tasklet; +}; + struct mvpp2_port { u8 id; @@ -679,6 +690,9 @@ struct mvpp2_port { u32 pending_cause_rx; struct napi_struct napi; + /* Per-CPU port control */ + struct mvpp2_port_pcpu __percpu *pcpu; + /* Flags */ unsigned long flags; @@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu { /* Array of transmitted skb */ struct sk_buff **tx_skb; + /* Array of transmitted buffers' physical addresses */ + dma_addr_t *tx_buffs; + /* Index of last TX DMA descriptor that was inserted */ int txq_put_index; @@ -913,8 +930,6 @@ struct mvpp2_bm_pool { /* Occupied buffers indicator */ atomic_t in_use; int in_use_thresh; - - spinlock_t lock; }; struct mvpp2_buff_hdr { @@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) } static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu, - struct sk_buff *skb) + struct sk_buff *skb, + struct mvpp2_tx_desc *tx_desc) { txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb; + if (skb) + txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] = + tx_desc->buf_phys_addr; txq_pcpu->txq_put_index++; if (txq_pcpu->txq_put_index == txq_pcpu->size) txq_pcpu->txq_put_index = 0; @@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev, bm_pool->pkt_size = 0; bm_pool->buf_num = 0; atomic_set(&bm_pool->in_use, 0); - spin_lock_init(&bm_pool->lock); return 0; } @@ -3647,7 +3665,6 @@ static struct 
mvpp2_bm_pool * mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, int pkt_size) { - unsigned long flags = 0; struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; int num; @@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, return NULL; } - spin_lock_irqsave(&new_pool->lock, flags); - if (new_pool->type == MVPP2_BM_FREE) new_pool->type = type; @@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, if (num != pkts_num) { WARN(1, "pool %d: %d of %d allocated\n", new_pool->id, num, pkts_num); - /* We need to undo the bufs_add() allocations */ - spin_unlock_irqrestore(&new_pool->lock, flags); return NULL; } } @@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, mvpp2_bm_pool_bufsize_set(port->priv, new_pool, MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); - spin_unlock_irqrestore(&new_pool->lock, flags); - return new_pool; } /* Initialize pools for swf */ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) { - unsigned long flags = 0; int rxq; if (!port->pool_long) { @@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) if (!port->pool_long) return -ENOMEM; - spin_lock_irqsave(&port->pool_long->lock, flags); port->pool_long->port_map |= (1 << port->id); - spin_unlock_irqrestore(&port->pool_long->lock, flags); for (rxq = 0; rxq < rxq_number; rxq++) mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); @@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) if (!port->pool_short) return -ENOMEM; - spin_lock_irqsave(&port->pool_short->lock, flags); port->pool_short->port_map |= (1 << port->id); - spin_unlock_irqrestore(&port->pool_short->lock, flags); for (rxq = 0; rxq < rxq_number; rxq++) mvpp2_rxq_short_pool_set(port, rxq, @@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg) mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), (MVPP2_CAUSE_MISC_SUM_MASK | - MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK | MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)); } @@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, rxq->time_coal = usec; } -/* Set threshold for TX_DONE pkts coalescing */ -static void mvpp2_tx_done_pkts_coal_set(void *arg) -{ - struct mvpp2_port *port = arg; - int queue; - u32 val; - - for (queue = 0; queue < txq_number; queue++) { - struct mvpp2_tx_queue *txq = port->txqs[queue]; - - val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) & - MVPP2_TRANSMITTED_THRESH_MASK; - mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val); - } -} - /* Free Tx queue skbuffs */ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, @@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, int i; for (i = 0; i < num; i++) { - struct mvpp2_tx_desc *tx_desc = txq->descs + - txq_pcpu->txq_get_index; + dma_addr_t buf_phys_addr = + txq_pcpu->tx_buffs[txq_pcpu->txq_get_index]; struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index]; mvpp2_txq_inc_get(txq_pcpu); @@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, if (!skb) continue; - dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr, - tx_desc->data_size, DMA_TO_DEVICE); + dma_unmap_single(port->dev->dev.parent, buf_phys_addr, + skb_headlen(skb), DMA_TO_DEVICE); dev_kfree_skb_any(skb); } } @@ -4433,7 +4421,7 @@ static inline struct 
mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, u32 cause) { - int queue = fls(cause >> 16) - 1; + int queue = fls(cause) - 1; return port->txqs[queue]; } @@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, netif_tx_wake_queue(nq); } +static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause) +{ + struct mvpp2_tx_queue *txq; + struct mvpp2_txq_pcpu *txq_pcpu; + unsigned int tx_todo = 0; + + while (cause) { + txq = mvpp2_get_tx_queue(port, cause); + if (!txq) + break; + + txq_pcpu = this_cpu_ptr(txq->pcpu); + + if (txq_pcpu->count) { + mvpp2_txq_done(port, txq, txq_pcpu); + tx_todo += txq_pcpu->count; + } + + cause &= ~(1 << txq->log_id); + } + return tx_todo; +} + /* Rx/Tx queue initialization/cleanup methods */ /* Allocate and initialize descriptors for aggr TXQ */ @@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port, txq_pcpu->tx_skb = kmalloc(txq_pcpu->size * sizeof(*txq_pcpu->tx_skb), GFP_KERNEL); - if (!txq_pcpu->tx_skb) { - dma_free_coherent(port->dev->dev.parent, - txq->size * MVPP2_DESC_ALIGNED_SIZE, - txq->descs, txq->descs_phys); - return -ENOMEM; - } + if (!txq_pcpu->tx_skb) + goto error; + + txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size * + sizeof(dma_addr_t), GFP_KERNEL); + if (!txq_pcpu->tx_buffs) + goto error; txq_pcpu->count = 0; txq_pcpu->reserved_num = 0; @@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port, } return 0; + +error: + for_each_present_cpu(cpu) { + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + kfree(txq_pcpu->tx_skb); + kfree(txq_pcpu->tx_buffs); + } + + dma_free_coherent(port->dev->dev.parent, + txq->size * MVPP2_DESC_ALIGNED_SIZE, + txq->descs, txq->descs_phys); + + return -ENOMEM; } /* Free allocated TXQ resources */ @@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, for_each_present_cpu(cpu) { txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); kfree(txq_pcpu->tx_skb); + kfree(txq_pcpu->tx_buffs); } if (txq->descs) @@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port) goto err_cleanup; } - on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1); on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); return 0; @@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev) } } +static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu) +{ + ktime_t interval; + + if (!port_pcpu->timer_scheduled) { + port_pcpu->timer_scheduled = true; + interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS); + hrtimer_start(&port_pcpu->tx_done_timer, interval, + HRTIMER_MODE_REL_PINNED); + } +} + +static void mvpp2_tx_proc_cb(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + unsigned int tx_todo, cause; + + if (!netif_running(dev)) + return; + port_pcpu->timer_scheduled = false; + + /* Process all the Tx queues */ + cause = (1 << txq_number) - 1; + tx_todo = mvpp2_tx_done(port, cause); + + /* Set the timer in case not all the packets were processed */ + if (tx_todo) + mvpp2_timer_set(port_pcpu); +} + +static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) +{ + struct mvpp2_port_pcpu *port_pcpu = container_of(timer, + struct mvpp2_port_pcpu, + tx_done_timer); + + tasklet_schedule(&port_pcpu->tx_done_tasklet); + + return HRTIMER_NORESTART; +} + /* Main RX/TX processing routines */ /* Display more 
error info */ @@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, if (i == (skb_shinfo(skb)->nr_frags - 1)) { /* Last descriptor */ tx_desc->command = MVPP2_TXD_L_DESC; - mvpp2_txq_inc_put(txq_pcpu, skb); + mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc); } else { /* Descriptor in the middle: Not First, Not Last */ tx_desc->command = 0; - mvpp2_txq_inc_put(txq_pcpu, NULL); + mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc); } } @@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) /* First and Last descriptor */ tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; tx_desc->command = tx_cmd; - mvpp2_txq_inc_put(txq_pcpu, skb); + mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc); } else { /* First but not Last */ tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; tx_desc->command = tx_cmd; - mvpp2_txq_inc_put(txq_pcpu, NULL); + mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc); /* Continue with other skb fragments */ if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { @@ -5255,6 +5323,17 @@ out: dev_kfree_skb_any(skb); } + /* Finalize TX processing */ + if (txq_pcpu->count >= txq->done_pkts_coal) + mvpp2_txq_done(port, txq, txq_pcpu); + + /* Set the timer in case not all frags were processed */ + if (txq_pcpu->count <= frags && txq_pcpu->count > 0) { + struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + + mvpp2_timer_set(port_pcpu); + } + return NETDEV_TX_OK; } @@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause) netdev_err(dev, "tx fifo underrun error\n"); } -static void mvpp2_txq_done_percpu(void *arg) +static int mvpp2_poll(struct napi_struct *napi, int budget) { - struct mvpp2_port *port = arg; - u32 cause_rx_tx, cause_tx, cause_misc; + u32 cause_rx_tx, cause_rx, cause_misc; + int rx_done = 0; + struct mvpp2_port *port = netdev_priv(napi->dev); /* Rx/Tx cause register * @@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg) */ cause_rx_tx = mvpp2_read(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); - cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; if (cause_misc) { @@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg) cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); } - /* Release TX descriptors */ - if (cause_tx) { - struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx); - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); - - if (txq_pcpu->count) - mvpp2_txq_done(port, txq, txq_pcpu); - } -} - -static int mvpp2_poll(struct napi_struct *napi, int budget) -{ - u32 cause_rx_tx, cause_rx; - int rx_done = 0; - struct mvpp2_port *port = netdev_priv(napi->dev); - - on_each_cpu(mvpp2_txq_done_percpu, port, 1); - - cause_rx_tx = mvpp2_read(port->priv, - MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; /* Process RX packets */ @@ -5561,6 +5621,8 @@ err_cleanup_rxqs: static int mvpp2_stop(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port_pcpu *port_pcpu; + int cpu; mvpp2_stop_dev(port); mvpp2_phy_disconnect(port); @@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev) on_each_cpu(mvpp2_interrupts_mask, port, 1); free_irq(port->irq, port); + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); + + hrtimer_cancel(&port_pcpu->tx_done_timer); + port_pcpu->timer_scheduled = false; + 
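		/* With the hrtimer cancelled, its callback can no longer
+		 * re-schedule the tx_done tasklet, so it is now safe to
+		 * kill the tasklet as well.
+		 */
+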
tasklet_kill(&port_pcpu->tx_done_tasklet); + } mvpp2_cleanup_rxqs(port); mvpp2_cleanup_txqs(port); @@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, txq->done_pkts_coal = c->tx_max_coalesced_frames; } - on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1); return 0; } @@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, { struct device_node *phy_node; struct mvpp2_port *port; + struct mvpp2_port_pcpu *port_pcpu; struct net_device *dev; struct resource *res; const char *dt_mac_addr; @@ -6044,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, int features; int phy_mode; int priv_common_regs_num = 2; - int err, i; + int err, i, cpu; dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number, rxq_number); @@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev, } mvpp2_port_power_up(port); + port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); + if (!port->pcpu) { + err = -ENOMEM; + goto err_free_txq_pcpu; + } + + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); + + hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; + port_pcpu->timer_scheduled = false; + + tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb, + (unsigned long)dev); + } + netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT); features = NETIF_F_SG | NETIF_F_IP_CSUM; dev->features = features | NETIF_F_RXCSUM; @@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, err = register_netdev(dev); if (err < 0) { dev_err(&pdev->dev, "failed to register netdev\n"); - goto err_free_txq_pcpu; + goto err_free_port_pcpu; } netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); @@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev, priv->port_list[id] = port; return 0; +err_free_port_pcpu: + free_percpu(port->pcpu); err_free_txq_pcpu: for (i = 0; i < txq_number; i++) free_percpu(port->txqs[i]->pcpu); @@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port) int i; unregister_netdev(port->dev); + free_percpu(port->pcpu); free_percpu(port->stats); for (i = 0; i < txq_number; i++) free_percpu(port->txqs[i]->pcpu); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index e9d7d90363a8..27ca4596775a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -42,22 +42,24 @@ #define MLX5E_MAX_NUM_TC 8 -#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x7 +#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 #define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa #define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd -#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x7 +#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd -#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (16 * 1024) +#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80 -#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7 +#define MLX5E_LOG_INDIR_RQT_SIZE 0x7 +#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) +#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE 
>> 1) #define MLX5E_TX_CQ_POLL_BUDGET 128 #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ #define MLX5E_SQ_BF_BUDGET 16 @@ -92,6 +94,7 @@ static const char vport_strings[][ETH_GSTRING_LEN] = { "lro_bytes", "rx_csum_good", "rx_csum_none", + "rx_csum_sw", "tx_csum_offload", "tx_queue_stopped", "tx_queue_wake", @@ -129,13 +132,14 @@ struct mlx5e_vport_stats { u64 lro_bytes; u64 rx_csum_good; u64 rx_csum_none; + u64 rx_csum_sw; u64 tx_csum_offload; u64 tx_queue_stopped; u64 tx_queue_wake; u64 tx_queue_dropped; u64 rx_wqe_err; -#define NUM_VPORT_COUNTERS 31 +#define NUM_VPORT_COUNTERS 32 }; static const char pport_strings[][ETH_GSTRING_LEN] = { @@ -215,6 +219,7 @@ struct mlx5e_pport_stats { static const char rq_stats_strings[][ETH_GSTRING_LEN] = { "packets", "csum_none", + "csum_sw", "lro_packets", "lro_bytes", "wqe_err" @@ -223,10 +228,11 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = { struct mlx5e_rq_stats { u64 packets; u64 csum_none; + u64 csum_sw; u64 lro_packets; u64 lro_bytes; u64 wqe_err; -#define NUM_RQ_STATS 5 +#define NUM_RQ_STATS 6 }; static const char sq_stats_strings[][ETH_GSTRING_LEN] = { @@ -268,11 +274,12 @@ struct mlx5e_params { u16 tx_cq_moderation_usec; u16 tx_cq_moderation_pkts; u16 min_rx_wqes; - u16 rx_hash_log_tbl_sz; bool lro_en; u32 lro_wqe_sz; - u8 rss_hfunc; u16 tx_max_inline; + u8 rss_hfunc; + u8 toeplitz_hash_key[40]; + u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; }; enum { @@ -569,6 +576,8 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv); void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); +int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix); + int mlx5e_open_locked(struct net_device *netdev); int mlx5e_close_locked(struct net_device *netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index b549797b315f..bce912688ca8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -628,7 +628,7 @@ static int mlx5e_set_settings(struct net_device *netdev, u32 link_modes; u32 speed; u32 eth_proto_cap, eth_proto_admin; - u8 port_status; + enum mlx5_port_status ps; int err; speed = ethtool_cmd_speed(cmd); @@ -662,33 +662,42 @@ static int mlx5e_set_settings(struct net_device *netdev, if (link_modes == eth_proto_admin) goto out; - err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN); - if (err) { - netdev_err(netdev, "%s: set port eth proto admin failed: %d\n", - __func__, err); - goto out; - } + mlx5_query_port_admin_status(mdev, &ps); + if (ps == MLX5_PORT_UP) + mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); + mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN); + if (ps == MLX5_PORT_UP) + mlx5_set_port_admin_status(mdev, MLX5_PORT_UP); - err = mlx5_query_port_status(mdev, &port_status); - if (err) - goto out; - - if (port_status == MLX5_PORT_DOWN) - return 0; - - err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN); - if (err) - goto out; - err = mlx5_set_port_status(mdev, MLX5_PORT_UP); out: return err; } +static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return sizeof(priv->params.toeplitz_hash_key); +} + +static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev) +{ + return MLX5E_INDIR_RQT_SIZE; +} + static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { struct mlx5e_priv *priv = 
netdev_priv(netdev); + if (indir) + memcpy(indir, priv->params.indirection_rqt, + sizeof(priv->params.indirection_rqt)); + + if (key) + memcpy(key, priv->params.toeplitz_hash_key, + sizeof(priv->params.toeplitz_hash_key)); + if (hfunc) *hfunc = priv->params.rss_hfunc; @@ -699,28 +708,60 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) { struct mlx5e_priv *priv = netdev_priv(dev); + bool close_open; int err = 0; - if (hfunc == ETH_RSS_HASH_NO_CHANGE) - return 0; - - if ((hfunc != ETH_RSS_HASH_XOR) && + if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && + (hfunc != ETH_RSS_HASH_XOR) && (hfunc != ETH_RSS_HASH_TOP)) return -EINVAL; mutex_lock(&priv->state_lock); - priv->params.rss_hfunc = hfunc; - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { - mlx5e_close_locked(dev); - err = mlx5e_open_locked(dev); + if (indir) { + memcpy(priv->params.indirection_rqt, indir, + sizeof(priv->params.indirection_rqt)); + mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT); } + close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) && + test_bit(MLX5E_STATE_OPENED, &priv->state); + if (close_open) + mlx5e_close_locked(dev); + + if (key) + memcpy(priv->params.toeplitz_hash_key, key, + sizeof(priv->params.toeplitz_hash_key)); + + if (hfunc != ETH_RSS_HASH_NO_CHANGE) + priv->params.rss_hfunc = hfunc; + + if (close_open) + err = mlx5e_open_locked(priv->netdev); + mutex_unlock(&priv->state_lock); return err; } +static int mlx5e_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *info, u32 *rule_locs) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err = 0; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = priv->params.num_channels; + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + static int mlx5e_get_tunable(struct net_device *dev, const struct ethtool_tunable *tuna, void *data) @@ -779,6 +820,42 @@ static int mlx5e_set_tunable(struct net_device *dev, return err; } +static void mlx5e_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + int err; + + err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause, + &pauseparam->tx_pause); + if (err) { + netdev_err(netdev, "%s: mlx5_query_port_pause failed:0x%x\n", + __func__, err); + } +} + +static int mlx5e_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + int err; + + if (pauseparam->autoneg) + return -EINVAL; + + err = mlx5_set_port_pause(mdev, + pauseparam->rx_pause ? 1 : 0, + pauseparam->tx_pause ? 
1 : 0); + if (err) { + netdev_err(netdev, "%s: mlx5_set_port_pause failed:0x%x\n", + __func__, err); + } + + return err; +} + const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, @@ -793,8 +870,13 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .set_coalesce = mlx5e_set_coalesce, .get_settings = mlx5e_get_settings, .set_settings = mlx5e_set_settings, + .get_rxfh_key_size = mlx5e_get_rxfh_key_size, + .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size, .get_rxfh = mlx5e_get_rxfh, .set_rxfh = mlx5e_set_rxfh, + .get_rxnfc = mlx5e_get_rxnfc, .get_tunable = mlx5e_get_tunable, .set_tunable = mlx5e_set_tunable, + .get_pauseparam = mlx5e_get_pauseparam, + .set_pauseparam = mlx5e_set_pauseparam, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 111427b33ec8..55166dd5b4ea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -149,6 +149,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->lro_packets = 0; s->lro_bytes = 0; s->rx_csum_none = 0; + s->rx_csum_sw = 0; s->rx_wqe_err = 0; for (i = 0; i < priv->params.num_channels; i++) { rq_stats = &priv->channel[i]->rq.stats; @@ -156,6 +157,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->lro_packets += rq_stats->lro_packets; s->lro_bytes += rq_stats->lro_bytes; s->rx_csum_none += rq_stats->csum_none; + s->rx_csum_sw += rq_stats->csum_sw; s->rx_wqe_err += rq_stats->wqe_err; for (j = 0; j < priv->params.num_tc; j++) { @@ -241,7 +243,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) /* Update calculated offload counters */ s->tx_csum_offload = s->tx_packets - tx_offload_none; - s->rx_csum_good = s->rx_packets - s->rx_csum_none; + s->rx_csum_good = s->rx_packets - s->rx_csum_none - + s->rx_csum_sw; mlx5e_update_pport_counters(priv); free_out: @@ -1174,27 +1177,32 @@ static int mlx5e_bits_invert(unsigned long a, int size) return inv; } +static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc) +{ + int i; + + for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) { + int ix = i; + + if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR) + ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE); + + ix = priv->params.indirection_rqt[ix]; + ix = ix % priv->params.num_channels; + MLX5_SET(rqtc, rqtc, rq_num[i], + test_bit(MLX5E_STATE_OPENED, &priv->state) ? + priv->channel[ix]->rq.rqn : + priv->drop_rq.rqn); + } +} + static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc, enum mlx5e_rqt_ix rqt_ix) { - int i; - int log_sz; switch (rqt_ix) { case MLX5E_INDIRECTION_RQT: - log_sz = priv->params.rx_hash_log_tbl_sz; - for (i = 0; i < (1 << log_sz); i++) { - int ix = i; - - if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR) - ix = mlx5e_bits_invert(i, log_sz); - - ix = ix % priv->params.num_channels; - MLX5_SET(rqtc, rqtc, rq_num[i], - test_bit(MLX5E_STATE_OPENED, &priv->state) ? - priv->channel[ix]->rq.rqn : - priv->drop_rq.rqn); - } + mlx5e_fill_indir_rqt_rqns(priv, rqtc); break; @@ -1214,13 +1222,10 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) u32 *in; void *rqtc; int inlen; - int log_sz; int sz; int err; - log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 : - priv->params.rx_hash_log_tbl_sz; - sz = 1 << log_sz; + sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 
1 : MLX5E_INDIR_RQT_SIZE; inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; in = mlx5_vzalloc(inlen); @@ -1241,19 +1246,16 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) return err; } -static int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) +int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) { struct mlx5_core_dev *mdev = priv->mdev; u32 *in; void *rqtc; int inlen; - int log_sz; int sz; int err; - log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 : - priv->params.rx_hash_log_tbl_sz; - sz = 1 << log_sz; + sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE; inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz; in = mlx5_vzalloc(inlen); @@ -1301,7 +1303,7 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv) ROUGH_MAX_L2_L3_HDR_SZ) >> 8); MLX5_SET(tirc, tirc, lro_timeout_period_usecs, MLX5_CAP_ETH(priv->mdev, - lro_timer_supported_periods[3])); + lro_timer_supported_periods[2])); } static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) @@ -1611,7 +1613,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) rx_hash_toeplitz_key); MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); - netdev_rss_key_fill(rss_key, len); + memcpy(rss_key, priv->params.toeplitz_hash_key, len); } break; } @@ -1911,9 +1913,10 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, struct net_device *netdev, - int num_comp_vectors) + int num_channels) { struct mlx5e_priv *priv = netdev_priv(netdev); + int i; priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; @@ -1930,22 +1933,22 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); priv->params.min_rx_wqes = MLX5E_PARAMS_DEFAULT_MIN_RX_WQES; - priv->params.rx_hash_log_tbl_sz = - (order_base_2(num_comp_vectors) > - MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ? 
- order_base_2(num_comp_vectors) : - MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ; priv->params.num_tc = 1; priv->params.default_vlan_prio = 0; priv->params.rss_hfunc = ETH_RSS_HASH_XOR; - priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap); + netdev_rss_key_fill(priv->params.toeplitz_hash_key, + sizeof(priv->params.toeplitz_hash_key)); + + for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) + priv->params.indirection_rqt[i] = i % num_channels; + priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; priv->mdev = mdev; priv->netdev = netdev; - priv->params.num_channels = num_comp_vectors; + priv->params.num_channels = num_channels; priv->default_vlan_prio = priv->params.default_vlan_prio; spin_lock_init(&priv->async_events_spinlock); @@ -2034,19 +2037,20 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) { struct net_device *netdev; struct mlx5e_priv *priv; - int ncv = mdev->priv.eq_table.num_comp_vectors; + int nch = min_t(int, mdev->priv.eq_table.num_comp_vectors, + MLX5E_MAX_NUM_CHANNELS); int err; if (mlx5e_check_required_hca_cap(mdev)) return NULL; - netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv); + netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch); if (!netdev) { mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); return NULL; } - mlx5e_build_netdev_priv(mdev, netdev, ncv); + mlx5e_build_netdev_priv(mdev, netdev, nch); mlx5e_build_netdev(netdev); netif_carrier_off(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 9a9374131f5b..cf0098596e85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -111,10 +111,12 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe) tcp = (struct tcphdr *)(skb->data + ETH_HLEN + sizeof(struct iphdr)); ipv6 = NULL; + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; } else { tcp = (struct tcphdr *)(skb->data + ETH_HLEN + sizeof(struct ipv6hdr)); ipv4 = NULL; + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; } if (get_cqe_lro_tcppsh(cqe)) @@ -149,6 +151,38 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht); } +static inline bool is_first_ethertype_ip(struct sk_buff *skb) +{ + __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto; + + return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6)); +} + +static inline void mlx5e_handle_csum(struct net_device *netdev, + struct mlx5_cqe64 *cqe, + struct mlx5e_rq *rq, + struct sk_buff *skb) +{ + if (unlikely(!(netdev->features & NETIF_F_RXCSUM))) + goto csum_none; + + if (likely(cqe->hds_ip_ext & CQE_L4_OK)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else if (is_first_ethertype_ip(skb)) { + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = csum_unfold((__force __sum16)cqe->check_sum); + rq->stats.csum_sw++; + } else { + goto csum_none; + } + + return; + +csum_none: + skb->ip_summed = CHECKSUM_NONE; + rq->stats.csum_none++; +} + static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, struct sk_buff *skb) @@ -162,20 +196,12 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; if (lro_num_seg > 1) { mlx5e_lro_update_hdr(skb, cqe); - skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); rq->stats.lro_packets++; rq->stats.lro_bytes += cqe_bcnt; } - if 
(likely(netdev->features & NETIF_F_RXCSUM) && - (cqe->hds_ip_ext & CQE_L2_OK) && - (cqe->hds_ip_ext & CQE_L3_OK) && - (cqe->hds_ip_ext & CQE_L4_OK)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else { - skb->ip_summed = CHECKSUM_NONE; - rq->stats.csum_none++; - } + mlx5e_handle_csum(netdev, cqe, rq, skb); skb->protocol = eth_type_trans(skb, netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 603a8b0908ee..03aabdd79abe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -391,6 +391,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) /* disable cmdif checksum */ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); + MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); + err = set_caps(dev, set_ctx, set_sz); query_ex: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 70147999f657..821caaab9bfb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -216,22 +216,25 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, } EXPORT_SYMBOL_GPL(mlx5_set_port_proto); -int mlx5_set_port_status(struct mlx5_core_dev *dev, - enum mlx5_port_status status) +int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, + enum mlx5_port_status status) { u32 in[MLX5_ST_SZ_DW(paos_reg)]; u32 out[MLX5_ST_SZ_DW(paos_reg)]; memset(in, 0, sizeof(in)); + MLX5_SET(paos_reg, in, local_port, 1); MLX5_SET(paos_reg, in, admin_status, status); MLX5_SET(paos_reg, in, ase, 1); return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PAOS, 0, 1); } +EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status); -int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status) +int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, + enum mlx5_port_status *status) { u32 in[MLX5_ST_SZ_DW(paos_reg)]; u32 out[MLX5_ST_SZ_DW(paos_reg)]; @@ -239,14 +242,17 @@ int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status) memset(in, 0, sizeof(in)); + MLX5_SET(paos_reg, in, local_port, 1); + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PAOS, 0, 0); if (err) return err; - *status = MLX5_GET(paos_reg, out, oper_status); + *status = MLX5_GET(paos_reg, out, admin_status); return err; } +EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status); static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu, int *max_mtu, int *oper_mtu, u8 port) @@ -328,3 +334,45 @@ int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, return 0; } EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap); + +int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause) +{ + u32 in[MLX5_ST_SZ_DW(pfcc_reg)]; + u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; + int err; + + memset(in, 0, sizeof(in)); + MLX5_SET(pfcc_reg, in, local_port, 1); + MLX5_SET(pfcc_reg, in, pptx, tx_pause); + MLX5_SET(pfcc_reg, in, pprx, rx_pause); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PFCC, 0, 1); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_set_port_pause); + +int mlx5_query_port_pause(struct mlx5_core_dev *dev, + u32 *rx_pause, u32 *tx_pause) +{ + u32 in[MLX5_ST_SZ_DW(pfcc_reg)]; + u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; + int err; + + memset(in, 0, sizeof(in)); + MLX5_SET(pfcc_reg, in, local_port, 1); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PFCC, 0, 0); + if (err) + return err; 
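/* Editorial note: the PFCC register reports both pause directions in one
 * query; either output pointer may be NULL, so the code below copies out
 * only the fields the caller asked for.
 */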
+ + if (rx_pause) + *rx_pause = MLX5_GET(pfcc_reg, out, pprx); + + if (tx_pause) + *tx_pause = MLX5_GET(pfcc_reg, out, pptx); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_pause); diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 8d1080da49b5..2941d9c5ae48 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -12,7 +12,7 @@ config MLXSW_CORE config MLXSW_PCI tristate "PCI bus implementation for Mellanox Technologies Switch ASICs" - depends on PCI && MLXSW_CORE + depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE default m ---help--- This is PCI bus implementation for Mellanox Technologies Switch ASICs. diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index ad66ae44a0d7..09325b72d524 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -865,6 +865,16 @@ static struct mlxsw_core *__mlxsw_core_get(void *driver_priv) return container_of(driver_priv, struct mlxsw_core, driver_priv); } +bool mlxsw_core_skb_transmit_busy(void *driver_priv, + const struct mlxsw_tx_info *tx_info) +{ + struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv); + + return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv, + tx_info); +} +EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy); + int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb, const struct mlxsw_tx_info *tx_info) { @@ -1063,7 +1073,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core, mlxsw_core->emad.tid - 1); mlxsw_core_buf_dump_dbg(mlxsw_core, mlxsw_core->emad.resp_skb->data, - skb->len); + mlxsw_core->emad.resp_skb->len); dev_kfree_skb(mlxsw_core->emad.resp_skb); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 2280b319c362..165808471188 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -73,6 +73,9 @@ struct mlxsw_tx_info { bool is_emad; }; +bool mlxsw_core_skb_transmit_busy(void *driver_priv, + const struct mlxsw_tx_info *tx_info); + int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb, const struct mlxsw_tx_info *tx_info); @@ -177,6 +180,8 @@ struct mlxsw_bus { int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core, const struct mlxsw_config_profile *profile); void (*fini)(void *bus_priv); + bool (*skb_transmit_busy)(void *bus_priv, + const struct mlxsw_tx_info *tx_info); int (*skb_transmit)(void *bus_priv, struct sk_buff *skb, const struct mlxsw_tx_info *tx_info); int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod, diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index 4d0ac882bec3..ffd55d030ce2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -62,7 +62,7 @@ __mlxsw_item_offset(struct mlxsw_item *item, unsigned short index, if (item->offset % typesize != 0 || item->step % typesize != 0 || item->in_step_offset % typesize != 0) { - pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%lx)\n", + pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n", item->name, item->offset, item->step, item->in_step_offset, typesize); BUG(); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 298ead5b42ca..a34f4742aa00 100644 --- 
a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -667,6 +667,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, char *wqe; struct sk_buff *skb; struct mlxsw_rx_info rx_info; + u16 byte_count; int err; elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); @@ -686,7 +687,10 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe); rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe); - skb_put(skb, mlxsw_pci_cqe_byte_count_get(cqe)); + byte_count = mlxsw_pci_cqe_byte_count_get(cqe); + if (mlxsw_pci_cqe_crc_get(cqe)) + byte_count -= ETH_FCS_LEN; + skb_put(skb, byte_count); mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); put_new_skb: @@ -1439,6 +1443,15 @@ mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci, return mlxsw_pci_sdq_get(mlxsw_pci, sdqn); } +static bool mlxsw_pci_skb_transmit_busy(void *bus_priv, + const struct mlxsw_tx_info *tx_info) +{ + struct mlxsw_pci *mlxsw_pci = bus_priv; + struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info); + + return !mlxsw_pci_queue_elem_info_producer_get(q); +} + static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb, const struct mlxsw_tx_info *tx_info) { @@ -1621,11 +1634,12 @@ err_in_mbox_map: } static const struct mlxsw_bus mlxsw_pci_bus = { - .kind = "pci", - .init = mlxsw_pci_init, - .fini = mlxsw_pci_fini, - .skb_transmit = mlxsw_pci_skb_transmit, - .cmd_exec = mlxsw_pci_cmd_exec, + .kind = "pci", + .init = mlxsw_pci_init, + .fini = mlxsw_pci_fini, + .skb_transmit_busy = mlxsw_pci_skb_transmit_busy, + .skb_transmit = mlxsw_pci_skb_transmit, + .cmd_exec = mlxsw_pci_cmd_exec, }; static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci) diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h index 887af8459149..1ef9664b4512 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h @@ -155,6 +155,12 @@ MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14); */ MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8); +/* pci_cqe_crc + * Length includes CRC. Indicates the length field includes + * the packet's CRC. + */ +MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1); + /* pci_cqe_e * CQE with Error. */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index b5a72f8e78b1..096e1c12175a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -150,6 +150,64 @@ static inline void mlxsw_reg_smid_pack(char *payload, u16 mid) mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1); } +/* SSPR - Switch System Port Record Register + * ----------------------------------------- + * Configures the system port to local port mapping. + */ +#define MLXSW_REG_SSPR_ID 0x2008 +#define MLXSW_REG_SSPR_LEN 0x8 + +static const struct mlxsw_reg_info mlxsw_reg_sspr = { + .id = MLXSW_REG_SSPR_ID, + .len = MLXSW_REG_SSPR_LEN, +}; + +/* reg_sspr_m + * Master - if set, then the record describes the master system port. + * This is needed in case a local port is mapped into several system ports + * (for multipathing). That number will be reported as the source system + * port when packets are forwarded to the CPU. Only one master port is allowed + * per local port. + * + * Note: Must be set for Spectrum. + * Access: RW + */ +MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1); + +/* reg_sspr_local_port + * Local port number.
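 * (Editorial note: this is the per-ASIC port index, in contrast to the
 * stacking-domain-wide system_port field described below.)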
+ * + * Access: RW + */ +MLXSW_ITEM32(reg, sspr, local_port, 0x00, 16, 8); + +/* reg_sspr_sub_port + * Virtual port within the physical port. + * Should be set to 0 when virtual ports are not enabled on the port. + * + * Access: RW + */ +MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8); + +/* reg_sspr_system_port + * Unique identifier within the stacking domain that represents all the ports + * that are available in the system (external ports). + * + * Currently, only single-ASIC configurations are supported, so we default to + * 1:1 mapping between system ports and local ports. + * Access: Index + */ +MLXSW_ITEM32(reg, sspr, system_port, 0x04, 0, 16); + +static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port) +{ + MLXSW_REG_ZERO(sspr, payload); + mlxsw_reg_sspr_m_set(payload, 1); + mlxsw_reg_sspr_local_port_set(payload, local_port); + mlxsw_reg_sspr_sub_port_set(payload, 0); + mlxsw_reg_sspr_system_port_set(payload, local_port); +} + /* SPMS - Switch Port MSTP/RSTP State Register * ------------------------------------------- * Configures the spanning tree state of a physical port. @@ -1216,6 +1274,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SPAD"; case MLXSW_REG_SMID_ID: return "SMID"; + case MLXSW_REG_SSPR_ID: + return "SSPR"; case MLXSW_REG_SPMS_ID: return "SPMS"; case MLXSW_REG_SFGC_ID: diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 29b46eef9769..3e52ee93438c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -245,6 +245,16 @@ static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid) return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl); } +static int +mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port) +{ + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + char sspr_pl[MLXSW_REG_SSPR_LEN]; + + mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port); + return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl); +} + static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port, bool *p_usable) { @@ -290,37 +300,34 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb, .local_port = mlxsw_sx_port->local_port, .is_emad = false, }; - struct sk_buff *skb_old = NULL; + u64 len; int err; - if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { - struct sk_buff *skb_new; + if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info)) + return NETDEV_TX_BUSY; - skb_old = skb; - skb_new = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); - if (!skb_new) { + if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { + struct sk_buff *skb_orig = skb; + + skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); + if (!skb) { this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped); - dev_kfree_skb_any(skb_old); + dev_kfree_skb_any(skb_orig); return NETDEV_TX_OK; } - skb = skb_new; } mlxsw_sx_txhdr_construct(skb, &tx_info); + len = skb->len; + /* Due to a race we might fail here because of a full queue. In that + * unlikely case we simply drop the packet. 
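 * (Editorial note: the window is between the mlxsw_core_skb_transmit_busy()
 * check at the top of this function and the transmit call below; for
 * example, a concurrent transmit on another CPU may consume the last free
 * descriptor slot in the meantime.)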
+ */ err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info); - if (err == -EAGAIN) { - if (skb_old) - dev_kfree_skb_any(skb); - return NETDEV_TX_BUSY; - } - - if (skb_old) - dev_kfree_skb_any(skb_old); if (!err) { pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats); u64_stats_update_begin(&pcpu_stats->syncp); pcpu_stats->tx_packets++; - pcpu_stats->tx_bytes += skb->len; + pcpu_stats->tx_bytes += len; u64_stats_update_end(&pcpu_stats->syncp); } else { this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped); @@ -1001,6 +1008,13 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) goto port_not_usable; } + err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n", + mlxsw_sx_port->local_port); + goto err_port_system_port_mapping_set; + } + err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0); if (err) { dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n", @@ -1061,6 +1075,7 @@ err_port_stp_state_set: err_port_mtu_set: err_port_speed_set: err_port_swid_set: +err_port_system_port_mapping_set: port_not_usable: err_port_module_check: err_dev_addr_get: @@ -1079,6 +1094,7 @@ static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port) unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */ mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT); free_percpu(mlxsw_sx_port->pcpu_stats); + free_netdev(mlxsw_sx_port->dev); } static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 055f3763e577..06bcc734fe8d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -24,9 +24,7 @@ #include #include #include - #include - #include #include #include @@ -39,8 +37,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 3 -#define _QLCNIC_LINUX_SUBVERSION 62 -#define QLCNIC_LINUX_VERSIONID "5.3.62" +#define _QLCNIC_LINUX_SUBVERSION 63 +#define QLCNIC_LINUX_VERSIONID "5.3.63" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) @@ -926,6 +924,7 @@ struct qlcnic_mac_vlan_list { #define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5 #define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7 #define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9 +#define QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP BIT_13 #define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD BIT_0 #define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD BIT_1 @@ -2291,8 +2290,9 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops; #define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 #define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030 -#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830 #define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430 +#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830 +#define PCI_DEVICE_ID_QLOGIC_VF_QLE8C30 0x8C30 #define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040 #define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440 @@ -2319,7 +2319,8 @@ static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter) (device == PCI_DEVICE_ID_QLOGIC_QLE8830) || (device == PCI_DEVICE_ID_QLOGIC_QLE844X) || (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) || - (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false; + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) || + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? 
true : false; return status; } @@ -2335,7 +2336,8 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter) bool status; status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) || - (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false; + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) || + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false; return status; } @@ -2351,7 +2353,8 @@ static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter) { unsigned short device = adapter->pdev->device; - return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false; + return ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) || + (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false; } static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 840bf36b5e9d..5ab3adf88166 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -5,14 +5,15 @@ * See LICENSE.qlcnic for copyright and licensing details. */ -#include "qlcnic.h" -#include "qlcnic_sriov.h" #include #include #include #include #include +#include "qlcnic.h" +#include "qlcnic_sriov.h" + static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *); static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8); static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8, @@ -118,6 +119,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2}, {QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50}, {QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1}, + {QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP, 4, 1}, }; const u32 qlcnic_83xx_ext_reg_tbl[] = { @@ -3513,6 +3515,31 @@ out: qlcnic_free_mbx_args(&cmd); } +#define QLCNIC_83XX_ADD_PORT0 BIT_0 +#define QLCNIC_83XX_ADD_PORT1 BIT_1 +#define QLCNIC_83XX_EXTENDED_MEM_SIZE 13 /* In MB */ +int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *adapter) +{ + struct qlcnic_cmd_args cmd; + int err; + + err = qlcnic_alloc_mbx_args(&cmd, adapter, + QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP); + if (err) + return err; + + cmd.req.arg[1] = (QLCNIC_83XX_ADD_PORT0 | QLCNIC_83XX_ADD_PORT1); + cmd.req.arg[2] = QLCNIC_83XX_EXTENDED_MEM_SIZE; + cmd.req.arg[3] = QLCNIC_83XX_EXTENDED_MEM_SIZE; + + err = qlcnic_issue_cmd(adapter, &cmd); + if (err) + dev_err(&adapter->pdev->dev, + "failed to issue extend iSCSI minidump capability\n"); + + return err; +} + int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter) { u32 major, minor, sub; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 69f828eb42cf..331ae2c20f40 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -10,6 +10,7 @@ #include #include + #include "qlcnic_hw.h" #define QLCNIC_83XX_BAR0_LENGTH 0x4000 @@ -626,6 +627,7 @@ int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *); void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data); +int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *); int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *); int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *); void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c 
b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 753ea8bad953..bf892160dd5f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1384,7 +1384,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) size_t size; u64 addr; - temp = kzalloc(fw->size, GFP_KERNEL); + temp = vzalloc(fw->size); if (!temp) { release_firmware(fw); fw_info->fw = NULL; @@ -1430,7 +1430,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) exit: release_firmware(fw); fw_info->fw = NULL; - kfree(temp); + vfree(temp); return ret; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 75ee9e4ced51..509b596cf1e8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -5,13 +5,13 @@ * See LICENSE.qlcnic for copyright and licensing details. */ -#include "qlcnic.h" -#include "qlcnic_hdr.h" - #include #include #include +#include "qlcnic.h" +#include "qlcnic_hdr.h" + #define MASK(n) ((1ULL<<(n))-1) #define OCM_WIN_P3P(addr) (addr & 0xffc0000) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index cbe2399c30a0..4bb33af8e2b3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -109,6 +109,7 @@ enum qlcnic_regs { #define QLCNIC_CMD_GET_LED_CONFIG 0x6A #define QLCNIC_CMD_83XX_SET_DRV_VER 0x6F #define QLCNIC_CMD_ADD_RCV_RINGS 0x0B +#define QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP 0x37 #define QLCNIC_INTRPT_INTX 1 #define QLCNIC_INTRPT_MSIX 3 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 7dbab3c20db5..8b08b20e8b30 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -7,11 +7,6 @@ #include #include - -#include "qlcnic.h" -#include "qlcnic_sriov.h" -#include "qlcnic_hw.h" - #include #include #include @@ -25,6 +20,10 @@ #include #endif +#include "qlcnic.h" +#include "qlcnic_sriov.h" +#include "qlcnic_hw.h" + MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(QLCNIC_LINUX_VERSIONID); @@ -111,8 +110,9 @@ static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter) static const struct pci_device_id qlcnic_pci_tbl[] = { ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X), ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X), - ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830), ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X), + ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830), + ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE8C30), ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X), ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X), {0,} @@ -1149,6 +1149,7 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar) case PCI_DEVICE_ID_QLOGIC_QLE844X: case PCI_DEVICE_ID_QLOGIC_VF_QLE834X: case PCI_DEVICE_ID_QLOGIC_VF_QLE844X: + case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30: *bar = QLCNIC_83XX_BAR0_LENGTH; break; default: @@ -2491,6 +2492,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) qlcnic_83xx_register_map(ahw); break; case PCI_DEVICE_ID_QLOGIC_VF_QLE834X: + case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30: case PCI_DEVICE_ID_QLOGIC_VF_QLE844X: qlcnic_sriov_vf_register_map(ahw); break; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index 332bb8a3f430..cda9e604a95f 100644 --- 
a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -5,13 +5,13 @@ * See LICENSE.qlcnic for copyright and licensing details. */ +#include + #include "qlcnic.h" #include "qlcnic_hdr.h" #include "qlcnic_83xx_hw.h" #include "qlcnic_hw.h" -#include - #define QLC_83XX_MINIDUMP_FLASH 0x520000 #define QLC_83XX_OCM_INDEX 3 #define QLC_83XX_PCI_INDEX 0 @@ -1388,27 +1388,60 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) fw_dump->clr = 1; snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name); netdev_info(adapter->netdev, - "Dump data %d bytes captured, template header size %d bytes\n", - fw_dump->size, fw_dump->tmpl_hdr_size); + "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n", + fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size, + fw_dump->tmpl_hdr); /* Send a udev event to notify availability of FW dump */ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg); return 0; } +static inline bool +qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter) +{ + /* For special adapters (with 0x8830 device ID), where the iSCSI firmware + * dump needs to be captured as part of the regular firmware dump + * collection process, the firmware exports its capability through + * capability registers + */ + return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) && + (adapter->ahw->extra_capability[0] & + QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP)); +} + void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) { u32 prev_version, current_version; struct qlcnic_hardware_context *ahw = adapter->ahw; struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump; struct pci_dev *pdev = adapter->pdev; + bool extended = false; prev_version = adapter->fw_version; current_version = qlcnic_83xx_get_fw_version(adapter); if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) { vfree(fw_dump->tmpl_hdr); + + if (qlcnic_83xx_md_check_extended_dump_capability(adapter)) + extended = !qlcnic_83xx_extend_md_capab(adapter); + if (!qlcnic_fw_cmd_get_minidump_temp(adapter)) dev_info(&pdev->dev, "Supports FW dump capability\n"); + + /* Once we have a minidump template with extended iSCSI dump + * capability, update the minidump capture mask to 0x1f as + * per FW requirement + */ + if (extended) { + struct qlcnic_83xx_dump_template_hdr *hdr; + + hdr = fw_dump->tmpl_hdr; + hdr->drv_cap_mask = 0x1f; + fw_dump->cap_mask = 0x1f; + dev_info(&pdev->dev, + "Extended iSCSI dump capability and updated capture mask to 0x1f\n"); + } } } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h index 4677b2edccca..017d8c2c8285 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h @@ -8,10 +8,11 @@ #ifndef _QLCNIC_83XX_SRIOV_H_ #define _QLCNIC_83XX_SRIOV_H_ -#include "qlcnic.h" #include #include +#include "qlcnic.h" + extern const u32 qlcnic_83xx_reg_tbl[]; extern const u32 qlcnic_83xx_ext_reg_tbl[]; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index e6312465fe45..546cd5f1c85a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -5,10 +5,11 @@ * See LICENSE.qlcnic for copyright and licensing details.
*/ +#include + #include "qlcnic_sriov.h" #include "qlcnic.h" #include "qlcnic_83xx_hw.h" -#include #define QLC_BC_COMMAND 0 #define QLC_BC_RESPONSE 1 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index a29538b86edf..afd687e5e779 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -5,9 +5,10 @@ * See LICENSE.qlcnic for copyright and licensing details. */ +#include + #include "qlcnic_sriov.h" #include "qlcnic.h" -#include #define QLCNIC_SRIOV_VF_MAX_MAC 7 #define QLC_VF_MIN_TX_RATE 100 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 05c28f2c6df7..ccbb04503b27 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -7,10 +7,6 @@ #include #include - -#include "qlcnic.h" -#include "qlcnic_hw.h" - #include #include #include @@ -24,6 +20,9 @@ #include #endif +#include "qlcnic.h" +#include "qlcnic_hw.h" + int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable) { return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 3df51faf18ae..f790f61ea78a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4875,10 +4875,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_47: case RTL_GIGA_MAC_VER_48: + RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; case RTL_GIGA_MAC_VER_49: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: - RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); + RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); break; default: RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index b77e0e7307d4..a7cb74ac4758 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -4264,6 +4264,16 @@ static int rocker_port_change_proto_down(struct net_device *dev, return 0; } +static void rocker_port_neigh_destroy(struct neighbour *n) +{ + struct rocker_port *rocker_port = netdev_priv(n->dev); + int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT; + __be32 ip_addr = *(__be32 *)n->primary_key; + + rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE, + flags, ip_addr, n->ha); +} + static const struct net_device_ops rocker_port_netdev_ops = { .ndo_open = rocker_port_open, .ndo_stop = rocker_port_stop, @@ -4278,6 +4288,7 @@ static const struct net_device_ops rocker_port_netdev_ops = { .ndo_fdb_dump = switchdev_port_fdb_dump, .ndo_get_phys_port_name = rocker_port_get_phys_port_name, .ndo_change_proto_down = rocker_port_change_proto_down, + .ndo_neigh_destroy = rocker_port_neigh_destroy, }; /******************** @@ -4544,6 +4555,7 @@ static int rocker_port_fdb_dump(const struct rocker_port *rocker_port, if (found->key.pport != rocker_port->pport) continue; fdb->addr = found->key.addr; + fdb->ndm_state = NUD_REACHABLE; fdb->vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id); err = obj->cb(rocker_port->dev, obj); @@ -4926,6 +4938,7 @@ static void rocker_remove_ports(const struct rocker *rocker) rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, ROCKER_OP_FLAG_REMOVE); unregister_netdev(rocker_port->dev); + free_netdev(rocker_port->dev); } 
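/* Editorial note: the added free_netdev() releases the net_device allocated
 * at port-creation time; without it, every removed port leaked its netdev.
 * The same leak is fixed for mlxsw_sx_port_remove() earlier in this series.
 */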
kfree(rocker->ports); } @@ -5181,7 +5194,8 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_probe_ports; } - dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id); + dev_info(&pdev->dev, "Rocker switch with id %*phN\n", + (int)sizeof(rocker->hw.id), &rocker->hw.id); return 0; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 959aeeade0c9..6eef3251d833 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -59,7 +59,9 @@ #include #include #include +#include #include +#include #include "smsc911x.h" @@ -2362,59 +2364,48 @@ static const struct smsc911x_ops shifted_smsc911x_ops = { .tx_writefifo = smsc911x_tx_writefifo_shift, }; -#ifdef CONFIG_OF -static int smsc911x_probe_config_dt(struct smsc911x_platform_config *config, - struct device_node *np) +static int smsc911x_probe_config(struct smsc911x_platform_config *config, + struct device *dev) { - const char *mac; + int phy_interface; u32 width = 0; - if (!np) - return -ENODEV; + phy_interface = device_get_phy_mode(dev); + if (phy_interface < 0) + return phy_interface; - config->phy_interface = of_get_phy_mode(np); + config->phy_interface = phy_interface; - mac = of_get_mac_address(np); - if (mac) - memcpy(config->mac, mac, ETH_ALEN); + device_get_mac_address(dev, config->mac, ETH_ALEN); - of_property_read_u32(np, "reg-shift", &config->shift); + device_property_read_u32(dev, "reg-shift", &config->shift); - of_property_read_u32(np, "reg-io-width", &width); + device_property_read_u32(dev, "reg-io-width", &width); if (width == 4) config->flags |= SMSC911X_USE_32BIT; else config->flags |= SMSC911X_USE_16BIT; - if (of_get_property(np, "smsc,irq-active-high", NULL)) + if (device_property_present(dev, "smsc,irq-active-high")) config->irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH; - if (of_get_property(np, "smsc,irq-push-pull", NULL)) + if (device_property_present(dev, "smsc,irq-push-pull")) config->irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL; - if (of_get_property(np, "smsc,force-internal-phy", NULL)) + if (device_property_present(dev, "smsc,force-internal-phy")) config->flags |= SMSC911X_FORCE_INTERNAL_PHY; - if (of_get_property(np, "smsc,force-external-phy", NULL)) + if (device_property_present(dev, "smsc,force-external-phy")) config->flags |= SMSC911X_FORCE_EXTERNAL_PHY; - if (of_get_property(np, "smsc,save-mac-address", NULL)) + if (device_property_present(dev, "smsc,save-mac-address")) config->flags |= SMSC911X_SAVE_MAC_ADDRESS; return 0; } -#else -static inline int smsc911x_probe_config_dt( - struct smsc911x_platform_config *config, - struct device_node *np) -{ - return -ENODEV; -} -#endif /* CONFIG_OF */ static int smsc911x_drv_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; struct net_device *dev; struct smsc911x_data *pdata; struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev); @@ -2478,7 +2469,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev) goto out_disable_resources; } - retval = smsc911x_probe_config_dt(&pdata->config, np); + retval = smsc911x_probe_config(&pdata->config, &pdev->dev); if (retval && config) { /* copy config parameters across to pdata */ memcpy(&pdata->config, config, sizeof(pdata->config)); @@ -2654,6 +2645,12 @@ static const struct of_device_id smsc911x_dt_ids[] = { MODULE_DEVICE_TABLE(of, smsc911x_dt_ids); #endif +static const struct acpi_device_id smsc911x_acpi_match[] = { + { "ARMH9118", 0 }, + { } +}; 
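/* Illustrative sketch (editorial; the foo_* names are hypothetical): combined
 * with the device_property_*() conversion earlier in this file, an ACPI id
 * table like the one above lets a single probe path serve both DT and ACPI
 * firmware without CONFIG_OF guards:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_config cfg = { };
 *		int phy_mode = device_get_phy_mode(&pdev->dev);
 *
 *		if (phy_mode < 0)
 *			return phy_mode;
 *		cfg.phy_interface = phy_mode;
 *		device_property_read_u32(&pdev->dev, "reg-shift", &cfg.shift);
 *		return foo_hw_init(pdev, &cfg);
 *	}
 *
 * The calls mirror smsc911x_probe_config() above and work unmodified whether
 * the properties come from the device tree or from ACPI.
 */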
+MODULE_DEVICE_TABLE(acpi, smsc911x_acpi_match); + static struct platform_driver smsc911x_driver = { .probe = smsc911x_drv_probe, .remove = smsc911x_drv_remove, @@ -2661,6 +2658,7 @@ static struct platform_driver smsc911x_driver = { .name = SMSC_CHIPNAME, .pm = SMSC911X_PM_OPS, .of_match_table = of_match_ptr(smsc911x_dt_ids), + .acpi_match_table = ACPI_PTR(smsc911x_acpi_match), }, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 333489f0fd24..9d89bdbf029f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -42,7 +42,7 @@ #define NSS_COMMON_CLK_DIV_MASK 0x7f #define NSS_COMMON_CLK_SRC_CTRL 0x14 -#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (1 << x) +#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (x) /* Mode is coded on 1 bit but is different depending on the MAC ID: * MAC0: QSGMII=0 RGMII=1 * MAC1: QSGMII=0 SGMII=0 RGMII=1 @@ -308,7 +308,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) /* Configure the clock src according to the mode */ regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val); - val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id); + val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id)); switch (gmac->phy_mode) { case PHY_INTERFACE_MODE_RGMII: val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) << diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 3b81b39bea6f..8fc90f1c872c 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -387,6 +387,9 @@ struct cpsw_priv { struct cpsw_ale *ale; bool rx_pause; bool tx_pause; + bool quirk_irq; + bool rx_irq_disabled; + bool tx_irq_disabled; /* snapshot of IRQ numbers */ u32 irqs_table[4]; u32 num_irqs; @@ -755,6 +758,11 @@ static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id) writel(0, &priv->wr_regs->tx_en); cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); + if (priv->quirk_irq) { + disable_irq_nosync(priv->irqs_table[1]); + priv->tx_irq_disabled = true; + } + napi_schedule(&priv->napi_tx); return IRQ_HANDLED; } @@ -766,6 +774,11 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); writel(0, &priv->wr_regs->rx_en); + if (priv->quirk_irq) { + disable_irq_nosync(priv->irqs_table[0]); + priv->rx_irq_disabled = true; + } + napi_schedule(&priv->napi_rx); return IRQ_HANDLED; } @@ -779,6 +792,10 @@ static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) if (num_tx < budget) { napi_complete(napi_tx); writel(0xff, &priv->wr_regs->tx_en); + if (priv->quirk_irq && priv->tx_irq_disabled) { + priv->tx_irq_disabled = false; + enable_irq(priv->irqs_table[1]); + } } if (num_tx) @@ -796,6 +813,10 @@ static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) if (num_rx < budget) { napi_complete(napi_rx); writel(0xff, &priv->wr_regs->rx_en); + if (priv->quirk_irq && priv->rx_irq_disabled) { + priv->rx_irq_disabled = false; + enable_irq(priv->irqs_table[0]); + } } if (num_rx) @@ -1267,6 +1288,16 @@ static int cpsw_ndo_open(struct net_device *ndev) napi_enable(&priv_sl0->napi_rx); napi_enable(&priv_sl0->napi_tx); + if (priv_sl0->tx_irq_disabled) { + priv_sl0->tx_irq_disabled = false; + enable_irq(priv->irqs_table[1]); + } + + if (priv_sl0->rx_irq_disabled) { + priv_sl0->rx_irq_disabled = false; + enable_irq(priv->irqs_table[0]); + } + if (WARN_ON(!priv->data.rx_descs)) priv->data.rx_descs = 128; @@ -2128,6 +2159,44 @@ static int cpsw_probe_dual_emac(struct platform_device 
*pdev, return ret; } +#define CPSW_QUIRK_IRQ BIT(0) + +static struct platform_device_id cpsw_devtype[] = { + { + /* keep it for existing compatibles */ + .name = "cpsw", + .driver_data = CPSW_QUIRK_IRQ, + }, { + .name = "am335x-cpsw", + .driver_data = CPSW_QUIRK_IRQ, + }, { + .name = "am4372-cpsw", + .driver_data = 0, + }, { + .name = "dra7-cpsw", + .driver_data = 0, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(platform, cpsw_devtype); + +enum ti_cpsw_type { + CPSW = 0, + AM335X_CPSW, + AM4372_CPSW, + DRA7_CPSW, +}; + +static const struct of_device_id cpsw_of_mtable[] = { + { .compatible = "ti,cpsw", .data = &cpsw_devtype[CPSW], }, + { .compatible = "ti,am335x-cpsw", .data = &cpsw_devtype[AM335X_CPSW], }, + { .compatible = "ti,am4372-cpsw", .data = &cpsw_devtype[AM4372_CPSW], }, + { .compatible = "ti,dra7-cpsw", .data = &cpsw_devtype[DRA7_CPSW], }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, cpsw_of_mtable); + static int cpsw_probe(struct platform_device *pdev) { struct cpsw_platform_data *data; @@ -2137,6 +2206,7 @@ static int cpsw_probe(struct platform_device *pdev) struct cpsw_ale_params ale_params; void __iomem *ss_regs; struct resource *res, *ss_res; + const struct of_device_id *of_id; u32 slave_offset, sliver_offset, slave_size; int ret = 0, i; int irq; @@ -2327,6 +2397,13 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_ale_ret; } + of_id = of_match_device(cpsw_of_mtable, &pdev->dev); + if (of_id) { + pdev->id_entry = of_id->data; + if (pdev->id_entry->driver_data) + priv->quirk_irq = true; + } + /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and * MISC IRQs which are always kept disabled with this driver so * we will not request them. @@ -2491,12 +2568,6 @@ static int cpsw_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume); -static const struct of_device_id cpsw_of_mtable[] = { - { .compatible = "ti,cpsw", }, - { /* sentinel */ }, -}; -MODULE_DEVICE_TABLE(of, cpsw_of_mtable); - static struct platform_driver cpsw_driver = { .driver = { .name = "cpsw", diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index a8a730641bbb..bb1bb72121c0 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -85,7 +85,6 @@ struct netcp_intf { struct list_head rxhook_list_head; unsigned int rx_queue_id; void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN]; - u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN]; struct napi_struct rx_napi; struct napi_struct tx_napi; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 29ae672917b7..1a5aca55ea9f 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -34,6 +34,7 @@ #define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD) #define NETCP_NAPI_WEIGHT 64 #define NETCP_TX_TIMEOUT (5 * HZ) +#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN) #define NETCP_MIN_PACKET_SIZE ETH_ZLEN #define NETCP_MAX_MCAST_ADDR 16 @@ -815,30 +816,28 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) if (likely(fdq == 0)) { unsigned int primary_buf_len; /* Allocate a primary receive queue entry */ - buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET; + buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET; primary_buf_len = SKB_DATA_ALIGN(buf_len) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - if (primary_buf_len <= PAGE_SIZE) { - bufptr = netdev_alloc_frag(primary_buf_len); - pad[1] = primary_buf_len; - } else { - bufptr = kmalloc(primary_buf_len, GFP_ATOMIC | -
GFP_DMA32 | __GFP_COLD); - pad[1] = 0; - } + bufptr = netdev_alloc_frag(primary_buf_len); + pad[1] = primary_buf_len; if (unlikely(!bufptr)) { - dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n"); + dev_warn_ratelimited(netcp->ndev_dev, + "Primary RX buffer alloc failed\n"); goto fail; } dma = dma_map_single(netcp->dev, bufptr, buf_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(netcp->dev, dma))) + goto fail; + pad[0] = (u32)bufptr; } else { /* Allocate a secondary receive queue entry */ - page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD); + page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD); if (unlikely(!page)) { dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n"); goto fail; @@ -1021,7 +1020,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp) /* Map the linear buffer */ dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE); - if (unlikely(!dma_addr)) { + if (unlikely(dma_mapping_error(dev, dma_addr))) { dev_err(netcp->ndev_dev, "Failed to map skb buffer\n"); return NULL; } @@ -1557,8 +1556,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev) knav_queue_disable_notify(netcp->rx_queue); /* open Rx FDQs */ - for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && - netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) { + for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i]; + ++i) { snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i); netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0); if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) { @@ -1952,14 +1951,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device, netcp->rx_queue_depths[0] = 128; } - ret = of_property_read_u32_array(node_interface, "rx-buffer-size", - netcp->rx_buffer_sizes, - KNAV_DMA_FDQ_PER_CHAN); - if (ret) { - dev_err(dev, "missing \"rx-buffer-size\" parameter\n"); - netcp->rx_buffer_sizes[0] = 1536; - } - ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2); if (ret < 0) { dev_err(dev, "missing \"rx-pool\" parameter\n"); diff --git a/drivers/net/fddi/skfp/h/hwmtm.h b/drivers/net/fddi/skfp/h/hwmtm.h index 5924d4219e9e..4ca2341d7f06 100644 --- a/drivers/net/fddi/skfp/h/hwmtm.h +++ b/drivers/net/fddi/skfp/h/hwmtm.h @@ -74,15 +74,6 @@ #define NULL 0 #endif -#ifdef LITTLE_ENDIAN -#define HWM_REVERSE(x) (x) -#else -#define HWM_REVERSE(x) ((((x)<<24L)&0xff000000L) + \ - (((x)<< 8L)&0x00ff0000L) + \ - (((x)>> 8L)&0x0000ff00L) + \ - (((x)>>24L)&0x000000ffL)) -#endif - #define C_INDIC (1L<<25) #define A_INDIC (1L<<26) #define RD_FS_LOCAL 0x80 diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 78d49d186e05..897e1a3f035b 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -283,7 +283,6 @@ static void geneve_setup(struct net_device *dev) SET_NETDEV_DEVTYPE(dev, &geneve_type); - dev->tx_queue_len = 0; dev->features |= NETIF_F_LLTX; dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; dev->features |= NETIF_F_RXCSUM; @@ -297,7 +296,7 @@ static void geneve_setup(struct net_device *dev) dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; netif_keep_dst(dev); - dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; } static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 2ffbf13471d0..216bfd350169 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -728,11 +728,12 @@ static int 
mkiss_open(struct tty_struct *tty) dev->type = ARPHRD_AX25; /* Perform the low-level AX25 initialization. */ - if ((err = ax_open(ax->dev))) { + err = ax_open(ax->dev); + if (err) goto out_free_netdev; - } - if (register_netdev(dev)) + err = register_netdev(dev); + if (err) goto out_free_buffers; /* after register_netdev() - because else printk smashes the kernel */ diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 5ce7020ca530..5fa98f599b3d 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -162,6 +162,7 @@ struct netvsc_device_info { bool link_state; /* 0 - link up, 1 - link down */ int ring_size; u32 max_num_vrss_chns; + u32 num_chn; }; enum rndis_device_state { diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 7b36d5fecc1f..2990024b90f9 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -770,6 +770,104 @@ static void netvsc_get_channels(struct net_device *net, } } +static int netvsc_set_channels(struct net_device *net, + struct ethtool_channels *channels) +{ + struct net_device_context *net_device_ctx = netdev_priv(net); + struct hv_device *dev = net_device_ctx->device_ctx; + struct netvsc_device *nvdev = hv_get_drvdata(dev); + struct netvsc_device_info device_info; + u32 num_chn; + u32 max_chn; + int ret = 0; + bool recovering = false; + + if (!nvdev || nvdev->destroy) + return -ENODEV; + + num_chn = nvdev->num_chn; + max_chn = min_t(u32, nvdev->max_chn, num_online_cpus()); + + if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) { + pr_info("vRSS unsupported before NVSP Version 5\n"); + return -EINVAL; + } + + /* We do not support rx, tx, or other */ + if (!channels || + channels->rx_count || + channels->tx_count || + channels->other_count || + (channels->combined_count < 1)) + return -EINVAL; + + if (channels->combined_count > max_chn) { + pr_info("combined channels too high, using %d\n", max_chn); + channels->combined_count = max_chn; + } + + ret = netvsc_close(net); + if (ret) + goto out; + + do_set: + nvdev->start_remove = true; + rndis_filter_device_remove(dev); + + nvdev->num_chn = channels->combined_count; + + net_device_ctx->device_ctx = dev; + hv_set_drvdata(dev, net); + + memset(&device_info, 0, sizeof(device_info)); + device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */ + device_info.ring_size = ring_size; + device_info.max_num_vrss_chns = max_num_vrss_chns; + + ret = rndis_filter_device_add(dev, &device_info); + if (ret) { + if (recovering) { + netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); + return ret; + } + goto recover; + } + + nvdev = hv_get_drvdata(dev); + + ret = netif_set_real_num_tx_queues(net, nvdev->num_chn); + if (ret) { + if (recovering) { + netdev_err(net, "could not set tx queue count (ret %d)\n", ret); + return ret; + } + goto recover; + } + + ret = netif_set_real_num_rx_queues(net, nvdev->num_chn); + if (ret) { + if (recovering) { + netdev_err(net, "could not set rx queue count (ret %d)\n", ret); + return ret; + } + goto recover; + } + + out: + netvsc_open(net); + + return ret; + + recover: + /* If the above failed, we attempt to recover through the same + * process but with the original number of channels. 
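
Changing the channel count on netvsc is heavyweight because vRSS subchannels are negotiated when the RNDIS device is added, so set_channels has to tear the device down and re-add it; the recover path here simply repeats that sequence with the original channel count if the new one fails. The validation at the top of the function follows the usual ethtool convention for drivers that only expose combined channels. Roughly, as a sketch where FOO_MAX_QUEUES stands in for the device limit:

static int foo_set_channels(struct net_device *net,
                            struct ethtool_channels *ch)
{
        u32 max = min_t(u32, FOO_MAX_QUEUES, num_online_cpus());

        /* only combined rx+tx channels are configurable */
        if (!ch || ch->rx_count || ch->tx_count || ch->other_count ||
            ch->combined_count < 1)
                return -EINVAL;

        /* clamp rather than fail, as the driver above does */
        if (ch->combined_count > max)
                ch->combined_count = max;

        /* ... tear down and re-create the device with ch->combined_count ... */
        return 0;
}
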
+ */ + netdev_err(net, "could not set channels, recovering\n"); + recovering = true; + channels->combined_count = num_chn; + goto do_set; +} + static int netvsc_change_mtu(struct net_device *ndev, int mtu) { struct net_device_context *ndevctx = netdev_priv(ndev); @@ -799,7 +897,10 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) ndevctx->device_ctx = hdev; hv_set_drvdata(hdev, ndev); + + memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; + device_info.num_chn = nvdev->num_chn; device_info.max_num_vrss_chns = max_num_vrss_chns; rndis_filter_device_add(hdev, &device_info); @@ -889,6 +990,7 @@ static const struct ethtool_ops ethtool_ops = { .get_drvinfo = netvsc_get_drvinfo, .get_link = ethtool_op_get_link, .get_channels = netvsc_get_channels, + .set_channels = netvsc_set_channels, }; static const struct net_device_ops device_ops = { @@ -1022,6 +1124,7 @@ static int netvsc_probe(struct hv_device *dev, net->needed_headroom = max_needed_headroom; /* Notify the netvsc driver of the new device */ + memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; device_info.max_num_vrss_chns = max_num_vrss_chns; ret = rndis_filter_device_add(dev, &device_info); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 9b8263db49cc..5931a799aa17 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1125,7 +1125,12 @@ int rndis_filter_device_add(struct hv_device *dev, */ node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu)); num_possible_rss_qs = cpumask_weight(node_cpu_mask); - net_device->num_chn = min(num_possible_rss_qs, num_rss_qs); + + /* We will use the given number of channels if available. */ + if (device_info->num_chn && device_info->num_chn < net_device->max_chn) + net_device->num_chn = device_info->num_chn; + else + net_device->num_chn = min(num_possible_rss_qs, num_rss_qs); num_rss_qs = net_device->num_chn - 1; net_device->num_sc_offered = num_rss_qs; diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index d0d5bf6cbb68..6422caac8d40 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -97,9 +97,7 @@ struct at86rf230_local { struct at86rf230_state_change irq; - bool tx_aret; unsigned long cal_timeout; - s8 max_frame_retries; bool is_tx; bool is_tx_from_off; u8 tx_retry; @@ -651,7 +649,7 @@ at86rf230_tx_complete(void *context) enable_irq(ctx->irq); - ieee802154_xmit_complete(lp->hw, lp->tx_skb, !lp->tx_aret); + ieee802154_xmit_complete(lp->hw, lp->tx_skb, false); } static void @@ -760,17 +758,10 @@ at86rf230_irq_trx_end(struct at86rf230_local *lp) { if (lp->is_tx) { lp->is_tx = 0; - - if (lp->tx_aret) - at86rf230_async_state_change(lp, &lp->irq, - STATE_FORCE_TX_ON, - at86rf230_tx_trac_status, - true); - else - at86rf230_async_state_change(lp, &lp->irq, - STATE_RX_AACK_ON, - at86rf230_tx_complete, - true); + at86rf230_async_state_change(lp, &lp->irq, + STATE_FORCE_TX_ON, + at86rf230_tx_trac_status, + true); } else { at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq, at86rf230_rx_trac_check, true); @@ -876,24 +867,16 @@ at86rf230_xmit_start(void *context) struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - /* In ARET mode we need to go into STATE_TX_ARET_ON after we - * are in STATE_TX_ON. The pfad differs here, so we change - * the complete handler. 
- */ - if (lp->tx_aret) { - if (lp->is_tx_from_off) { - lp->is_tx_from_off = false; - at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON, - at86rf230_write_frame, - false); - } else { - at86rf230_async_state_change(lp, ctx, STATE_TX_ON, - at86rf230_xmit_tx_on, - false); - } + /* check if we change from off state */ + if (lp->is_tx_from_off) { + lp->is_tx_from_off = false; + at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON, + at86rf230_write_frame, + false); } else { at86rf230_async_state_change(lp, ctx, STATE_TX_ON, - at86rf230_write_frame, false); + at86rf230_xmit_tx_on, + false); } } @@ -1267,15 +1250,8 @@ static int at86rf230_set_frame_retries(struct ieee802154_hw *hw, s8 retries) { struct at86rf230_local *lp = hw->priv; - int rc = 0; - lp->tx_aret = retries >= 0; - lp->max_frame_retries = retries; - - if (retries >= 0) - rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries); - - return rc; + return at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries); } static int diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c index 613dae559925..c5b54a15fc4c 100644 --- a/drivers/net/ieee802154/cc2520.c +++ b/drivers/net/ieee802154/cc2520.c @@ -833,6 +833,7 @@ static int cc2520_get_platform_data(struct spi_device *spi, if (!spi_pdata) return -ENOENT; *pdata = *spi_pdata; + priv->fifo_pin = pdata->fifo; return 0; } diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 20b58bdecf75..a9268db4e349 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -520,12 +520,11 @@ static void ipvlan_link_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); - dev->priv_flags |= IFF_UNICAST_FLT; + dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE; dev->netdev_ops = &ipvlan_netdev_ops; dev->destructor = free_netdev; dev->header_ops = &ipvlan_header_ops; dev->ethtool_ops = &ipvlan_ethtool_ops; - dev->tx_queue_len = 0; } static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] = diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index c76283c2f84a..dc7d970bd1c0 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -165,10 +165,9 @@ static void loopback_setup(struct net_device *dev) dev->mtu = 64 * 1024; dev->hard_header_len = ETH_HLEN; /* 14 */ dev->addr_len = ETH_ALEN; /* 6 */ - dev->tx_queue_len = 0; dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ dev->flags = IFF_LOOPBACK; - dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; netif_keep_dst(dev); dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO; dev->features = NETIF_F_SG | NETIF_F_FRAGLIST diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c index 34924dfadd00..7b7c70e2341e 100644 --- a/drivers/net/nlmon.c +++ b/drivers/net/nlmon.c @@ -130,7 +130,7 @@ static const struct net_device_ops nlmon_ops = { static void nlmon_setup(struct net_device *dev) { dev->type = ARPHRD_NETLINK; - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; dev->netdev_ops = &nlmon_ops; dev->ethtool_ops = &nlmon_ethtool_ops; diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 3cc316cb7e6b..d8757bf9ad75 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -102,6 +102,12 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len); + if (len < 0) { + ndev->stats.rx_errors++; + ndev->stats.rx_length_errors++; + goto 
enqueue_again; + } + skb_put(skb, len); skb->protocol = eth_type_trans(skb, ndev); skb->ip_summed = CHECKSUM_NONE; @@ -121,6 +127,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, return; } +enqueue_again: rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); if (rc) { dev_kfree_skb(skb); @@ -184,7 +191,7 @@ static int ntb_netdev_open(struct net_device *ndev) rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, ndev->mtu + ETH_HLEN); - if (rc == -EINVAL) { + if (rc) { dev_kfree_skb(skb); goto err; } diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 3320a179ee36..e6897b6a8a53 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -52,6 +52,7 @@ #define MII_M1011_PHY_SCR_MDI_X 0x0020 #define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060 +#define MII_M1145_PHY_EXT_ADDR_PAGE 0x16 #define MII_M1145_PHY_EXT_SR 0x1b #define MII_M1145_PHY_EXT_CR 0x14 #define MII_M1145_RGMII_RX_DELAY 0x0080 @@ -552,6 +553,16 @@ static int m88e1111_config_init(struct phy_device *phydev) err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); if (err < 0) return err; + + /* make sure copper is selected */ + err = phy_read(phydev, MII_M1145_PHY_EXT_ADDR_PAGE); + if (err < 0) + return err; + + err = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, + err & (~0xff)); + if (err < 0) + return err; } if (phydev->interface == PHY_INTERFACE_MODE_RTBI) { diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 45353613b2ed..43ab691362d4 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -136,6 +136,19 @@ static struct phy_driver realtek_drvs[] = { .ack_interrupt = &rtl821x_ack_interrupt, .config_intr = &rtl8211b_config_intr, .driver = { .owner = THIS_MODULE,}, + }, { + .phy_id = 0x001cc914, + .name = "RTL8211DN Gigabit Ethernet", + .phy_id_mask = 0x001fffff, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = rtl821x_ack_interrupt, + .config_intr = rtl8211e_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, + .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x001cc915, .name = "RTL8211E Gigabit Ethernet", @@ -170,6 +183,7 @@ module_phy_driver(realtek_drvs); static struct mdio_device_id __maybe_unused realtek_tbl[] = { { 0x001cc912, 0x001fffff }, + { 0x001cc914, 0x001fffff }, { 0x001cc915, 0x001fffff }, { 0x001cc916, 0x001fffff }, { } diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index daa054b3ff03..651d35ea22c5 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2051,9 +2051,9 @@ static void team_setup(struct net_device *dev) dev->netdev_ops = &team_netdev_ops; dev->ethtool_ops = &team_ethtool_ops; dev->destructor = team_destructor; - dev->tx_queue_len = 0; dev->flags |= IFF_MULTICAST; dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); + dev->priv_flags |= IFF_NO_QUEUE; /* * Indicate we support unicast address filtering. 
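
team here, like nlmon, ipvlan, loopback, veth, geneve, vxlan and hdlc_fr elsewhere in the series, moves from zeroing tx_queue_len to the new IFF_NO_QUEUE flag. The flag tells the core to attach the noqueue discipline explicitly instead of inferring it from tx_queue_len == 0, and tx_queue_len keeps a sane default should a real qdisc be attached later. The shared pattern, sketched with an illustrative name:

static void foo_setup(struct net_device *dev)
{
        ether_setup(dev);

        /* was: dev->tx_queue_len = 0; */
        dev->priv_flags |= IFF_NO_QUEUE;
        dev->features |= NETIF_F_LLTX;  /* typical for such virtual devices */
}
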
That way core won't diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index ec8bd34ce47b..39364a45af40 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -291,8 +291,6 @@ static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data) u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL); int ret; - BUG_ON(!dev); - if (!buf) return -ENOMEM; @@ -319,8 +317,6 @@ static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data) u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL); int ret; - BUG_ON(!dev); - if (!buf) return -ENOMEM; @@ -351,10 +347,6 @@ static int lan78xx_read_stats(struct lan78xx_net *dev, u32 *src; u32 *dst; - BUG_ON(!dev); - BUG_ON(!data); - BUG_ON(sizeof(struct lan78xx_statstage) != 0xBC); - stats = kmalloc(sizeof(*stats), GFP_KERNEL); if (!stats) return -ENOMEM; @@ -687,9 +679,6 @@ static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset, u32 val; int i, ret; - BUG_ON(!dev); - BUG_ON(!data); - ret = lan78xx_eeprom_confirm_not_busy(dev); if (ret) return ret; @@ -737,9 +726,6 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset, u32 val; int i, ret; - BUG_ON(!dev); - BUG_ON(!data); - ret = lan78xx_eeprom_confirm_not_busy(dev); if (ret) return ret; @@ -2220,20 +2206,10 @@ static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb, spin_lock_irqsave(&list->lock, flags); old_state = entry->state; entry->state = state; - if (!list->prev) - BUG_ON(!list->prev); - if (!list->next) - BUG_ON(!list->next); - if (!skb->prev || !skb->next) - BUG_ON(true); __skb_unlink(skb, list); spin_unlock(&list->lock); spin_lock(&dev->done.lock); - if (!dev->done.prev) - BUG_ON(!dev->done.prev); - if (!dev->done.next) - BUG_ON(!dev->done.next); __skb_queue_tail(&dev->done, skb); if (skb_queue_len(&dev->done) == 1) @@ -2279,8 +2255,7 @@ static void tx_complete(struct urb *urb) usb_autopm_put_interface_async(dev->intf); - if (skb) - defer_bh(dev, skb, &dev->txq, tx_done); + defer_bh(dev, skb, &dev->txq, tx_done); } static void lan78xx_queue_skb(struct sk_buff_head *list, @@ -2295,13 +2270,15 @@ static void lan78xx_queue_skb(struct sk_buff_head *list, netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net) { struct lan78xx_net *dev = netdev_priv(net); + struct sk_buff *skb2 = NULL; - if (skb) - skb_tx_timestamp(skb); - - skb = lan78xx_tx_prep(dev, skb, GFP_ATOMIC); if (skb) { - skb_queue_tail(&dev->txq_pend, skb); + skb_tx_timestamp(skb); + skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC); + } + + if (skb2) { + skb_queue_tail(&dev->txq_pend, skb2); if (skb_queue_len(&dev->txq_pend) > 10) netif_stop_queue(net); @@ -2748,8 +2725,6 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) memcpy(skb->data + pos, skb2->data, skb2->len); pos += roundup(skb2->len, sizeof(u32)); dev_kfree_skb(skb2); - } else { - BUG_ON(true); } } @@ -2858,11 +2833,6 @@ static void lan78xx_bh(unsigned long param) struct sk_buff *skb; struct skb_data *entry; - if (!dev->done.prev) - BUG_ON(!dev->done.prev); - if (!dev->done.next) - BUG_ON(!dev->done.next); - while ((skb = skb_dequeue(&dev->done))) { entry = (struct skb_data *)(skb->cb); switch (entry->state) { @@ -2882,10 +2852,6 @@ static void lan78xx_bh(unsigned long param) netdev_dbg(dev->net, "skb state %d\n", entry->state); return; } - if (!dev->done.prev) - BUG_ON(!dev->done.prev); - if (!dev->done.next) - BUG_ON(!dev->done.next); } if (netif_device_present(dev->net) && netif_running(dev->net)) { @@ -3156,7 +3122,6 @@ static int lan78xx_probe(struct usb_interface 
*intf, return 0; - usb_set_intfdata(intf, NULL); out3: lan78xx_unbind(dev, intf); out2: diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 343592c4315f..0ef4a5ad5557 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -306,6 +306,7 @@ static void veth_setup(struct net_device *dev) dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + dev->priv_flags |= IFF_NO_QUEUE; dev->netdev_ops = &veth_netdev_ops; dev->ethtool_ops = &veth_ethtool_ops; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 66f08f622dc6..9b950f2db836 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -40,12 +40,12 @@ module_param(gso, bool, 0444); #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) #define GOOD_COPY_LEN 128 -/* Weight used for the RX packet size EWMA. The average packet size is used to - * determine the packet buffer size when refilling RX rings. As the entire RX - * ring may be refilled at once, the weight is chosen so that the EWMA will be - * insensitive to short-term, transient changes in packet size. +/* RX packet size EWMA. The average packet size is used to determine the packet + * buffer size when refilling RX rings. As the entire RX ring may be refilled + * at once, the weight is chosen so that the EWMA will be insensitive to short- + * term, transient changes in packet size. */ -#define RECEIVE_AVG_WEIGHT 64 +DECLARE_EWMA(pkt_len, 1, 64) /* Minimum alignment for mergeable packet buffers. */ #define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256) @@ -85,7 +85,7 @@ struct receive_queue { struct page *pages; /* Average packet length for mergeable receive buffers. */ - struct ewma mrg_avg_pkt_len; + struct ewma_pkt_len mrg_avg_pkt_len; /* Page frag for packet buffer allocation. */ struct page_frag alloc_frag; @@ -407,7 +407,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } } - ewma_add(&rq->mrg_avg_pkt_len, head_skb->len); + ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); return head_skb; err_skb: @@ -600,12 +600,12 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, return err; } -static unsigned int get_mergeable_buf_len(struct ewma *avg_pkt_len) +static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len) { const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); unsigned int len; - len = hdr_len + clamp_t(unsigned int, ewma_read(avg_pkt_len), + len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), GOOD_PACKET_LEN, PAGE_SIZE - hdr_len); return ALIGN(len, MERGEABLE_BUFFER_ALIGN); } @@ -1615,7 +1615,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) napi_hash_add(&vi->rq[i].napi); sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); - ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT); + ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); } @@ -1658,7 +1658,7 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, { struct virtnet_info *vi = netdev_priv(queue->dev); unsigned int queue_index = get_netdev_rx_queue_index(queue); - struct ewma *avg; + struct ewma_pkt_len *avg; BUG_ON(queue_index >= vi->max_queue_pairs); avg = &vi->rq[queue_index].mrg_avg_pkt_len; @@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev) /* Do we support "hardware" checksums? */ if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { /* This opens up the world of extra features. 
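
The virtio_net hunks above track the lib/average.h rework: the runtime-parameterised struct ewma becomes DECLARE_EWMA(pkt_len, 1, 64), which generates a dedicated type (struct ewma_pkt_len) plus inline init/add/read helpers with factor and weight fixed at compile time. Generic use of such a generated estimator, sketched with an illustrative name:

#include <linux/average.h>

/* generates struct ewma_lat and ewma_lat_init/_add/_read */
DECLARE_EWMA(lat, 1, 64)

static struct ewma_lat lat_avg;

static void foo_init(void)
{
        ewma_lat_init(&lat_avg);
}

static void foo_sample(unsigned long v)
{
        ewma_lat_add(&lat_avg, v);      /* weight 64: each sample nudges the average by 1/64 */
}

static unsigned long foo_avg(void)
{
        return ewma_lat_read(&lat_avg);
}
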
*/ - dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; + dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; if (csum) - dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; + dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c new file mode 100644 index 000000000000..dbeffe789185 --- /dev/null +++ b/drivers/net/vrf.c @@ -0,0 +1,669 @@ +/* + * vrf.c: device driver to encapsulate a VRF space + * + * Copyright (c) 2015 Cumulus Networks. All rights reserved. + * Copyright (c) 2015 Shrijeet Mukherjee + * Copyright (c) 2015 David Ahern + * + * Based on dummy, team and ipvlan drivers + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "vrf" +#define DRV_VERSION "1.0" + +#define vrf_is_slave(dev) ((dev)->flags & IFF_SLAVE) + +#define vrf_master_get_rcu(dev) \ + ((struct net_device *)rcu_dereference(dev->rx_handler_data)) + +struct pcpu_dstats { + u64 tx_pkts; + u64 tx_bytes; + u64 tx_drps; + u64 rx_pkts; + u64 rx_bytes; + struct u64_stats_sync syncp; +}; + +static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie) +{ + return dst; +} + +static int vrf_ip_local_out(struct sk_buff *skb) +{ + return ip_local_out(skb); +} + +static unsigned int vrf_v4_mtu(const struct dst_entry *dst) +{ + /* TO-DO: return max ethernet size? 
*/ + return dst->dev->mtu; +} + +static void vrf_dst_destroy(struct dst_entry *dst) +{ + /* our dst lives forever - or until the device is closed */ +} + +static unsigned int vrf_default_advmss(const struct dst_entry *dst) +{ + return 65535 - 40; +} + +static struct dst_ops vrf_dst_ops = { + .family = AF_INET, + .local_out = vrf_ip_local_out, + .check = vrf_ip_check, + .mtu = vrf_v4_mtu, + .destroy = vrf_dst_destroy, + .default_advmss = vrf_default_advmss, +}; + +static bool is_ip_rx_frame(struct sk_buff *skb) +{ + switch (skb->protocol) { + case htons(ETH_P_IP): + case htons(ETH_P_IPV6): + return true; + } + return false; +} + +static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb) +{ + vrf_dev->stats.tx_errors++; + kfree_skb(skb); +} + +/* note: already called with rcu_read_lock */ +static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + + if (is_ip_rx_frame(skb)) { + struct net_device *dev = vrf_master_get_rcu(skb->dev); + struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); + + u64_stats_update_begin(&dstats->syncp); + dstats->rx_pkts++; + dstats->rx_bytes += skb->len; + u64_stats_update_end(&dstats->syncp); + + skb->dev = dev; + + return RX_HANDLER_ANOTHER; + } + return RX_HANDLER_PASS; +} + +static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + int i; + + for_each_possible_cpu(i) { + const struct pcpu_dstats *dstats; + u64 tbytes, tpkts, tdrops, rbytes, rpkts; + unsigned int start; + + dstats = per_cpu_ptr(dev->dstats, i); + do { + start = u64_stats_fetch_begin_irq(&dstats->syncp); + tbytes = dstats->tx_bytes; + tpkts = dstats->tx_pkts; + tdrops = dstats->tx_drps; + rbytes = dstats->rx_bytes; + rpkts = dstats->rx_pkts; + } while (u64_stats_fetch_retry_irq(&dstats->syncp, start)); + stats->tx_bytes += tbytes; + stats->tx_packets += tpkts; + stats->tx_dropped += tdrops; + stats->rx_bytes += rbytes; + stats->rx_packets += rpkts; + } + return stats; +} + +static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, + struct net_device *dev) +{ + vrf_tx_error(dev, skb); + return NET_XMIT_DROP; +} + +static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4, + struct net_device *vrf_dev) +{ + struct rtable *rt; + int err = 1; + + rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL); + if (IS_ERR(rt)) + goto out; + + /* TO-DO: what about broadcast ? 
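
The receive half of the new VRF driver deserves a pointer: frames arriving on any enslaved interface are steered to the master by a netdev rx_handler (vrf_handle_frame above) that rewrites skb->dev and returns RX_HANDLER_ANOTHER, making the core redo receive processing as if the frame had arrived on the VRF device itself. Stripped to its skeleton, with illustrative names:

static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        /* rx_handler_data was set to the master device at registration */
        struct net_device *master = rcu_dereference(skb->dev->rx_handler_data);

        skb->dev = master;
        return RX_HANDLER_ANOTHER;      /* core re-runs RX against the master */
}

/* on enslave: netdev_rx_handler_register(slave, foo_handle_frame, master); */
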
*/ + if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { + ip_rt_put(rt); + goto out; + } + + skb_dst_drop(skb); + skb_dst_set(skb, &rt->dst); + err = 0; +out: + return err; +} + +static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, + struct net_device *vrf_dev) +{ + struct iphdr *ip4h = ip_hdr(skb); + int ret = NET_XMIT_DROP; + struct flowi4 fl4 = { + /* needed to match OIF rule */ + .flowi4_oif = vrf_dev->ifindex, + .flowi4_iif = LOOPBACK_IFINDEX, + .flowi4_tos = RT_TOS(ip4h->tos), + .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC, + .daddr = ip4h->daddr, + }; + + if (vrf_send_v4_prep(skb, &fl4, vrf_dev)) + goto err; + + if (!ip4h->saddr) { + ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0, + RT_SCOPE_LINK); + } + + ret = ip_local_out(skb); + if (unlikely(net_xmit_eval(ret))) + vrf_dev->stats.tx_errors++; + else + ret = NET_XMIT_SUCCESS; + +out: + return ret; +err: + vrf_tx_error(vrf_dev, skb); + goto out; +} + +static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev) +{ + switch (skb->protocol) { + case htons(ETH_P_IP): + return vrf_process_v4_outbound(skb, dev); + case htons(ETH_P_IPV6): + return vrf_process_v6_outbound(skb, dev); + default: + vrf_tx_error(dev, skb); + return NET_XMIT_DROP; + } +} + +static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) +{ + netdev_tx_t ret = is_ip_tx_frame(skb, dev); + + if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { + struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); + + u64_stats_update_begin(&dstats->syncp); + dstats->tx_pkts++; + dstats->tx_bytes += skb->len; + u64_stats_update_end(&dstats->syncp); + } else { + this_cpu_inc(dev->dstats->tx_drps); + } + + return ret; +} + +static netdev_tx_t vrf_finish(struct sock *sk, struct sk_buff *skb) +{ + return dev_queue_xmit(skb); +} + +static int vrf_output(struct sock *sk, struct sk_buff *skb) +{ + struct net_device *dev = skb_dst(skb)->dev; + + IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len); + + skb->dev = dev; + skb->protocol = htons(ETH_P_IP); + + return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb, + NULL, dev, + vrf_finish, + !(IPCB(skb)->flags & IPSKB_REROUTED)); +} + +static void vrf_rtable_destroy(struct net_vrf *vrf) +{ + struct dst_entry *dst = (struct dst_entry *)vrf->rth; + + dst_destroy(dst); + vrf->rth = NULL; +} + +static struct rtable *vrf_rtable_create(struct net_device *dev) +{ + struct rtable *rth; + + rth = dst_alloc(&vrf_dst_ops, dev, 2, + DST_OBSOLETE_NONE, + (DST_HOST | DST_NOPOLICY | DST_NOXFRM)); + if (rth) { + rth->dst.output = vrf_output; + rth->rt_genid = rt_genid_ipv4(dev_net(dev)); + rth->rt_flags = 0; + rth->rt_type = RTN_UNICAST; + rth->rt_is_input = 0; + rth->rt_iif = 0; + rth->rt_pmtu = 0; + rth->rt_gateway = 0; + rth->rt_uses_gateway = 0; + INIT_LIST_HEAD(&rth->rt_uncached); + rth->rt_uncached_list = NULL; + rth->rt_lwtstate = NULL; + } + + return rth; +} + +/**************************** device handling ********************/ + +/* cycle interface to flush neighbor cache and move routes across tables */ +static void cycle_netdev(struct net_device *dev) +{ + unsigned int flags = dev->flags; + int ret; + + if (!netif_running(dev)) + return; + + ret = dev_change_flags(dev, flags & ~IFF_UP); + if (ret >= 0) + ret = dev_change_flags(dev, flags); + + if (ret < 0) { + netdev_err(dev, + "Failed to cycle device %s; route tables might be wrong!\n", + dev->name); + } +} + +static struct slave *__vrf_find_slave_dev(struct slave_queue *queue, + struct net_device *dev) +{ + 
struct list_head *head = &queue->all_slaves; + struct slave *slave; + + list_for_each_entry(slave, head, list) { + if (slave->dev == dev) + return slave; + } + + return NULL; +} + +/* inverse of __vrf_insert_slave */ +static void __vrf_remove_slave(struct slave_queue *queue, struct slave *slave) +{ + list_del(&slave->list); +} + +static void __vrf_insert_slave(struct slave_queue *queue, struct slave *slave) +{ + list_add(&slave->list, &queue->all_slaves); +} + +static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev) +{ + struct net_vrf_dev *vrf_ptr = kmalloc(sizeof(*vrf_ptr), GFP_KERNEL); + struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL); + struct net_vrf *vrf = netdev_priv(dev); + struct slave_queue *queue = &vrf->queue; + int ret = -ENOMEM; + + if (!slave || !vrf_ptr) + goto out_fail; + + slave->dev = port_dev; + vrf_ptr->ifindex = dev->ifindex; + vrf_ptr->tb_id = vrf->tb_id; + + /* register the packet handler for slave ports */ + ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev); + if (ret) { + netdev_err(port_dev, + "Device %s failed to register rx_handler\n", + port_dev->name); + goto out_fail; + } + + ret = netdev_master_upper_dev_link(port_dev, dev); + if (ret < 0) + goto out_unregister; + + port_dev->flags |= IFF_SLAVE; + __vrf_insert_slave(queue, slave); + rcu_assign_pointer(port_dev->vrf_ptr, vrf_ptr); + cycle_netdev(port_dev); + + return 0; + +out_unregister: + netdev_rx_handler_unregister(port_dev); +out_fail: + kfree(vrf_ptr); + kfree(slave); + return ret; +} + +static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev) +{ + if (netif_is_vrf(port_dev) || vrf_is_slave(port_dev)) + return -EINVAL; + + return do_vrf_add_slave(dev, port_dev); +} + +/* inverse of do_vrf_add_slave */ +static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev) +{ + struct net_vrf_dev *vrf_ptr = rtnl_dereference(port_dev->vrf_ptr); + struct net_vrf *vrf = netdev_priv(dev); + struct slave_queue *queue = &vrf->queue; + struct slave *slave; + + RCU_INIT_POINTER(port_dev->vrf_ptr, NULL); + + netdev_upper_dev_unlink(port_dev, dev); + port_dev->flags &= ~IFF_SLAVE; + + netdev_rx_handler_unregister(port_dev); + + /* after netdev_rx_handler_unregister for synchronize_rcu */ + kfree(vrf_ptr); + + cycle_netdev(port_dev); + + slave = __vrf_find_slave_dev(queue, port_dev); + if (slave) + __vrf_remove_slave(queue, slave); + + kfree(slave); + + return 0; +} + +static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev) +{ + return do_vrf_del_slave(dev, port_dev); +} + +static void vrf_dev_uninit(struct net_device *dev) +{ + struct net_vrf *vrf = netdev_priv(dev); + struct slave_queue *queue = &vrf->queue; + struct list_head *head = &queue->all_slaves; + struct slave *slave, *next; + + vrf_rtable_destroy(vrf); + + list_for_each_entry_safe(slave, next, head, list) + vrf_del_slave(dev, slave->dev); + + free_percpu(dev->dstats); + dev->dstats = NULL; +} + +static int vrf_dev_init(struct net_device *dev) +{ + struct net_vrf *vrf = netdev_priv(dev); + + INIT_LIST_HEAD(&vrf->queue.all_slaves); + + dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats); + if (!dev->dstats) + goto out_nomem; + + /* create the default dst which points back to us */ + vrf->rth = vrf_rtable_create(dev); + if (!vrf->rth) + goto out_stats; + + dev->flags = IFF_MASTER | IFF_NOARP; + + return 0; + +out_stats: + free_percpu(dev->dstats); + dev->dstats = NULL; +out_nomem: + return -ENOMEM; +} + +static const struct net_device_ops 
vrf_netdev_ops = { + .ndo_init = vrf_dev_init, + .ndo_uninit = vrf_dev_uninit, + .ndo_start_xmit = vrf_xmit, + .ndo_get_stats64 = vrf_get_stats64, + .ndo_add_slave = vrf_add_slave, + .ndo_del_slave = vrf_del_slave, +}; + +static void vrf_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); +} + +static const struct ethtool_ops vrf_ethtool_ops = { + .get_drvinfo = vrf_get_drvinfo, +}; + +static void vrf_setup(struct net_device *dev) +{ + ether_setup(dev); + + /* Initialize the device structure. */ + dev->netdev_ops = &vrf_netdev_ops; + dev->ethtool_ops = &vrf_ethtool_ops; + dev->destructor = free_netdev; + + /* Fill in device structure with ethernet-generic values. */ + eth_hw_addr_random(dev); + + /* don't acquire vrf device's netif_tx_lock when transmitting */ + dev->features |= NETIF_F_LLTX; + + /* don't allow vrf devices to change network namespaces. */ + dev->features |= NETIF_F_NETNS_LOCAL; +} + +static int vrf_validate(struct nlattr *tb[], struct nlattr *data[]) +{ + if (tb[IFLA_ADDRESS]) { + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) + return -EINVAL; + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) + return -EADDRNOTAVAIL; + } + return 0; +} + +static void vrf_dellink(struct net_device *dev, struct list_head *head) +{ + struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr); + + RCU_INIT_POINTER(dev->vrf_ptr, NULL); + kfree_rcu(vrf_ptr, rcu); + unregister_netdevice_queue(dev, head); +} + +static int vrf_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct net_vrf *vrf = netdev_priv(dev); + struct net_vrf_dev *vrf_ptr; + int err; + + if (!data || !data[IFLA_VRF_TABLE]) + return -EINVAL; + + vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]); + + dev->priv_flags |= IFF_VRF_MASTER; + + err = -ENOMEM; + vrf_ptr = kmalloc(sizeof(*dev->vrf_ptr), GFP_KERNEL); + if (!vrf_ptr) + goto out_fail; + + vrf_ptr->ifindex = dev->ifindex; + vrf_ptr->tb_id = vrf->tb_id; + + err = register_netdevice(dev); + if (err < 0) + goto out_fail; + + rcu_assign_pointer(dev->vrf_ptr, vrf_ptr); + + return 0; + +out_fail: + kfree(vrf_ptr); + free_netdev(dev); + return err; +} + +static size_t vrf_nl_getsize(const struct net_device *dev) +{ + return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */ +} + +static int vrf_fillinfo(struct sk_buff *skb, + const struct net_device *dev) +{ + struct net_vrf *vrf = netdev_priv(dev); + + return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id); +} + +static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = { + [IFLA_VRF_TABLE] = { .type = NLA_U32 }, +}; + +static struct rtnl_link_ops vrf_link_ops __read_mostly = { + .kind = DRV_NAME, + .priv_size = sizeof(struct net_vrf), + + .get_size = vrf_nl_getsize, + .policy = vrf_nl_policy, + .validate = vrf_validate, + .fill_info = vrf_fillinfo, + + .newlink = vrf_newlink, + .dellink = vrf_dellink, + .setup = vrf_setup, + .maxtype = IFLA_VRF_MAX, +}; + +static int vrf_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + /* only care about unregister events to drop slave references */ + if (event == NETDEV_UNREGISTER) { + struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr); + struct net_device *vrf_dev; + + if (!vrf_ptr || netif_is_vrf(dev)) + goto out; + + vrf_dev = netdev_master_upper_dev_get(dev); + vrf_del_slave(vrf_dev, dev); + 
} +out: + return NOTIFY_DONE; +} + +static struct notifier_block vrf_notifier_block __read_mostly = { + .notifier_call = vrf_device_event, +}; + +static int __init vrf_init_module(void) +{ + int rc; + + vrf_dst_ops.kmem_cachep = + kmem_cache_create("vrf_ip_dst_cache", + sizeof(struct rtable), 0, + SLAB_HWCACHE_ALIGN, + NULL); + + if (!vrf_dst_ops.kmem_cachep) + return -ENOMEM; + + register_netdevice_notifier(&vrf_notifier_block); + + rc = rtnl_link_register(&vrf_link_ops); + if (rc < 0) + goto error; + + return 0; + +error: + unregister_netdevice_notifier(&vrf_notifier_block); + kmem_cache_destroy(vrf_dst_ops.kmem_cachep); + return rc; +} + +static void __exit vrf_cleanup_module(void) +{ + rtnl_link_unregister(&vrf_link_ops); + unregister_netdevice_notifier(&vrf_notifier_block); + kmem_cache_destroy(vrf_dst_ops.kmem_cachep); +} + +module_init(vrf_init_module); +module_exit(vrf_cleanup_module); +MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern"); +MODULE_DESCRIPTION("Device driver to instantiate VRF domains"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_RTNL_LINK(DRV_NAME); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index e90f7a484e1c..ad51dac88d19 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -931,10 +931,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { struct vxlan_rdst *rd; - if (idx < cb->args[0]) - goto skip; - list_for_each_entry_rcu(rd, &f->remotes, list) { + if (idx < cb->args[0]) + goto skip; + err = vxlan_fdb_info(skb, vxlan, f, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, @@ -942,9 +942,9 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, NLM_F_MULTI, rd); if (err < 0) goto out; - } skip: - ++idx; + ++idx; + } } } out: @@ -1141,7 +1141,7 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, union vxlan_addr *remote_ip; /* For flow based devices, map all packets to VNI 0 */ - if (vs->flags & VXLAN_F_FLOW_BASED) + if (vs->flags & VXLAN_F_COLLECT_METADATA) vni = 0; /* Is this VNI defined? 
*/ @@ -1183,7 +1183,7 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, skb_reset_network_header(skb); /* In flow-based mode, GBP is carried in dst_metadata */ - if (!(vs->flags & VXLAN_F_FLOW_BASED)) + if (!(vs->flags & VXLAN_F_COLLECT_METADATA)) skb->mark = md->gbp; if (oip6) @@ -2129,7 +2129,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) #endif } - if (vxlan->flags & VXLAN_F_FLOW_BASED && + if (vxlan->flags & VXLAN_F_COLLECT_METADATA && info && info->mode == IP_TUNNEL_INFO_TX) { vxlan_xmit_one(skb, dev, NULL, false); return NETDEV_TX_OK; @@ -2416,7 +2416,6 @@ static void vxlan_setup(struct net_device *dev) dev->destructor = free_netdev; SET_NETDEV_DEVTYPE(dev, &vxlan_type); - dev->tx_queue_len = 0; dev->features |= NETIF_F_LLTX; dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; dev->features |= NETIF_F_RXCSUM; @@ -2428,7 +2427,7 @@ static void vxlan_setup(struct net_device *dev) dev->hw_features |= NETIF_F_GSO_SOFTWARE; dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; netif_keep_dst(dev); - dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; INIT_LIST_HEAD(&vxlan->next); spin_lock_init(&vxlan->hash_lock); @@ -2462,7 +2461,6 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_RSC] = { .type = NLA_U8 }, [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 }, [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 }, - [IFLA_VXLAN_FLOWBASED] = { .type = NLA_U8 }, [IFLA_VXLAN_COLLECT_METADATA] = { .type = NLA_U8 }, [IFLA_VXLAN_PORT] = { .type = NLA_U16 }, [IFLA_VXLAN_UDP_CSUM] = { .type = NLA_U8 }, @@ -2814,10 +2812,6 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, if (data[IFLA_VXLAN_LIMIT]) conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); - if (data[IFLA_VXLAN_FLOWBASED] && - nla_get_u8(data[IFLA_VXLAN_FLOWBASED])) - conf.flags |= VXLAN_F_FLOW_BASED; - if (data[IFLA_VXLAN_COLLECT_METADATA] && nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA])) conf.flags |= VXLAN_F_COLLECT_METADATA; @@ -2903,7 +2897,7 @@ static size_t vxlan_get_size(const struct net_device *dev) nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */ - nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_FLOWBASED */ + nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_COLLECT_METADATA */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ nla_total_size(sizeof(struct ifla_vxlan_port_range)) + @@ -2970,8 +2964,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) !!(vxlan->flags & VXLAN_F_L2MISS)) || nla_put_u8(skb, IFLA_VXLAN_L3MISS, !!(vxlan->flags & VXLAN_F_L3MISS)) || - nla_put_u8(skb, IFLA_VXLAN_FLOWBASED, - !!(vxlan->flags & VXLAN_F_FLOW_BASED)) || + nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA, + !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) || nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) || nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) || nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) || diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 7193b7304fdd..848ea6a399f2 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -589,7 +589,8 @@ static int cosa_probe(int base, int irq, int dma) chan->netdev->base_addr = chan->cosa->datareg; chan->netdev->irq = chan->cosa->irq; chan->netdev->dma = chan->cosa->dma; - if 
(register_hdlc_device(chan->netdev)) { + err = register_hdlc_device(chan->netdev); + if (err) { netdev_warn(chan->netdev, "register_hdlc_device() failed\n"); free_netdev(chan->netdev); diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 3ebed1c40abb..e92aaf615901 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -1096,7 +1096,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) } dev->netdev_ops = &pvc_ops; dev->mtu = HDLC_MAX_MTU; - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; dev->ml_priv = pvc; if (register_netdevice(dev) != 0) { diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index f79fa6c67ebc..25510679fd2e 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -31,16 +31,19 @@ #include "wmi-ops.h" unsigned int ath10k_debug_mask; +static unsigned int ath10k_cryptmode_param; static bool uart_print; static bool skip_otp; module_param_named(debug_mask, ath10k_debug_mask, uint, 0644); +module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644); module_param(uart_print, bool, 0644); module_param(skip_otp, bool, 0644); MODULE_PARM_DESC(debug_mask, "Debugging mask"); MODULE_PARM_DESC(uart_print, "Uart target debugging"); MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode"); +MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software"); static const struct ath10k_hw_params ath10k_hw_params_list[] = { { @@ -1073,6 +1076,46 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) return -EINVAL; } + ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_NATIVE_WIFI; + switch (ath10k_cryptmode_param) { + case ATH10K_CRYPT_MODE_HW: + clear_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags); + clear_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags); + break; + case ATH10K_CRYPT_MODE_SW: + if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT, + ar->fw_features)) { + ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware"); + return -EINVAL; + } + + set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags); + set_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags); + break; + default: + ath10k_info(ar, "invalid cryptmode: %d\n", + ath10k_cryptmode_param); + return -EINVAL; + } + + ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT; + ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT; + + if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW; + + /* Workaround: + * + * Firmware A-MSDU aggregation breaks with RAW Tx encap mode + * and causes enormous performance issues (malformed frames, + * etc). + * + * Disabling A-MSDU makes RAW mode stable with heavy traffic + * albeit a bit slower compared to regular operation. + */ + ar->htt.max_num_amsdu = 1; + } + /* Backwards compatibility for firmwares without * ATH10K_FW_IE_WMI_OP_VERSION. 
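
Two ath10k design points worth flagging in the hunks around here: the new cryptmode module parameter selects software crypto (1) only when the firmware advertises raw-mode support, since mac80211's software crypto needs raw 802.11 frames on both Tx and Rx; and when raw encap is active the driver caps A-MSDU at 1 to dodge firmware aggregation bugs, trading some throughput for stability. The parameter plumbing, as a sketch:

#include <linux/module.h>

static unsigned int foo_cryptmode;      /* 0: hardware crypto, 1: software crypto */
module_param_named(cryptmode, foo_cryptmode, uint, 0644);
MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");

static int foo_validate_cryptmode(bool fw_has_raw_mode)
{
        switch (foo_cryptmode) {
        case 0:
                return 0;
        case 1:
                /* software crypto requires raw Tx/Rx support in firmware */
                return fw_has_raw_mode ? 0 : -EINVAL;
        default:
                return -EINVAL;
        }
}
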
*/ @@ -1606,6 +1649,10 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, if (!ar->workqueue) goto err_free_mac; + ar->workqueue_aux = create_singlethread_workqueue("ath10k_aux_wq"); + if (!ar->workqueue_aux) + goto err_free_wq; + mutex_init(&ar->conf_mutex); spin_lock_init(&ar->data_lock); @@ -1626,10 +1673,12 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, ret = ath10k_debug_create(ar); if (ret) - goto err_free_wq; + goto err_free_aux_wq; return ar; +err_free_aux_wq: + destroy_workqueue(ar->workqueue_aux); err_free_wq: destroy_workqueue(ar->workqueue); @@ -1645,6 +1694,9 @@ void ath10k_core_destroy(struct ath10k *ar) flush_workqueue(ar->workqueue); destroy_workqueue(ar->workqueue); + flush_workqueue(ar->workqueue_aux); + destroy_workqueue(ar->workqueue_aux); + ath10k_debug_destroy(ar); ath10k_mac_destroy(ar); } diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 78e07051b897..6a387bac27b0 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -92,6 +92,7 @@ struct ath10k_skb_cb { u8 tid; u16 freq; bool is_offchan; + bool nohwcrypt; struct ath10k_htt_txbuf *txbuf; u32 txbuf_paddr; } __packed htt; @@ -152,6 +153,7 @@ struct ath10k_wmi { const struct wmi_ops *ops; u32 num_mem_chunks; + u32 rx_decap_mode; struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS]; }; @@ -341,6 +343,7 @@ struct ath10k_vif { } u; bool use_cts_prot; + bool nohwcrypt; int num_legacy_stations; int txpower; struct wmi_wmm_params_all_arg wmm_params; @@ -382,9 +385,6 @@ struct ath10k_debug { u32 reg_addr; u32 nf_cal_period; - u8 htt_max_amsdu; - u8 htt_max_ampdu; - struct ath10k_fw_crash_data *fw_crash_data; }; @@ -453,16 +453,21 @@ enum ath10k_fw_features { ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6, /* Don't trust error code from otp.bin */ - ATH10K_FW_FEATURE_IGNORE_OTP_RESULT, + ATH10K_FW_FEATURE_IGNORE_OTP_RESULT = 7, /* Some firmware revisions pad 4th hw address to 4 byte boundary making * it 8 bytes long in Native Wifi Rx decap. */ - ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING, + ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING = 8, /* Firmware supports bypassing PLL setting on init. */ ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT = 9, + /* Raw mode support. If supported, FW supports receiving and transmitting + * frames in raw mode. + */ + ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10, + /* keep last */ ATH10K_FW_FEATURE_COUNT, }; @@ -476,6 +481,15 @@ enum ath10k_dev_flags { * waiters should immediately cancel instead of waiting for a time out. */ ATH10K_FLAG_CRASH_FLUSH, + + /* Use Raw mode instead of native WiFi Tx/Rx encap mode. + * Raw mode supports both hardware and software crypto. Native WiFi only + * supports hardware crypto.
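
A small but deliberate detail in the core.h hunk above: the existing feature bits gain explicit values (= 7, = 8) at the same time RAW_MODE_SUPPORT = 10 appears. These bit positions are ABI carried in the firmware image metadata, so pinning them prevents a later insertion from silently renumbering everything below it. In the abstract:

/* feature bits shared with firmware metadata: pin every value */
enum foo_fw_features {
        FOO_FW_FEATURE_X = 0,
        FOO_FW_FEATURE_Y = 1,
        /* gaps are fine; renumbering existing entries is not */
        FOO_FW_FEATURE_RAW_MODE = 10,

        FOO_FW_FEATURE_COUNT,   /* keep last */
};
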
+ */ + ATH10K_FLAG_RAW_MODE, + + /* Disable HW crypto engine */ + ATH10K_FLAG_HW_CRYPTO_DISABLED, }; enum ath10k_cal_mode { @@ -484,6 +498,13 @@ enum ath10k_cal_mode { ATH10K_CAL_MODE_DT, }; +enum ath10k_crypt_mode { + /* Only use hardware crypto engine */ + ATH10K_CRYPT_MODE_HW, + /* Only use software crypto engine */ + ATH10K_CRYPT_MODE_SW, +}; + static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode) { switch (mode) { @@ -673,6 +694,8 @@ struct ath10k { struct completion vdev_setup_done; struct workqueue_struct *workqueue; + /* Auxiliary workqueue */ + struct workqueue_struct *workqueue_aux; /* prevents concurrent FW reconfiguration */ struct mutex conf_mutex; @@ -695,6 +718,9 @@ struct ath10k { int num_active_peers; int num_tids; + struct work_struct svc_rdy_work; + struct sk_buff *svc_rdy_skb; + struct work_struct offchan_tx_work; struct sk_buff_head offchan_tx_queue; struct completion offchan_tx_completed; diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index edf6047997a7..f7aa1c73b481 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -124,11 +124,11 @@ EXPORT_SYMBOL(ath10k_info); void ath10k_print_driver_info(struct ath10k *ar) { - char fw_features[128]; + char fw_features[128] = {}; ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features)); - ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d features %s\n", + ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n", ar->hw_params.name, ar->target_version, ar->chip_id, @@ -144,6 +144,8 @@ void ath10k_print_driver_info(struct ath10k *ar) ar->htt.op_version, ath10k_cal_mode_str(ar->cal_mode), ar->max_num_stations, + test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags), + !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags), fw_features); ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n", config_enabled(CONFIG_ATH10K_DEBUG), @@ -1363,12 +1365,8 @@ static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file, mutex_lock(&ar->conf_mutex); - if (ar->debug.htt_max_amsdu) - amsdu = ar->debug.htt_max_amsdu; - - if (ar->debug.htt_max_ampdu) - ampdu = ar->debug.htt_max_ampdu; - + amsdu = ar->htt.max_num_amsdu; + ampdu = ar->htt.max_num_ampdu; mutex_unlock(&ar->conf_mutex); len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu); @@ -1402,8 +1400,8 @@ static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file, goto out; res = count; - ar->debug.htt_max_amsdu = amsdu; - ar->debug.htt_max_ampdu = ampdu; + ar->htt.max_num_amsdu = amsdu; + ar->htt.max_num_ampdu = ampdu; out: mutex_unlock(&ar->conf_mutex); @@ -1905,9 +1903,6 @@ void ath10k_debug_stop(struct ath10k *ar) if (ar->debug.htt_stats_mask != 0) cancel_delayed_work(&ar->debug.htt_stats_dwork); - ar->debug.htt_max_amsdu = 0; - ar->debug.htt_max_ampdu = 0; - ath10k_wmi_pdev_pktlog_disable(ar); } diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 4474c3e839db..3e6ba63dfdff 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -246,12 +246,31 @@ int ath10k_htt_setup(struct ath10k_htt *htt) } status = ath10k_htt_verify_version(htt); - if (status) + if (status) { + ath10k_warn(ar, "failed to verify htt version: %d\n", + status); return status; + } status = ath10k_htt_send_frag_desc_bank_cfg(htt); if (status) 
return status; - return ath10k_htt_send_rx_ring_cfg_ll(htt); + status = ath10k_htt_send_rx_ring_cfg_ll(htt); + if (status) { + ath10k_warn(ar, "failed to setup rx ring: %d\n", + status); + return status; + } + + status = ath10k_htt_h2t_aggr_cfg_msg(htt, + htt->max_num_ampdu, + htt->max_num_amsdu); + if (status) { + ath10k_warn(ar, "failed to setup amsdu/ampdu limit: %d\n", + status); + return status; + } + + return 0; } diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 8bdf1e7dd171..573187512895 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -83,15 +83,39 @@ struct htt_ver_req { * around the mask + shift defs. */ struct htt_data_tx_desc_frag { - __le32 paddr; - __le32 len; + union { + struct double_word_addr { + __le32 paddr; + __le32 len; + } __packed dword_addr; + struct triple_word_addr { + __le32 paddr_lo; + __le16 paddr_hi; + __le16 len_16; + } __packed tword_addr; + } __packed; } __packed; struct htt_msdu_ext_desc { - __le32 tso_flag[4]; + __le32 tso_flag[3]; + __le16 ip_identification; + u8 flags; + u8 reserved; struct htt_data_tx_desc_frag frags[6]; }; +#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE BIT(0) +#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1) +#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2) +#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE BIT(3) +#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE BIT(4) + +#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \ + | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \ + | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \ + | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \ + | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE) + enum htt_data_tx_desc_flags0 { HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0, HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1, @@ -260,6 +284,9 @@ struct htt_aggr_conf { } __packed; #define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32 +struct htt_mgmt_tx_desc_qca99x0 { + __le32 rate; +} __packed; struct htt_mgmt_tx_desc { u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)]; @@ -268,6 +295,9 @@ struct htt_mgmt_tx_desc { __le32 len; __le32 vdev_id; u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN]; + union { + struct htt_mgmt_tx_desc_qca99x0 qca99x0; + } __packed; } __packed; enum htt_mgmt_tx_status { @@ -1366,6 +1396,8 @@ struct ath10k_htt { u8 target_version_minor; struct completion target_version_received; enum ath10k_fw_htt_op_version op_version; + u8 max_num_amsdu; + u8 max_num_ampdu; const enum htt_t2h_msg_type *t2h_msg_types; u32 t2h_msg_types_max; @@ -1528,6 +1560,12 @@ struct htt_rx_desc { #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */ #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1) +/* These values are default in most firmware revisions and apparently are a + * sweet spot performance wise. 
+ */ +#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3 +#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64 + int ath10k_htt_connect(struct ath10k_htt *htt); int ath10k_htt_init(struct ath10k *ar); int ath10k_htt_setup(struct ath10k_htt *htt); diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index d7d118328f31..1b7a04366256 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -368,7 +368,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags) & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR | RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR)); - msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0), + msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0), RX_MSDU_START_INFO0_MSDU_LENGTH); msdu_chained = rx_desc->frag_info.ring2_more_count; @@ -394,7 +394,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, msdu_chaining = 1; } - last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) & + last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) & RX_MSDU_END_INFO0_LAST_MSDU; trace_ath10k_htt_rx_desc(ar, &rx_desc->attention, @@ -740,7 +740,7 @@ ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd) __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID)) return NULL; - if (!(rxd->msdu_end.info0 & + if (!(rxd->msdu_end.common.info0 & __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU))) return NULL; @@ -991,9 +991,9 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, bool is_last; rxd = (void *)msdu->data - sizeof(*rxd); - is_first = !!(rxd->msdu_end.info0 & + is_first = !!(rxd->msdu_end.common.info0 & __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); - is_last = !!(rxd->msdu_end.info0 & + is_last = !!(rxd->msdu_end.common.info0 & __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); /* Delivered decapped frame: @@ -1017,9 +1017,8 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, skb_trim(msdu, msdu->len - FCS_LEN); /* In most cases this will be true for sniffed frames. It makes sense - * to deliver them as-is without stripping the crypto param. This would - * also make sense for software based decryption (which is not - * implemented in ath10k). + * to deliver them as-is without stripping the crypto param. This is + * necessary for software based decryption. * * If there's no error then the frame is decrypted. At least that is * the case for frames that come in via fragmented rx indication. 
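[The htt_rx.c accessors above and below all go through ath10k's MS() mask/shift helper. For reference, a self-contained sketch of the MS()/SM() convention; the two macros match hw.h, and the msdu-length field constants mirror rx_desc.h, but treat the exact values here as illustrative.]

/* A field FOO is described by a FOO_MASK/FOO_LSB pair; MS() extracts it
 * from a descriptor word and SM() packs a value back into field position.
 */
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)

/* Example field, as read by ath10k_htt_rx_amsdu_pop() above: */
#define RX_MSDU_START_INFO0_MSDU_LENGTH_MASK 0x00003fff
#define RX_MSDU_START_INFO0_MSDU_LENGTH_LSB 0

/*
 * u32 info0 = __le32_to_cpu(rx_desc->msdu_start.common.info0);
 * u32 msdu_len = MS(info0, RX_MSDU_START_INFO0_MSDU_LENGTH);
 */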
@@ -1104,9 +1103,9 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar, rxd = (void *)msdu->data - sizeof(*rxd); hdr = (void *)rxd->rx_hdr_status; - is_first = !!(rxd->msdu_end.info0 & + is_first = !!(rxd->msdu_end.common.info0 & __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)); - is_last = !!(rxd->msdu_end.info0 & + is_last = !!(rxd->msdu_end.common.info0 & __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)); is_amsdu = !(is_first && is_last); @@ -1214,7 +1213,7 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar, */ rxd = (void *)msdu->data - sizeof(*rxd); - decap = MS(__le32_to_cpu(rxd->msdu_start.info1), + decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1), RX_MSDU_START_INFO1_DECAP_FORMAT); switch (decap) { @@ -1244,7 +1243,7 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb) rxd = (void *)skb->data - sizeof(*rxd); flags = __le32_to_cpu(rxd->attention.flags); - info = __le32_to_cpu(rxd->msdu_start.info1); + info = __le32_to_cpu(rxd->msdu_start.common.info1); is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO); is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO); @@ -1437,7 +1436,7 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar, first = skb_peek(amsdu); rxd = (void *)first->data - sizeof(*rxd); - decap = MS(__le32_to_cpu(rxd->msdu_start.info1), + decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1), RX_MSDU_START_INFO1_DECAP_FORMAT); if (!chained) @@ -1631,8 +1630,6 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar, __le16 msdu_id; int i; - lockdep_assert_held(&htt->tx_lock); - switch (status) { case HTT_DATA_TX_STATUS_NO_ACK: tx_done.no_ack = true; @@ -1757,14 +1754,14 @@ static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list, __skb_queue_tail(amsdu, msdu); rxd = (void *)msdu->data - sizeof(*rxd); - if (rxd->msdu_end.info0 & + if (rxd->msdu_end.common.info0 & __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU)) break; } msdu = skb_peek_tail(amsdu); rxd = (void *)msdu->data - sizeof(*rxd); - if (!(rxd->msdu_end.info0 & + if (!(rxd->msdu_end.common.info0 & __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) { skb_queue_splice_init(amsdu, list); return -EAGAIN; @@ -1998,15 +1995,11 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } - spin_lock_bh(&htt->tx_lock); ath10k_txrx_tx_unref(htt, &tx_done); - spin_unlock_bh(&htt->tx_lock); break; } case HTT_T2H_MSG_TYPE_TX_COMPL_IND: - spin_lock_bh(&htt->tx_lock); - __skb_queue_tail(&htt->tx_compl_q, skb); - spin_unlock_bh(&htt->tx_lock); + skb_queue_tail(&htt->tx_compl_q, skb); tasklet_schedule(&htt->txrx_compl_task); return; case HTT_T2H_MSG_TYPE_SEC_IND: { @@ -2072,6 +2065,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; case HTT_T2H_MSG_TYPE_CHAN_CHANGE: break; + case HTT_T2H_MSG_TYPE_AGGR_CONF: + break; case HTT_T2H_MSG_TYPE_EN_STATS: case HTT_T2H_MSG_TYPE_TX_FETCH_IND: case HTT_T2H_MSG_TYPE_TX_FETCH_CONF: @@ -2095,12 +2090,10 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) struct htt_resp *resp; struct sk_buff *skb; - spin_lock_bh(&htt->tx_lock); - while ((skb = __skb_dequeue(&htt->tx_compl_q))) { + while ((skb = skb_dequeue(&htt->tx_compl_q))) { ath10k_htt_rx_frm_tx_compl(htt->ar, skb); dev_kfree_skb_any(skb); } - spin_unlock_bh(&htt->tx_lock); spin_lock_bh(&htt->rx_ring.lock); while ((skb = __skb_dequeue(&htt->rx_compl_q))) { diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 148d5b607c3c..704bb5e07193 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ 
b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -63,7 +63,8 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb) lockdep_assert_held(&htt->tx_lock); - ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC); + ret = idr_alloc(&htt->pending_tx, skb, 0, + htt->max_num_pending_tx, GFP_ATOMIC); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret); @@ -133,9 +134,7 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx) tx_done.discard = 1; tx_done.msdu_id = msdu_id; - spin_lock_bh(&htt->tx_lock); ath10k_txrx_tx_unref(htt, &tx_done); - spin_unlock_bh(&htt->tx_lock); return 0; } @@ -259,6 +258,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc); cmd->frag_desc_bank_cfg.bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr); + cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0; cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx - 1); @@ -427,12 +427,11 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); + spin_unlock_bh(&htt->tx_lock); if (res < 0) { - spin_unlock_bh(&htt->tx_lock); goto err_tx_dec; } msdu_id = res; - spin_unlock_bh(&htt->tx_lock); txdesc = ath10k_htc_alloc_skb(ar, len); if (!txdesc) { @@ -448,6 +447,8 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) skb_put(txdesc, len); cmd = (struct htt_cmd *)txdesc->data; + memset(cmd, 0, len); + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX; cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); cmd->mgmt_tx.len = __cpu_to_le32(msdu->len); @@ -494,6 +495,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) u16 msdu_id, flags1 = 0; dma_addr_t paddr = 0; u32 frags_paddr = 0; + struct htt_msdu_ext_desc *ext_desc = NULL; res = ath10k_htt_tx_inc_pending(htt); if (res) @@ -501,12 +503,11 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); + spin_unlock_bh(&htt->tx_lock); if (res < 0) { - spin_unlock_bh(&htt->tx_lock); goto err_tx_dec; } msdu_id = res; - spin_unlock_bh(&htt->tx_lock); prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = roundup(prefetch_len, 4); @@ -522,8 +523,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && - ieee80211_has_protected(hdr->frame_control)) + ieee80211_has_protected(hdr->frame_control)) { skb_put(msdu, IEEE80211_CCMP_MIC_LEN); + } else if (!skb_cb->htt.nohwcrypt && + skb_cb->txmode == ATH10K_HW_TXRX_RAW) { + skb_put(msdu, IEEE80211_CCMP_MIC_LEN); + } skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, DMA_TO_DEVICE); @@ -537,16 +542,30 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; /* pass through */ case ATH10K_HW_TXRX_ETHERNET: - frags = skb_cb->htt.txbuf->frags; + if (ar->hw_params.continuous_frag_desc) { + memset(&htt->frag_desc.vaddr[msdu_id], 0, + sizeof(struct htt_msdu_ext_desc)); + frags = (struct htt_data_tx_desc_frag *) + &htt->frag_desc.vaddr[msdu_id].frags; + ext_desc = &htt->frag_desc.vaddr[msdu_id]; + frags[0].tword_addr.paddr_lo = + __cpu_to_le32(skb_cb->paddr); + frags[0].tword_addr.paddr_hi = 0; + frags[0].tword_addr.len_16 = 
__cpu_to_le16(msdu->len); - frags[0].paddr = __cpu_to_le32(skb_cb->paddr); - frags[0].len = __cpu_to_le32(msdu->len); - frags[1].paddr = 0; - frags[1].len = 0; + frags_paddr = htt->frag_desc.paddr + + (sizeof(struct htt_msdu_ext_desc) * msdu_id); + } else { + frags = skb_cb->htt.txbuf->frags; + frags[0].dword_addr.paddr = + __cpu_to_le32(skb_cb->paddr); + frags[0].dword_addr.len = __cpu_to_le32(msdu->len); + frags[1].dword_addr.paddr = 0; + frags[1].dword_addr.len = 0; + frags_paddr = skb_cb->htt.txbuf_paddr; + } flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); - - frags_paddr = skb_cb->htt.txbuf_paddr; break; case ATH10K_HW_TXRX_MGMT: flags0 |= SM(ATH10K_HW_TXRX_MGMT, @@ -580,14 +599,20 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) prefetch_len); skb_cb->htt.txbuf->htc_hdr.flags = 0; + if (skb_cb->htt.nohwcrypt) + flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; + if (!skb_cb->is_protected) flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); - if (msdu->ip_summed == CHECKSUM_PARTIAL) { + if (msdu->ip_summed == CHECKSUM_PARTIAL && + !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; + if (ar->hw_params.continuous_frag_desc) + ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE; } /* Prevent firmware from sending up tx inspection requests. There's diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 917228517546..d9de4a738470 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -217,14 +217,16 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, #define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0) /* Known pecularities: - * - current FW doesn't support raw rx mode (last tested v599) - * - current FW dumps upon raw tx mode (last tested v599) * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap * - raw have FCS, nwifi doesn't * - ethernet frames have 802.11 header decapped and parts (base hdr, cipher * param, llc/snap) are aligned to 4byte boundaries each */ enum ath10k_hw_txrx_mode { ATH10K_HW_TXRX_RAW = 0, + + /* Native Wifi decap mode is used to align IP frames to 4-byte + * boundaries and avoid a very expensive re-alignment in mac80211. + */ ATH10K_HW_TXRX_NATIVE_WIFI = 1, ATH10K_HW_TXRX_ETHERNET = 2, @@ -286,10 +288,6 @@ enum ath10k_hw_rate_cck { #define TARGET_RX_TIMEOUT_LO_PRI 100 #define TARGET_RX_TIMEOUT_HI_PRI 40 -/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and - * avoid a very expensive re-alignment in mac80211. */ -#define TARGET_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI - #define TARGET_SCAN_MAX_PENDING_REQS 4 #define TARGET_BMISS_OFFLOAD_MAX_VDEV 3 #define TARGET_ROAM_OFFLOAD_MAX_VDEV 3 @@ -324,7 +322,6 @@ enum ath10k_hw_rate_cck { #define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) #define TARGET_10X_RX_TIMEOUT_LO_PRI 100 #define TARGET_10X_RX_TIMEOUT_HI_PRI 40 -#define TARGET_10X_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI #define TARGET_10X_SCAN_MAX_PENDING_REQS 4 #define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2 #define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2 @@ -363,10 +360,7 @@ enum ath10k_hw_rate_cck { (TARGET_10_4_NUM_VDEVS)) #define TARGET_10_4_ACTIVE_PEERS 0 -/* TODO: increase qcache max client limit to 512 after - * testing with 512 client. 
- */ -#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 256 +#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512 #define TARGET_10_4_QCACHE_ACTIVE_PEERS 50 #define TARGET_10_4_NUM_OFFLOAD_PEERS 0 #define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0 diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index c9a7d5b5dffc..96f4285e93b8 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -197,6 +197,10 @@ static int ath10k_send_key(struct ath10k_vif *arvif, return -EOPNOTSUPP; } + if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + } + if (cmd == DISABLE_KEY) { arg.key_cipher = WMI_CIPHER_NONE; arg.key_data = NULL; @@ -218,6 +222,9 @@ static int ath10k_install_key(struct ath10k_vif *arvif, reinit_completion(&ar->install_key_done); + if (arvif->nohwcrypt) + return 1; + ret = ath10k_send_key(arvif, key, cmd, macaddr, flags); if (ret) return ret; @@ -256,7 +263,7 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY, addr, flags); - if (ret) + if (ret < 0) return ret; flags = 0; @@ -264,7 +271,7 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY, addr, flags); - if (ret) + if (ret < 0) return ret; spin_lock_bh(&ar->data_lock); @@ -322,10 +329,10 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif, /* key flags are not required to delete the key */ ret = ath10k_install_key(arvif, peer->keys[i], DISABLE_KEY, addr, flags); - if (ret && first_errno == 0) + if (ret < 0 && first_errno == 0) first_errno = ret; - if (ret) + if (ret < 0) ath10k_warn(ar, "failed to remove peer wep key %d: %d\n", i, ret); @@ -398,7 +405,7 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, break; /* key flags are not required to delete the key */ ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags); - if (ret && first_errno == 0) + if (ret < 0 && first_errno == 0) first_errno = ret; if (ret) @@ -591,11 +598,19 @@ ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw, static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr, enum wmi_peer_type peer_type) { + struct ath10k_vif *arvif; + int num_peers = 0; int ret; lockdep_assert_held(&ar->conf_mutex); - if (ar->num_peers >= ar->max_num_peers) + num_peers = ar->num_peers; + + /* Each vdev consumes a peer entry as well */ + list_for_each_entry(arvif, &ar->arvifs, list) + num_peers++; + + if (num_peers >= ar->max_num_peers) return -ENOBUFS; ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type); @@ -671,20 +686,6 @@ static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); } -static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value) -{ - struct ath10k *ar = arvif->ar; - u32 vdev_param; - - if (value != 0xFFFFFFFF) - value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold, - ATH10K_FRAGMT_THRESHOLD_MIN, - ATH10K_FRAGMT_THRESHOLD_MAX); - - vdev_param = ar->wmi.vdev_param->fragmentation_threshold; - return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); -} - static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) { int ret; @@ -836,7 +837,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar) static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) { struct cfg80211_chan_def *chandef = NULL; - struct 
ieee80211_channel *channel = chandef->chan; + struct ieee80211_channel *channel = NULL; struct wmi_vdev_start_request_arg arg = {}; int ret = 0; @@ -2502,6 +2503,9 @@ static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar, u32 param; u32 value; + if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC) + return 0; + if (!(ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | @@ -3149,13 +3153,30 @@ ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif, * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for * NativeWifi txmode - it selects AP key instead of peer key. It seems * to work with Ethernet txmode so use it. + * + * FIXME: Check if raw mode works with TDLS. */ if (ieee80211_is_data_present(fc) && sta && sta->tdls) return ATH10K_HW_TXRX_ETHERNET; + if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) + return ATH10K_HW_TXRX_RAW; + return ATH10K_HW_TXRX_NATIVE_WIFI; } +static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif, + struct sk_buff *skb) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT | + IEEE80211_TX_CTL_INJECTED; + if ((info->flags & mask) == mask) + return false; + if (vif) + return !ath10k_vif_to_arvif(vif)->nohwcrypt; + return true; +} + /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS * Control in the header. */ @@ -3322,6 +3343,7 @@ void ath10k_offchan_tx_work(struct work_struct *work) int vdev_id; int ret; unsigned long time_left; + bool tmp_peer_created = false; /* FW requirement: We must create a peer before FW will send out * an offchannel frame. Otherwise the frame will be stuck and @@ -3359,6 +3381,7 @@ void ath10k_offchan_tx_work(struct work_struct *work) if (ret) ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", peer_addr, vdev_id, ret); + tmp_peer_created = (ret == 0); } spin_lock_bh(&ar->data_lock); @@ -3374,7 +3397,7 @@ void ath10k_offchan_tx_work(struct work_struct *work) ath10k_warn(ar, "timed out waiting for offchannel skb %p\n", skb); - if (!peer) { + if (!peer && tmp_peer_created) { ret = ath10k_peer_delete(ar, vdev_id, peer_addr); if (ret) ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n", @@ -3600,6 +3623,7 @@ static void ath10k_tx(struct ieee80211_hw *hw, ATH10K_SKB_CB(skb)->htt.is_offchan = false; ATH10K_SKB_CB(skb)->htt.freq = 0; ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr); + ATH10K_SKB_CB(skb)->htt.nohwcrypt = !ath10k_tx_h_use_hwcrypto(vif, skb); ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif); ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb); ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc); @@ -3615,12 +3639,11 @@ static void ath10k_tx(struct ieee80211_hw *hw, ath10k_tx_h_8023(skb); break; case ATH10K_HW_TXRX_RAW: - /* FIXME: Packet injection isn't implemented. It should be - * doable with firmware 10.2 on qca988x. 
- */ - WARN_ON_ONCE(1); - ieee80211_free_txskb(hw, skb); - return; + if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + WARN_ON_ONCE(1); + ieee80211_free_txskb(hw, skb); + return; + } } if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { @@ -4019,6 +4042,43 @@ static u32 get_nss_from_chainmask(u16 chain_mask) return 1; } +static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif) +{ + u32 value = 0; + struct ath10k *ar = arvif->ar; + + if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC) + return 0; + + if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) + value |= SM((ar->num_rf_chains - 1), WMI_TXBF_STS_CAP_OFFSET); + + if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) + value |= SM((ar->num_rf_chains - 1), WMI_BF_SOUND_DIM_OFFSET); + + if (!value) + return 0; + + if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) + value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; + + if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) + value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER | + WMI_VDEV_PARAM_TXBF_SU_TX_BFER); + + if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) + value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; + + if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) + value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE | + WMI_VDEV_PARAM_TXBF_SU_TX_BFEE); + + return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, + ar->wmi.vdev_param->txbf, value); +} + /* * TODO: * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, @@ -4060,6 +4120,11 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, sizeof(arvif->bitrate_mask.control[i].vht_mcs)); } + if (ar->num_peers >= ar->max_num_peers) { + ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n"); + return -ENOBUFS; + } + if (ar->free_vdev_map == 0) { ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n"); ret = -EBUSY; @@ -4139,6 +4204,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, goto err; } } + if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags)) + arvif->nohwcrypt = true; + + if (arvif->nohwcrypt && + !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + ath10k_warn(ar, "cryptmode module param needed for sw crypto\n"); + goto err; + } ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n", arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, @@ -4237,16 +4310,16 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, } } - ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); + ret = ath10k_mac_set_txbf_conf(arvif); if (ret) { - ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", + ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } - ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold); + ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); if (ret) { - ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n", + ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } @@ -4728,6 +4801,9 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) return 1; + if (arvif->nohwcrypt) + return 1; + if (key->keyidx > WMI_MAX_KEY_INDEX) return -ENOSPC; @@ -4797,6 +4873,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd 
cmd, ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags); if (ret) { + WARN_ON(ret > 0); ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", arvif->vdev_id, peer_addr, ret); goto exit; @@ -4812,13 +4889,16 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2); if (ret) { + WARN_ON(ret > 0); ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n", arvif->vdev_id, peer_addr, ret); ret2 = ath10k_install_key(arvif, key, DISABLE_KEY, peer_addr, flags); - if (ret2) + if (ret2) { + WARN_ON(ret2 > 0); ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n", arvif->vdev_id, peer_addr, ret2); + } goto exit; } } @@ -5545,6 +5625,21 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) return ret; } +static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) +{ + /* Even though there's a WMI enum for the fragmentation threshold, no + * known firmware actually implements it. Moreover, it is not possible + * to delegate frame fragmentation to mac80211 because the firmware + * clears the "more fragments" bit in frame control, making it + * impossible for remote devices to reassemble frames. + * + * Hence implement a dummy callback just to say fragmentation isn't + * supported. This effectively prevents mac80211 from doing frame + * fragmentation in software. + */ + return -EOPNOTSUPP; +} + static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { @@ -6387,6 +6482,7 @@ static const struct ieee80211_ops ath10k_ops = { .remain_on_channel = ath10k_remain_on_channel, .cancel_remain_on_channel = ath10k_cancel_remain_on_channel, .set_rts_threshold = ath10k_set_rts_threshold, + .set_frag_threshold = ath10k_mac_op_set_frag_threshold, .flush = ath10k_flush, .tx_last_beacon = ath10k_tx_last_beacon, .set_antenna = ath10k_set_antenna, @@ -6892,7 +6988,6 @@ int ath10k_mac_register(struct ath10k *ar) ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL); ieee80211_hw_set(ar->hw, AP_LINK_PS); ieee80211_hw_set(ar->hw, SPECTRUM_MGMT); - ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL); ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(ar->hw, CONNECTION_MONITOR); ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK); @@ -6900,6 +6995,9 @@ int ath10k_mac_register(struct ath10k *ar) ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA); ieee80211_hw_set(ar->hw, QUEUE_CONTROL); + if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) + ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL); + ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; @@ -7003,7 +7101,8 @@ int ath10k_mac_register(struct ath10k *ar) goto err_free; } - ar->hw->netdev_features = NETIF_F_HW_CSUM; + if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) + ar->hw->netdev_features = NETIF_F_HW_CSUM; if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) { /* Init ath dfs pattern detector */ diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 5778e5277823..f00b251ec9ce 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -64,6 +64,7 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)"); static const struct pci_device_id ath10k_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */ + { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID)
}, /* PCI-E QCA99X0 V2 */ {0} }; @@ -78,6 +79,7 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = { { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, + { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, }; static void ath10k_pci_buffer_cleanup(struct ath10k *ar); @@ -2761,7 +2763,6 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar) static int ath10k_pci_cold_reset(struct ath10k *ar) { - int i; u32 val; ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n"); @@ -2777,23 +2778,18 @@ static int ath10k_pci_cold_reset(struct ath10k *ar) val |= 1; ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); - for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { - if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) & - RTC_STATE_COLD_RESET_MASK) - break; - msleep(1); - } + /* After writing into SOC_GLOBAL_RESET to put the device into + * reset and pulling it out of reset, PCIe may not be stable + * for any immediate PCIe register access; such access can cause + * bus errors, so add a delay before any PCIe access request. + */ + msleep(20); /* Pull Target, including PCIe, out of RESET. */ val &= ~1; ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); - for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) { - if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) & - RTC_STATE_COLD_RESET_MASK)) - break; - msleep(1); - } + msleep(20); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n"); diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h index 492b5a5af434..ca8d16884af1 100644 --- a/drivers/net/wireless/ath/ath10k/rx_desc.h +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h @@ -422,6 +422,12 @@ struct rx_mpdu_end { #define RX_MSDU_START_INFO1_IP_FRAG (1 << 14) #define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15) +#define RX_MSDU_START_INFO2_DA_IDX_MASK 0x000007ff +#define RX_MSDU_START_INFO2_DA_IDX_LSB 0 +#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_MASK 0x00ff0000 +#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_LSB 16 +#define RX_MSDU_START_INFO2_DA_BCAST_MCAST BIT(11) + /* The decapped header (rx_hdr_status) contains the following: * a) 802.11 header * [padding to 4 bytes] @@ -449,12 +455,23 @@ enum rx_msdu_decap_format { RX_MSDU_DECAP_8023_SNAP_LLC = 3 }; -struct rx_msdu_start { +struct rx_msdu_start_common { __le32 info0; /* %RX_MSDU_START_INFO0_ */ __le32 flow_id_crc; __le32 info1; /* %RX_MSDU_START_INFO1_ */ } __packed; +struct rx_msdu_start_qca99x0 { + __le32 info2; /* %RX_MSDU_START_INFO2_ */ +} __packed; + +struct rx_msdu_start { + struct rx_msdu_start_common common; + union { + struct rx_msdu_start_qca99x0 qca99x0; + } __packed; +} __packed; + /* * msdu_length * MSDU length in bytes after decapsulation.
This field is @@ -540,7 +557,7 @@ struct rx_msdu_start { #define RX_MSDU_END_INFO0_PRE_DELIM_ERR (1 << 30) #define RX_MSDU_END_INFO0_RESERVED_3B (1 << 31) -struct rx_msdu_end { +struct rx_msdu_end_common { __le16 ip_hdr_cksum; __le16 tcp_hdr_cksum; u8 key_id_octet; @@ -549,6 +566,36 @@ struct rx_msdu_end { __le32 info0; } __packed; +#define RX_MSDU_END_INFO1_TCP_FLAG_MASK 0x000001ff +#define RX_MSDU_END_INFO1_TCP_FLAG_LSB 0 +#define RX_MSDU_END_INFO1_L3_HDR_PAD_MASK 0x00001c00 +#define RX_MSDU_END_INFO1_L3_HDR_PAD_LSB 10 +#define RX_MSDU_END_INFO1_WINDOW_SIZE_MASK 0xffff0000 +#define RX_MSDU_END_INFO1_WINDOW_SIZE_LSB 16 +#define RX_MSDU_END_INFO1_IRO_ELIGIBLE BIT(9) + +#define RX_MSDU_END_INFO2_DA_OFFSET_MASK 0x0000003f +#define RX_MSDU_END_INFO2_DA_OFFSET_LSB 0 +#define RX_MSDU_END_INFO2_SA_OFFSET_MASK 0x00000fc0 +#define RX_MSDU_END_INFO2_SA_OFFSET_LSB 6 +#define RX_MSDU_END_INFO2_TYPE_OFFSET_MASK 0x0003f000 +#define RX_MSDU_END_INFO2_TYPE_OFFSET_LSB 12 + +struct rx_msdu_end_qca99x0 { + __le32 ipv6_crc; + __le32 tcp_seq_no; + __le32 tcp_ack_no; + __le32 info1; + __le32 info2; +} __packed; + +struct rx_msdu_end { + struct rx_msdu_end_common common; + union { + struct rx_msdu_end_qca99x0 qca99x0; + } __packed; +} __packed; + /* *ip_hdr_chksum * This can include the IP header checksum or the pseudo header @@ -870,7 +917,11 @@ struct rx_ppdu_start { #define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24) #define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25) -#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15) +#define RX_PPDU_END_INFO1_PEER_IDX_MASK 0x1ffc +#define RX_PPDU_END_INFO1_PEER_IDX_LSB 2 +#define RX_PPDU_END_INFO1_BB_DATA BIT(0) +#define RX_PPDU_END_INFO1_PEER_IDX_VALID BIT(1) +#define RX_PPDU_END_INFO1_PPDU_DONE BIT(15) struct rx_ppdu_end_common { __le32 evm_p0; @@ -891,13 +942,13 @@ struct rx_ppdu_end_common { __le32 evm_p15; __le32 tsf_timestamp; __le32 wb_timestamp; +} __packed; + +struct rx_ppdu_end_qca988x { u8 locationing_timestamp; u8 phy_err_code; __le16 flags; /* %RX_PPDU_END_FLAGS_ */ __le32 info0; /* %RX_PPDU_END_INFO0_ */ -} __packed; - -struct rx_ppdu_end_qca988x { __le16 bb_length; __le16 info1; /* %RX_PPDU_END_INFO1_ */ } __packed; @@ -909,16 +960,126 @@ struct rx_ppdu_end_qca988x { #define RX_PPDU_END_RTT_NORMAL_MODE BIT(31) struct rx_ppdu_end_qca6174 { + u8 locationing_timestamp; + u8 phy_err_code; + __le16 flags; /* %RX_PPDU_END_FLAGS_ */ + __le32 info0; /* %RX_PPDU_END_INFO0_ */ __le32 rtt; /* %RX_PPDU_END_RTT_ */ __le16 bb_length; __le16 info1; /* %RX_PPDU_END_INFO1_ */ } __packed; +#define RX_PKT_END_INFO0_RX_SUCCESS BIT(0) +#define RX_PKT_END_INFO0_ERR_TX_INTERRUPT_RX BIT(3) +#define RX_PKT_END_INFO0_ERR_OFDM_POWER_DROP BIT(4) +#define RX_PKT_END_INFO0_ERR_OFDM_RESTART BIT(5) +#define RX_PKT_END_INFO0_ERR_CCK_POWER_DROP BIT(6) +#define RX_PKT_END_INFO0_ERR_CCK_RESTART BIT(7) + +#define RX_LOCATION_INFO_RTT_CORR_VAL_MASK 0x0001ffff +#define RX_LOCATION_INFO_RTT_CORR_VAL_LSB 0 +#define RX_LOCATION_INFO_FAC_STATUS_MASK 0x000c0000 +#define RX_LOCATION_INFO_FAC_STATUS_LSB 18 +#define RX_LOCATION_INFO_PKT_BW_MASK 0x00700000 +#define RX_LOCATION_INFO_PKT_BW_LSB 20 +#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_MASK 0x01800000 +#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_LSB 23 +#define RX_LOCATION_INFO_CIR_STATUS BIT(17) +#define RX_LOCATION_INFO_RTT_MAC_PHY_PHASE BIT(25) +#define RX_LOCATION_INFO_RTT_TX_DATA_START_X BIT(26) +#define RX_LOCATION_INFO_HW_IFFT_MODE BIT(30) +#define RX_LOCATION_INFO_RX_LOCATION_VALID BIT(31) + +struct rx_pkt_end { + __le32 info0; /* 
%RX_PKT_END_INFO0_ */ + __le32 phy_timestamp_1; + __le32 phy_timestamp_2; + __le32 rx_location_info; /* %RX_LOCATION_INFO_ */ +} __packed; + +enum rx_phy_ppdu_end_info0 { + RX_PHY_PPDU_END_INFO0_ERR_RADAR = BIT(2), + RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT = BIT(3), + RX_PHY_PPDU_END_INFO0_ERR_RX_NAP = BIT(4), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_TIMING = BIT(5), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_PARITY = BIT(6), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_RATE = BIT(7), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_LENGTH = BIT(8), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_RESTART = BIT(9), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_SERVICE = BIT(10), + RX_PHY_PPDU_END_INFO0_ERR_OFDM_POWER_DROP = BIT(11), + RX_PHY_PPDU_END_INFO0_ERR_CCK_BLOCKER = BIT(12), + RX_PHY_PPDU_END_INFO0_ERR_CCK_TIMING = BIT(13), + RX_PHY_PPDU_END_INFO0_ERR_CCK_HEADER_CRC = BIT(14), + RX_PHY_PPDU_END_INFO0_ERR_CCK_RATE = BIT(15), + RX_PHY_PPDU_END_INFO0_ERR_CCK_LENGTH = BIT(16), + RX_PHY_PPDU_END_INFO0_ERR_CCK_RESTART = BIT(17), + RX_PHY_PPDU_END_INFO0_ERR_CCK_SERVICE = BIT(18), + RX_PHY_PPDU_END_INFO0_ERR_CCK_POWER_DROP = BIT(19), + RX_PHY_PPDU_END_INFO0_ERR_HT_CRC = BIT(20), + RX_PHY_PPDU_END_INFO0_ERR_HT_LENGTH = BIT(21), + RX_PHY_PPDU_END_INFO0_ERR_HT_RATE = BIT(22), + RX_PHY_PPDU_END_INFO0_ERR_HT_ZLF = BIT(23), + RX_PHY_PPDU_END_INFO0_ERR_FALSE_RADAR_EXT = BIT(24), + RX_PHY_PPDU_END_INFO0_ERR_GREEN_FIELD = BIT(25), + RX_PHY_PPDU_END_INFO0_ERR_SPECTRAL_SCAN = BIT(26), + RX_PHY_PPDU_END_INFO0_ERR_RX_DYN_BW = BIT(27), + RX_PHY_PPDU_END_INFO0_ERR_LEG_HT_MISMATCH = BIT(28), + RX_PHY_PPDU_END_INFO0_ERR_VHT_CRC = BIT(29), + RX_PHY_PPDU_END_INFO0_ERR_VHT_SIGA = BIT(30), + RX_PHY_PPDU_END_INFO0_ERR_VHT_LSIG = BIT(31), +}; + +enum rx_phy_ppdu_end_info1 { + RX_PHY_PPDU_END_INFO1_ERR_VHT_NDP = BIT(0), + RX_PHY_PPDU_END_INFO1_ERR_VHT_NSYM = BIT(1), + RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_EXT_SYM = BIT(2), + RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID0 = BIT(3), + RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID1_62 = BIT(4), + RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID63 = BIT(5), + RX_PHY_PPDU_END_INFO1_ERR_OFDM_LDPC_DECODER = BIT(6), + RX_PHY_PPDU_END_INFO1_ERR_DEFER_NAP = BIT(7), + RX_PHY_PPDU_END_INFO1_ERR_FDOMAIN_TIMEOUT = BIT(8), + RX_PHY_PPDU_END_INFO1_ERR_LSIG_REL_CHECK = BIT(9), + RX_PHY_PPDU_END_INFO1_ERR_BT_COLLISION = BIT(10), + RX_PHY_PPDU_END_INFO1_ERR_MU_FEEDBACK = BIT(11), + RX_PHY_PPDU_END_INFO1_ERR_TX_INTERRUPT_RX = BIT(12), + RX_PHY_PPDU_END_INFO1_ERR_RX_CBF = BIT(13), +}; + +struct rx_phy_ppdu_end { + __le32 info0; /* %RX_PHY_PPDU_END_INFO0_ */ + __le32 info1; /* %RX_PHY_PPDU_END_INFO1_ */ +} __packed; + +#define RX_PPDU_END_RX_TIMING_OFFSET_MASK 0x00000fff +#define RX_PPDU_END_RX_TIMING_OFFSET_LSB 0 + +#define RX_PPDU_END_RX_INFO_RX_ANTENNA_MASK 0x00ffffff +#define RX_PPDU_END_RX_INFO_RX_ANTENNA_LSB 0 +#define RX_PPDU_END_RX_INFO_TX_HT_VHT_ACK BIT(24) +#define RX_PPDU_END_RX_INFO_RX_PKT_END_VALID BIT(25) +#define RX_PPDU_END_RX_INFO_RX_PHY_PPDU_END_VALID BIT(26) +#define RX_PPDU_END_RX_INFO_RX_TIMING_OFFSET_VALID BIT(27) +#define RX_PPDU_END_RX_INFO_BB_CAPTURED_CHANNEL BIT(28) +#define RX_PPDU_END_RX_INFO_UNSUPPORTED_MU_NC BIT(29) +#define RX_PPDU_END_RX_INFO_OTP_TXBF_DISABLE BIT(30) + +struct rx_ppdu_end_qca99x0 { + struct rx_pkt_end rx_pkt_end; + struct rx_phy_ppdu_end rx_phy_ppdu_end; + __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */ + __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */ + __le16 bb_length; + __le16 info1; /* %RX_PPDU_END_INFO1_ */ +} __packed; + struct rx_ppdu_end { struct rx_ppdu_end_common common; union { struct rx_ppdu_end_qca988x qca988x; 
struct rx_ppdu_end_qca6174 qca6174; + struct rx_ppdu_end_qca99x0 qca99x0; } __packed; } __packed; diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 6cf289158840..e4a9c4c8d0cb 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -53,8 +53,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct ath10k_skb_cb *skb_cb; struct sk_buff *msdu; - lockdep_assert_held(&htt->tx_lock); - ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d success %d\n", tx_done->msdu_id, !!tx_done->discard, @@ -66,12 +64,19 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, return; } + spin_lock_bh(&htt->tx_lock); msdu = idr_find(&htt->pending_tx, tx_done->msdu_id); if (!msdu) { ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n", tx_done->msdu_id); + spin_unlock_bh(&htt->tx_lock); return; } + ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); + __ath10k_htt_tx_dec_pending(htt); + if (htt->num_pending_tx == 0) + wake_up(&htt->empty_tx_wq); + spin_unlock_bh(&htt->tx_lock); skb_cb = ATH10K_SKB_CB(msdu); @@ -90,7 +95,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, if (tx_done->discard) { ieee80211_free_txskb(htt->ar->hw, msdu); - goto exit; + return; } if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) @@ -104,12 +109,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, ieee80211_tx_status(htt->ar->hw, msdu); /* we do not own the msdu anymore */ - -exit: - ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); - __ath10k_htt_tx_dec_pending(htt); - if (htt->num_pending_tx == 0) - wake_up(&htt->empty_tx_wq); } struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id, diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index 47fe2e756bec..2591018c4dc5 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -49,6 +49,7 @@ struct wmi_ops { struct wmi_roam_ev_arg *arg); int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb, struct wmi_wow_ev_arg *arg); + enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar); struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt); struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar); @@ -319,6 +320,15 @@ ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb, return ar->wmi.ops->pull_wow_event(ar, skb, arg); } +static inline enum wmi_txbf_conf +ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar) +{ + if (!ar->wmi.ops->get_txbf_conf_scheme) + return WMI_TXBF_CONF_UNSUPPORTED; + + return ar->wmi.ops->get_txbf_conf_scheme(ar); +} + static inline int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) { diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 4189d4a90ce0..357b5a292a89 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -519,7 +519,7 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb) break; case WMI_TLV_SERVICE_READY_EVENTID: ath10k_wmi_event_service_ready(ar, skb); - break; + return; case WMI_TLV_READY_EVENTID: ath10k_wmi_event_ready(ar, skb); break; @@ -1279,6 +1279,11 @@ ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar, return skb; } +static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar) +{ + return WMI_TXBF_CONF_AFTER_ASSOC; +} + static struct sk_buff * ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id, u32 
param_value) @@ -1373,7 +1378,7 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64); cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64); cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28); - cfg->rx_decap_mode = __cpu_to_le32(1); + cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); cfg->scan_max_pending_reqs = __cpu_to_le32(4); cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS); @@ -3408,6 +3413,7 @@ static const struct wmi_ops wmi_tlv_ops = { .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats, .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev, .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev, + .get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme, .gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend, .gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume, diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 0791a4336e80..36b8f7148b51 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -3122,6 +3122,11 @@ static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar, return 0; } +static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar) +{ + return WMI_TXBF_CONF_BEFORE_ASSOC; +} + void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) { struct wmi_swba_ev_arg arg = {}; @@ -3498,7 +3503,7 @@ void ath10k_wmi_event_spectral_scan(struct ath10k *ar, fftr, fftr_len, tsf); if (res < 0) { - ath10k_warn(ar, "failed to process fft report: %d\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n", res); return; } @@ -3789,7 +3794,7 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, - GFP_ATOMIC); + GFP_KERNEL); if (!ar->wmi.mem_chunks[idx].vaddr) { ath10k_warn(ar, "failed to allocate memory chunk\n"); return -ENOMEM; @@ -3878,12 +3883,19 @@ ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb, return 0; } -void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb) +static void ath10k_wmi_event_service_ready_work(struct work_struct *work) { + struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work); + struct sk_buff *skb = ar->svc_rdy_skb; struct wmi_svc_rdy_ev_arg arg = {}; u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i; int ret; + if (!skb) { + ath10k_warn(ar, "invalid service ready event skb\n"); + return; + } + ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg); if (ret) { ath10k_warn(ar, "failed to parse service ready: %d\n", ret); @@ -4003,9 +4015,17 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb) __le32_to_cpu(arg.eeprom_rd), __le32_to_cpu(arg.num_mem_reqs)); + dev_kfree_skb(skb); + ar->svc_rdy_skb = NULL; complete(&ar->wmi.service_ready); } +void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb) +{ + ar->svc_rdy_skb = skb; + queue_work(ar->workqueue_aux, &ar->svc_rdy_work); +} + static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb, struct wmi_rdy_ev_arg *arg) { @@ -4177,7 +4197,7 @@ static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb) break; case WMI_SERVICE_READY_EVENTID: ath10k_wmi_event_service_ready(ar, skb); - break; + return; case WMI_READY_EVENTID: ath10k_wmi_event_ready(ar, skb); break; @@ -4298,7 +4318,7 @@ static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, 
struct sk_buff *skb) break; case WMI_10X_SERVICE_READY_EVENTID: ath10k_wmi_event_service_ready(ar, skb); - break; + return; case WMI_10X_READY_EVENTID: ath10k_wmi_event_ready(ar, skb); break; @@ -4409,7 +4429,7 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb) break; case WMI_10_2_SERVICE_READY_EVENTID: ath10k_wmi_event_service_ready(ar, skb); - break; + return; case WMI_10_2_READY_EVENTID: ath10k_wmi_event_ready(ar, skb); break; @@ -4461,7 +4481,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb) break; case WMI_10_4_SERVICE_READY_EVENTID: ath10k_wmi_event_service_ready(ar, skb); - break; + return; case WMI_10_4_SCAN_EVENTID: ath10k_wmi_event_scan(ar, skb); break; @@ -4688,8 +4708,7 @@ static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar) config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI); config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI); - config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE); - + config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); config.scan_max_pending_reqs = __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS); @@ -4757,8 +4776,7 @@ static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar) config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI); - config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE); - + config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); config.scan_max_pending_reqs = __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS); @@ -4823,7 +4841,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar) config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI); - config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE); + config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode); config.scan_max_pending_reqs = __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS); @@ -6431,6 +6449,7 @@ static const struct wmi_ops wmi_10_4_ops = { .pull_swba = ath10k_wmi_10_4_op_pull_swba_ev, .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev, .pull_rdy = ath10k_wmi_op_pull_rdy_ev, + .get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme, .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, @@ -6514,6 +6533,8 @@ int ath10k_wmi_attach(struct ath10k *ar) init_completion(&ar->wmi.service_ready); init_completion(&ar->wmi.unified_ready); + INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work); + return 0; } @@ -6521,6 +6542,11 @@ void ath10k_wmi_detach(struct ath10k *ar) { int i; + cancel_work_sync(&ar->svc_rdy_work); + + if (ar->svc_rdy_skb) + dev_kfree_skb(ar->svc_rdy_skb); + /* free the host memory chunks requested by firmware */ for (i = 0; i < ar->wmi.num_mem_chunks; i++) { dma_free_coherent(ar->dev, diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 0d4efc9c5796..232500a5d7bd 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -4628,6 +4628,11 @@ enum wmi_10_4_vdev_param { #define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2) #define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3) +#define WMI_TXBF_STS_CAP_OFFSET_LSB 
4 +#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0 +#define WMI_BF_SOUND_DIM_OFFSET_LSB 8 +#define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00 + /* slot time long */ #define WMI_VDEV_SLOT_TIME_LONG 0x1 /* slot time short */ @@ -6008,6 +6013,12 @@ struct wmi_tdls_peer_capab_arg { u32 pref_offchan_bw; }; +enum wmi_txbf_conf { + WMI_TXBF_CONF_UNSUPPORTED, + WMI_TXBF_CONF_BEFORE_ASSOC, + WMI_TXBF_CONF_AFTER_ASSOC, +}; + struct ath10k; struct ath10k_vif; struct ath10k_fw_stats_pdev; diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c index a68d8fd853a3..8e02b381990f 100644 --- a/drivers/net/wireless/ath/ath10k/wow.c +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -301,8 +301,26 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw) ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret); exit: + if (ret) { + switch (ar->state) { + case ATH10K_STATE_ON: + ar->state = ATH10K_STATE_RESTARTING; + ret = 1; + break; + case ATH10K_STATE_OFF: + case ATH10K_STATE_RESTARTING: + case ATH10K_STATE_RESTARTED: + case ATH10K_STATE_UTF: + case ATH10K_STATE_WEDGED: + ath10k_warn(ar, "encountered unexpected device state %d on resume, cannot recover\n", + ar->state); + ret = -EIO; + break; + } + } + mutex_unlock(&ar->conf_mutex); - return ret ? 1 : 0; + return ret; } int ath10k_wow_init(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig index 2399a3921762..b1278f9f24ba 100644 --- a/drivers/net/wireless/ath/ath5k/Kconfig +++ b/drivers/net/wireless/ath/ath5k/Kconfig @@ -5,7 +5,6 @@ config ATH5K select MAC80211_LEDS select LEDS_CLASS select NEW_LEDS - select AVERAGE select ATH5K_AHB if ATH25 select ATH5K_PCI if !ATH25 ---help--- diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c index 5c008757662b..38be2702c0e2 100644 --- a/drivers/net/wireless/ath/ath5k/ani.c +++ b/drivers/net/wireless/ath/ath5k/ani.c @@ -223,7 +223,7 @@ static void ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as, bool ofdm_trigger) { - int rssi = ewma_read(&ah->ah_beacon_rssi_avg); + int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg); ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "raise immunity (%s)", ofdm_trigger ? 
"ODFM" : "CCK"); @@ -309,7 +309,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as, static void ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as) { - int rssi = ewma_read(&ah->ah_beacon_rssi_avg); + int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg); ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "lower immunity"); diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h index e22b0e778927..fa6e89e5c421 100644 --- a/drivers/net/wireless/ath/ath5k/ath5k.h +++ b/drivers/net/wireless/ath/ath5k/ath5k.h @@ -1252,6 +1252,8 @@ struct ath5k_statistics { #define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4) /* bufs per queue */ #define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2) /* low mark */ +DECLARE_EWMA(beacon_rssi, 1024, 8) + /* Driver state associated with an instance of a device */ struct ath5k_hw { struct ath_common common; @@ -1432,7 +1434,7 @@ struct ath5k_hw { struct ath5k_nfcal_hist ah_nfcal_hist; /* average beacon RSSI in our BSS (used by ANI) */ - struct ewma ah_beacon_rssi_avg; + struct ewma_beacon_rssi ah_beacon_rssi_avg; /* noise floor from last periodic calibration */ s32 ah_noise_floor; diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 23552f43d125..342563a3706f 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -1430,7 +1430,7 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb, trace_ath5k_rx(ah, skb); if (ath_is_mybeacon(common, (struct ieee80211_hdr *)skb->data)) { - ewma_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi); + ewma_beacon_rssi_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi); /* check beacons in IBSS mode */ if (ah->opmode == NL80211_IFTYPE_ADHOC) @@ -2936,7 +2936,7 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan, ah->ah_cal_next_short = jiffies + msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT); - ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8); + ewma_beacon_rssi_init(&ah->ah_beacon_rssi_avg); /* clear survey data and cycle counters */ memset(&ah->survey, 0, sizeof(ah->survey)); diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c index c70782e8f07b..654a1e33f827 100644 --- a/drivers/net/wireless/ath/ath5k/debug.c +++ b/drivers/net/wireless/ath/ath5k/debug.c @@ -722,7 +722,7 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf, st->mib_intr); len += snprintf(buf + len, sizeof(buf) - len, "beacon RSSI average:\t%d\n", - (int)ewma_read(&ah->ah_beacon_rssi_avg)); + (int)ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg)); #define CC_PRINT(_struct, _field) \ _struct._field, \ diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h index 14cab1403dd6..112d8a9b8d43 100644 --- a/drivers/net/wireless/ath/ath6kl/htc.h +++ b/drivers/net/wireless/ath/ath6kl/htc.h @@ -427,7 +427,7 @@ struct htc_endpoint_credit_dist { }; /* - * credit distibution code that is passed into the distrbution function, + * credit distribution code that is passed into the distribution function, * there are mandatory and optional codes that must be handled */ enum htc_credit_dist_reason { diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index a7a81b3969ce..c85c47978e1e 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -172,14 +172,6 @@ struct ath_txq { struct sk_buff_head complete_q; }; -struct ath_atx_ac { - struct ath_txq *txq; - 
struct list_head list; - struct list_head tid_q; - bool clear_ps_filter; - bool sched; -}; - struct ath_frame_info { struct ath_buf *bf; u16 framelen; @@ -242,7 +234,7 @@ struct ath_atx_tid { struct sk_buff_head buf_q; struct sk_buff_head retry_q; struct ath_node *an; - struct ath_atx_ac *ac; + struct ath_txq *txq; unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)]; u16 seq_start; u16 seq_next; @@ -252,8 +244,8 @@ struct ath_atx_tid { int baw_tail; /* next unused tx buffer slot */ s8 bar_index; - bool sched; bool active; + bool clear_ps_filter; }; struct ath_node { @@ -261,7 +253,6 @@ struct ath_node { struct ieee80211_sta *sta; /* station struct we're part of */ struct ieee80211_vif *vif; /* interface with which we're associated */ struct ath_atx_tid tid[IEEE80211_NUM_TIDS]; - struct ath_atx_ac ac[IEEE80211_NUM_ACS]; u16 maxampdu; u8 mpdudensity; @@ -410,6 +401,12 @@ enum ath_offchannel_state { ATH_OFFCHANNEL_ROC_DONE, }; +enum ath_roc_complete_reason { + ATH_ROC_COMPLETE_EXPIRE, + ATH_ROC_COMPLETE_ABORT, + ATH_ROC_COMPLETE_CANCEL, +}; + struct ath_offchannel { struct ath_chanctx chan; struct timer_list timer; @@ -471,7 +468,8 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, void ath_chanctx_set_next(struct ath_softc *sc, bool force); void ath_offchannel_next(struct ath_softc *sc); void ath_scan_complete(struct ath_softc *sc, bool abort); -void ath_roc_complete(struct ath_softc *sc, bool abort); +void ath_roc_complete(struct ath_softc *sc, + enum ath_roc_complete_reason reason); struct ath_chanctx* ath_is_go_chanctx_present(struct ath_softc *sc); #else diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index 206665059d66..90f5773a1a61 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -915,18 +915,27 @@ void ath_offchannel_next(struct ath_softc *sc) } } -void ath_roc_complete(struct ath_softc *sc, bool abort) +void ath_roc_complete(struct ath_softc *sc, enum ath_roc_complete_reason reason) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); - if (abort) - ath_dbg(common, CHAN_CTX, "RoC aborted\n"); - else - ath_dbg(common, CHAN_CTX, "RoC expired\n"); - sc->offchannel.roc_vif = NULL; sc->offchannel.roc_chan = NULL; - ieee80211_remain_on_channel_expired(sc->hw); + + switch (reason) { + case ATH_ROC_COMPLETE_ABORT: + ath_dbg(common, CHAN_CTX, "RoC aborted\n"); + ieee80211_remain_on_channel_expired(sc->hw); + break; + case ATH_ROC_COMPLETE_EXPIRE: + ath_dbg(common, CHAN_CTX, "RoC expired\n"); + ieee80211_remain_on_channel_expired(sc->hw); + break; + case ATH_ROC_COMPLETE_CANCEL: + ath_dbg(common, CHAN_CTX, "RoC canceled\n"); + break; + } + ath_offchannel_next(sc); ath9k_ps_restore(sc); } @@ -1058,7 +1067,7 @@ static void ath_offchannel_timer(unsigned long data) case ATH_OFFCHANNEL_ROC_START: case ATH_OFFCHANNEL_ROC_WAIT: sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE; - ath_roc_complete(sc, false); + ath_roc_complete(sc, ATH_ROC_COMPLETE_EXPIRE); break; default: break; diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c index ffca918ff16a..c2ca57a2ed09 100644 --- a/drivers/net/wireless/ath/ath9k/debug_sta.c +++ b/drivers/net/wireless/ath/ath9k/debug_sta.c @@ -26,12 +26,11 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf, struct ath_node *an = file->private_data; struct ath_softc *sc = an->sc; struct ath_atx_tid *tid; - struct ath_atx_ac *ac; struct ath_txq *txq; u32 len = 0, size = 
4096; char *buf; size_t retval; - int tidno, acno; + int tidno; buf = kzalloc(size, GFP_KERNEL); if (buf == NULL) @@ -48,19 +47,6 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf, len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n", an->mpdudensity); - len += scnprintf(buf + len, size - len, - "%2s%7s\n", "AC", "SCHED"); - - for (acno = 0, ac = &an->ac[acno]; - acno < IEEE80211_NUM_ACS; acno++, ac++) { - txq = ac->txq; - ath_txq_lock(sc, txq); - len += scnprintf(buf + len, size - len, - "%2d%7d\n", - acno, ac->sched); - ath_txq_unlock(sc, txq); - } - len += scnprintf(buf + len, size - len, "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n", "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE", @@ -68,7 +54,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf, for (tidno = 0, tid = &an->tid[tidno]; tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { - txq = tid->ac->txq; + txq = tid->txq; ath_txq_lock(sc, txq); if (tid->active) { len += scnprintf(buf + len, size - len, @@ -80,7 +66,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf, tid->baw_head, tid->baw_tail, tid->bar_index, - tid->sched); + !list_empty(&tid->list)); } ath_txq_unlock(sc, txq); } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 39eaf9b6e9b4..1e84882f8c5b 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -74,7 +74,7 @@ static struct ath_ps_ops ath9k_htc_ps_ops = { static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv) { - int time_left; + unsigned long time_left; if (atomic_read(&priv->htc->tgt_ready) > 0) { atomic_dec(&priv->htc->tgt_ready); diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index d2408da38c1c..2294709ee8b0 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -146,7 +146,8 @@ static int htc_config_pipe_credits(struct htc_target *target) { struct sk_buff *skb; struct htc_config_pipe_msg *cp_msg; - int ret, time_left; + int ret; + unsigned long time_left; skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC); if (!skb) { @@ -184,7 +185,8 @@ static int htc_setup_complete(struct htc_target *target) { struct sk_buff *skb; struct htc_comp_msg *comp_msg; - int ret = 0, time_left; + int ret = 0; + unsigned long time_left; skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC); if (!skb) { @@ -236,7 +238,8 @@ int htc_connect_service(struct htc_target *target, struct sk_buff *skb; struct htc_endpoint *endpoint; struct htc_conn_svc_msg *conn_msg; - int ret, time_left; + int ret; + unsigned long time_left; /* Find an available endpoint */ endpoint = get_next_avail_ep(target->endpoint); diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index a31a6804dc34..1dd0339de372 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -3186,6 +3186,7 @@ static struct { { AR_SREV_VERSION_9550, "9550" }, { AR_SREV_VERSION_9565, "9565" }, { AR_SREV_VERSION_9531, "9531" }, + { AR_SREV_VERSION_9561, "9561" }, }; /* For devices with external radios */ diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index eff0e5325e6a..57f95f2dca5b 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -736,13 +736,14 @@ static const struct ieee80211_iface_limit 
if_limits_multi[] = { BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) }, { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) }, + { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) }, }; static const struct ieee80211_iface_combination if_comb_multi[] = { { .limits = if_limits_multi, .n_limits = ARRAY_SIZE(if_limits_multi), - .max_interfaces = 2, + .max_interfaces = 3, .num_different_channels = 2, .beacon_int_infra_match = true, }, @@ -826,6 +827,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING); + ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); if (ath9k_ps_enable) ieee80211_hw_set(hw, SUPPORTS_PS); @@ -855,6 +857,10 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) BIT(NL80211_IFTYPE_MESH_POINT) | BIT(NL80211_IFTYPE_WDS); + if (ath9k_is_chanctx_enabled()) + hw->wiphy->interface_modes |= + BIT(NL80211_IFTYPE_P2P_DEVICE); + hw->wiphy->iface_combinations = if_comb; hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); } diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c index 90631d768a60..5ad0feeebc86 100644 --- a/drivers/net/wireless/ath/ath9k/link.c +++ b/drivers/net/wireless/ath/ath9k/link.c @@ -172,7 +172,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ath_tx_control txctl; - int time_left; + unsigned long time_left; memset(&txctl, 0, sizeof(txctl)); txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE]; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index cfd45cb8ccfc..c27143ba9ffb 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1459,13 +1459,18 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw, u64 multicast) { struct ath_softc *sc = hw->priv; + struct ath_chanctx *ctx; u32 rfilt; changed_flags &= SUPPORTED_FILTERS; *total_flags &= SUPPORTED_FILTERS; spin_lock_bh(&sc->chan_lock); - sc->cur_chan->rxfilter = *total_flags; + ath_for_each_chanctx(sc, ctx) + ctx->rxfilter = *total_flags; +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT + sc->offchannel.chan.rxfilter = *total_flags; +#endif spin_unlock_bh(&sc->chan_lock); ath9k_ps_wakeup(sc); @@ -2246,7 +2251,7 @@ static void ath9k_cancel_pending_offchannel(struct ath_softc *sc) del_timer_sync(&sc->offchannel.timer); if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START) - ath_roc_complete(sc, true); + ath_roc_complete(sc, ATH_ROC_COMPLETE_ABORT); } if (test_bit(ATH_OP_SCANNING, &common->op_flags)) { @@ -2355,7 +2360,7 @@ static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw) if (sc->offchannel.roc_vif) { if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START) - ath_roc_complete(sc, true); + ath_roc_complete(sc, ATH_ROC_COMPLETE_CANCEL); } mutex_unlock(&sc->mutex); diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index ca533b4321bd..9c16e2a6d185 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -299,7 +299,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, sizeof(struct wmi_cmd_hdr); struct sk_buff *skb; u8 *data; - int time_left, ret = 0; + unsigned long time_left; + int ret = 0; if (ah->ah_flags & AH_UNPLUGGED) return 0; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c 
b/drivers/net/wireless/ath/ath9k/xmit.c index b766a7fc60aa..3e3dac3d7060 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -106,7 +106,6 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq, struct ath_atx_tid *tid) { - struct ath_atx_ac *ac = tid->ac; struct list_head *list; struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv; struct ath_chanctx *ctx = avp->chanctx; @@ -114,19 +113,9 @@ static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq, if (!ctx) return; - if (tid->sched) - return; - - tid->sched = true; - list_add_tail(&tid->list, &ac->tid_q); - - if (ac->sched) - return; - - ac->sched = true; - list = &ctx->acq[TID_TO_WME_AC(tid->tidno)]; - list_add_tail(&ac->list, list); + if (list_empty(&tid->list)) + list_add_tail(&tid->list, list); } static struct ath_frame_info *get_frame_info(struct sk_buff *skb) @@ -208,7 +197,7 @@ static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid) static void ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid) { - struct ath_txq *txq = tid->ac->txq; + struct ath_txq *txq = tid->txq; struct ieee80211_tx_info *tx_info; struct sk_buff *skb, *tskb; struct ath_buf *bf; @@ -237,7 +226,7 @@ ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid) static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) { - struct ath_txq *txq = tid->ac->txq; + struct ath_txq *txq = tid->txq; struct sk_buff *skb; struct ath_buf *bf; struct list_head bf_head; @@ -644,7 +633,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, ath_tx_queue_tid(sc, txq, tid); if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY)) - tid->ac->clear_ps_filter = true; + tid->clear_ps_filter = true; } } @@ -734,7 +723,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, struct ieee80211_tx_rate *rates; u32 max_4ms_framelen, frmlen; u16 aggr_limit, bt_aggr_limit, legacy = 0; - int q = tid->ac->txq->mac80211_qnum; + int q = tid->txq->mac80211_qnum; int i; skb = bf->bf_mpdu; @@ -1471,8 +1460,8 @@ static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, if (list_empty(&bf_q)) return false; - if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) { - tid->ac->clear_ps_filter = false; + if (tid->clear_ps_filter || tid->an->no_ps_filter) { + tid->clear_ps_filter = false; tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; } @@ -1491,7 +1480,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, an = (struct ath_node *)sta->drv_priv; txtid = ATH_AN_2_TID(an, tid); - txq = txtid->ac->txq; + txq = txtid->txq; ath_txq_lock(sc, txq); @@ -1525,7 +1514,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) { struct ath_node *an = (struct ath_node *)sta->drv_priv; struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); - struct ath_txq *txq = txtid->ac->txq; + struct ath_txq *txq = txtid->txq; ath_txq_lock(sc, txq); txtid->active = false; @@ -1538,7 +1527,6 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, struct ath_node *an) { struct ath_atx_tid *tid; - struct ath_atx_ac *ac; struct ath_txq *txq; bool buffered; int tidno; @@ -1546,25 +1534,18 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, for (tidno = 0, tid = &an->tid[tidno]; tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { - ac = tid->ac; - txq = ac->txq; + txq = tid->txq; 
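
The conversion in this hunk, and throughout the rest of xmit.c below, relies on a standard list_head idiom: when entries are only ever removed with list_del_init(), list_empty() on the entry itself doubles as an "is it queued?" test. That is what lets the series drop the bool sched flags and struct ath_atx_ac entirely. A minimal stand-alone sketch of the idiom, with illustrative names that are not taken from the driver:

	#include <linux/list.h>

	struct tid_entry {
		struct list_head list;	/* empty <=> not scheduled */
	};

	static LIST_HEAD(sched_list);

	static void tid_schedule(struct tid_entry *tid)
	{
		/* list_empty() on the node is a valid membership test only
		 * because removals use list_del_init(), which re-initializes
		 * the node instead of leaving it poisoned
		 */
		if (list_empty(&tid->list))
			list_add_tail(&tid->list, &sched_list);
	}

	static void tid_unschedule(struct tid_entry *tid)
	{
		if (!list_empty(&tid->list))
			list_del_init(&tid->list);
	}

Each node must start out initialized for the test to hold, which is exactly what the INIT_LIST_HEAD(&tid->list) call added to ath_tx_node_init() further down provides.
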
ath_txq_lock(sc, txq); - if (!tid->sched) { + if (list_empty(&tid->list)) { ath_txq_unlock(sc, txq); continue; } buffered = ath_tid_has_buffered(tid); - tid->sched = false; - list_del(&tid->list); - - if (ac->sched) { - ac->sched = false; - list_del(&ac->list); - } + list_del_init(&tid->list); ath_txq_unlock(sc, txq); @@ -1575,18 +1556,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) { struct ath_atx_tid *tid; - struct ath_atx_ac *ac; struct ath_txq *txq; int tidno; for (tidno = 0, tid = &an->tid[tidno]; tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { - ac = tid->ac; - txq = ac->txq; + txq = tid->txq; ath_txq_lock(sc, txq); - ac->clear_ps_filter = true; + tid->clear_ps_filter = true; if (ath_tid_has_buffered(tid)) { ath_tx_queue_tid(sc, txq, tid); @@ -1606,7 +1585,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, an = (struct ath_node *)sta->drv_priv; tid = ATH_AN_2_TID(an, tidno); - txq = tid->ac->txq; + txq = tid->txq; ath_txq_lock(sc, txq); @@ -1645,7 +1624,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, tid = ATH_AN_2_TID(an, i); - ath_txq_lock(sc, tid->ac->txq); + ath_txq_lock(sc, tid->txq); while (nframes > 0) { bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q); if (!bf) @@ -1669,7 +1648,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, if (an->sta && !ath_tid_has_buffered(tid)) ieee80211_sta_set_buffered(an->sta, i, false); } - ath_txq_unlock_complete(sc, tid->ac->txq); + ath_txq_unlock_complete(sc, tid->txq); } if (list_empty(&bf_q)) @@ -1918,9 +1897,8 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath_atx_ac *ac, *last_ac; struct ath_atx_tid *tid, *last_tid; - struct list_head *ac_list; + struct list_head *tid_list; bool sent = false; if (txq->mac80211_qnum < 0) @@ -1930,63 +1908,45 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) return; spin_lock_bh(&sc->chan_lock); - ac_list = &sc->cur_chan->acq[txq->mac80211_qnum]; + tid_list = &sc->cur_chan->acq[txq->mac80211_qnum]; - if (list_empty(ac_list)) { + if (list_empty(tid_list)) { spin_unlock_bh(&sc->chan_lock); return; } rcu_read_lock(); - last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list); - while (!list_empty(ac_list)) { + last_tid = list_entry(tid_list->prev, struct ath_atx_tid, list); + while (!list_empty(tid_list)) { bool stop = false; if (sc->cur_chan->stopped) break; - ac = list_first_entry(ac_list, struct ath_atx_ac, list); - last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list); - list_del(&ac->list); - ac->sched = false; + tid = list_first_entry(tid_list, struct ath_atx_tid, list); + list_del_init(&tid->list); - while (!list_empty(&ac->tid_q)) { + if (ath_tx_sched_aggr(sc, txq, tid, &stop)) + sent = true; - tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, - list); - list_del(&tid->list); - tid->sched = false; - - if (ath_tx_sched_aggr(sc, txq, tid, &stop)) - sent = true; - - /* - * add tid to round-robin queue if more frames - * are pending for the tid - */ - if (ath_tid_has_buffered(tid)) - ath_tx_queue_tid(sc, txq, tid); - - if (stop || tid == last_tid) - break; - } - - if (!list_empty(&ac->tid_q) && !ac->sched) { - ac->sched = true; - list_add_tail(&ac->list, ac_list); - } + /* + * add tid to round-robin queue if more frames + * are pending for the tid + */ + if 
(ath_tid_has_buffered(tid)) + ath_tx_queue_tid(sc, txq, tid); if (stop) break; - if (ac == last_ac) { + if (tid == last_tid) { if (!sent) break; sent = false; - last_ac = list_entry(ac_list->prev, - struct ath_atx_ac, list); + last_tid = list_entry(tid_list->prev, + struct ath_atx_tid, list); } } @@ -2376,10 +2336,10 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, txq = sc->tx.uapsdq; ath_txq_lock(sc, txq); } else if (txctl->an && queue) { - WARN_ON(tid->ac->txq != txctl->txq); + WARN_ON(tid->txq != txctl->txq); if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) - tid->ac->clear_ps_filter = true; + tid->clear_ps_filter = true; /* * Add this frame to software queue for scheduling later @@ -2873,7 +2833,6 @@ int ath_tx_init(struct ath_softc *sc, int nbufs) void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) { struct ath_atx_tid *tid; - struct ath_atx_ac *ac; int tidno, acno; for (tidno = 0, tid = &an->tid[tidno]; @@ -2884,26 +2843,18 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) tid->seq_start = tid->seq_next = 0; tid->baw_size = WME_MAX_BA; tid->baw_head = tid->baw_tail = 0; - tid->sched = false; tid->active = false; + tid->clear_ps_filter = true; __skb_queue_head_init(&tid->buf_q); __skb_queue_head_init(&tid->retry_q); + INIT_LIST_HEAD(&tid->list); acno = TID_TO_WME_AC(tidno); - tid->ac = &an->ac[acno]; - } - - for (acno = 0, ac = &an->ac[acno]; - acno < IEEE80211_NUM_ACS; acno++, ac++) { - ac->sched = false; - ac->clear_ps_filter = true; - ac->txq = sc->tx.txq_map[acno]; - INIT_LIST_HEAD(&ac->tid_q); + tid->txq = sc->tx.txq_map[acno]; } } void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) { - struct ath_atx_ac *ac; struct ath_atx_tid *tid; struct ath_txq *txq; int tidno; @@ -2911,20 +2862,12 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) for (tidno = 0, tid = &an->tid[tidno]; tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { - ac = tid->ac; - txq = ac->txq; + txq = tid->txq; ath_txq_lock(sc, txq); - if (tid->sched) { - list_del(&tid->list); - tid->sched = false; - } - - if (ac->sched) { - list_del(&ac->list); - tid->ac->sched = false; - } + if (!list_empty(&tid->list)) + list_del_init(&tid->list); ath_tid_drain(sc, txq, tid); tid->active = false; diff --git a/drivers/net/wireless/ath/debug.c b/drivers/net/wireless/ath/debug.c index 508eccf5d982..d59d83e0ce4b 100644 --- a/drivers/net/wireless/ath/debug.c +++ b/drivers/net/wireless/ath/debug.c @@ -40,6 +40,8 @@ const char *ath_opmode_to_string(enum nl80211_iftype opmode) return "P2P-CLIENT"; case NL80211_IFTYPE_P2P_GO: return "P2P-GO"; + case NL80211_IFTYPE_OCB: + return "OCB"; default: return "UNKNOWN"; } diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile index 050506f842e9..64b432625fbb 100644 --- a/drivers/net/wireless/ath/wil6210/Makefile +++ b/drivers/net/wireless/ath/wil6210/Makefile @@ -12,6 +12,7 @@ wil6210-y += debug.o wil6210-y += rx_reorder.o wil6210-y += ioctl.o wil6210-y += fw.o +wil6210-y += pm.o wil6210-y += pmc.o wil6210-$(CONFIG_WIL6210_TRACING) += trace.o wil6210-y += wil_platform.o diff --git a/drivers/net/wireless/ath/wil6210/boot_loader.h b/drivers/net/wireless/ath/wil6210/boot_loader.h new file mode 100644 index 000000000000..c131b5e1292f --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/boot_loader.h @@ -0,0 +1,61 @@ +/* Copyright (c) 2015 Qualcomm Atheros, Inc. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* This file contains the definitions for the boot loader + * for the Qualcomm "Sparrow" 60 Gigabit wireless solution. + */ +#ifndef BOOT_LOADER_EXPORT_H_ +#define BOOT_LOADER_EXPORT_H_ + +struct bl_dedicated_registers_v1 { + __le32 boot_loader_ready; /* 0x880A3C driver will poll + * this Dword until BL will + * set it to 1 (initial value + * should be 0) + */ + __le32 boot_loader_struct_version; /* 0x880A40 BL struct ver. */ + __le16 rf_type; /* 0x880A44 connected RF ID */ + __le16 rf_status; /* 0x880A46 RF status, + * 0 is OK else error + */ + __le32 baseband_type; /* 0x880A48 board type ID */ + u8 mac_address[6]; /* 0x880A4c BL mac address */ + u8 bl_version_major; /* 0x880A52 BL ver. major */ + u8 bl_version_minor; /* 0x880A53 BL ver. minor */ + __le16 bl_version_subminor; /* 0x880A54 BL ver. subminor */ + __le16 bl_version_build; /* 0x880A56 BL ver. build */ + /* valid only for version 2 and above */ + __le32 bl_assert_code; /* 0x880A58 BL Assert code */ + __le32 bl_assert_blink; /* 0x880A5C BL Assert Branch */ + __le32 bl_reserved[22]; /* 0x880A60 - 0x880AB4 */ + __le32 bl_magic_number; /* 0x880AB8 BL Magic number */ +} __packed; + +/* the following struct is the version 0 struct */ + +struct bl_dedicated_registers_v0 { + __le32 boot_loader_ready; /* 0x880A3C driver will poll + * this Dword until BL will + * set it to 1 (initial value + * should be 0) + */ +#define BL_READY (1) /* ready indication */ + __le32 boot_loader_struct_version; /* 0x880A40 BL struct ver. */ + __le32 rf_type; /* 0x880A44 connected RF ID */ + __le32 baseband_type; /* 0x880A48 board type ID */ + u8 mac_address[6]; /* 0x880A4c BL mac address */ +} __packed; + +#endif /* BOOT_LOADER_EXPORT_H_ */ diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index e4be2d9bbac4..20d07ef679e8 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -336,12 +336,9 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, else wil_dbg_misc(wil, "Scan has no IE's\n"); - rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, - request->ie); - if (rc) { - wil_err(wil, "Aborting scan, set_ie failed: %d\n", rc); + rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie); + if (rc) goto out; - } rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) + cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0])); @@ -462,10 +459,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, * ies in FW. 
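
The wil_fix_bcon() change below slices the IEs out of the full probe-response template: everything past the fixed management header, located via offsetof(struct ieee80211_mgmt, u.probe_resp.variable), is the variable IE blob, and the RSN IE is then searched for in that blob. A self-contained sketch of the slicing, using a stand-in layout rather than the real ieee80211_mgmt (field sizes follow 802.11: 24-byte header plus timestamp, beacon interval and capability):

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	/* stand-in for ieee80211_mgmt's probe_resp layout */
	struct probe_resp {
		uint8_t hdr[24];	/* 802.11 management header */
		uint8_t timestamp[8];
		uint8_t beacon_int[2];
		uint8_t capab_info[2];
		uint8_t variable[];	/* IEs start here, offset 36 */
	};

	int main(void)
	{
		size_t hlen = offsetof(struct probe_resp, variable);
		uint8_t frame[64] = { 0 };	/* pretend probe-resp template */
		size_t frame_len = sizeof(frame);

		if (frame_len > hlen)	/* same guard wil_fix_bcon() uses */
			printf("%zu bytes of IEs at offset %zu, first byte 0x%02x\n",
			       frame_len - hlen, hlen, frame[hlen]);
		return 0;
	}
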
*/ rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie); - if (rc) { - wil_err(wil, "WMI_SET_APPIE_CMD failed\n"); + if (rc) goto out; - } /* WMI_CONNECT_CMD */ memset(&conn, 0, sizeof(conn)); @@ -722,56 +717,52 @@ static int wil_fix_bcon(struct wil6210_priv *wil, { struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); - int rc = 0; if (bcon->probe_resp_len <= hlen) return 0; +/* always use IE's from full probe frame, they have more info, + * notably RSN + */ + bcon->proberesp_ies = f->u.probe_resp.variable; + bcon->proberesp_ies_len = bcon->probe_resp_len - hlen; if (!bcon->assocresp_ies) { - bcon->assocresp_ies = f->u.probe_resp.variable; - bcon->assocresp_ies_len = bcon->probe_resp_len - hlen; - rc = 1; + bcon->assocresp_ies = bcon->proberesp_ies; + bcon->assocresp_ies_len = bcon->proberesp_ies_len; } - return rc; + return 1; } /* internal functions for device reset and starting AP */ static int _wil_cfg80211_set_ies(struct wiphy *wiphy, - size_t probe_ies_len, const u8 *probe_ies, - size_t assoc_ies_len, const u8 *assoc_ies) - + struct cfg80211_beacon_data *bcon) { int rc; struct wil6210_priv *wil = wiphy_to_wil(wiphy); - /* FW do not form regular beacon, so bcon IE's are not set - * For the DMG bcon, when it will be supported, bcon IE's will - * be reused; add something like: - * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len, - * bcon->beacon_ies); - */ - rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, probe_ies_len, probe_ies); - if (rc) { - wil_err(wil, "set_ie(PROBE_RESP) failed\n"); + rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len, + bcon->proberesp_ies); + if (rc) return rc; - } - rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, assoc_ies_len, assoc_ies); - if (rc) { - wil_err(wil, "set_ie(ASSOC_RESP) failed\n"); + rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len, + bcon->assocresp_ies); +#if 0 /* to use beacon IE's, remove this #if 0 */ + if (rc) return rc; - } - return 0; + rc = wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->tail_len, bcon->tail); +#endif + + return rc; } static int _wil_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, const u8 *ssid, size_t ssid_len, u32 privacy, int bi, u8 chan, - size_t probe_ies_len, const u8 *probe_ies, - size_t assoc_ies_len, const u8 *assoc_ies, + struct cfg80211_beacon_data *bcon, u8 hidden_ssid) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); @@ -792,8 +783,7 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, if (rc) goto out; - rc = _wil_cfg80211_set_ies(wiphy, probe_ies_len, probe_ies, - assoc_ies_len, assoc_ies); + rc = _wil_cfg80211_set_ies(wiphy, bcon); if (rc) goto out; @@ -827,27 +817,20 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, struct cfg80211_beacon_data *bcon) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); - struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; - size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); - const u8 *pr_ies = NULL; - size_t pr_ies_len = 0; int rc; u32 privacy = 0; wil_dbg_misc(wil, "%s()\n", __func__); wil_print_bcon_data(bcon); - if (bcon->probe_resp_len > hlen) { - pr_ies = f->u.probe_resp.variable; - pr_ies_len = bcon->probe_resp_len - hlen; - } - if (wil_fix_bcon(wil, bcon)) { wil_dbg_misc(wil, "Fixed bcon\n"); wil_print_bcon_data(bcon); } - if (pr_ies && cfg80211_find_ie(WLAN_EID_RSN, pr_ies, pr_ies_len)) + if (bcon->proberesp_ies && + cfg80211_find_ie(WLAN_EID_RSN, bcon->proberesp_ies, + 
bcon->proberesp_ies_len)) privacy = 1; /* in case privacy has changed, need to restart the AP */ @@ -860,14 +843,10 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid, wdev->ssid_len, privacy, wdev->beacon_interval, - wil->channel, pr_ies_len, pr_ies, - bcon->assocresp_ies_len, - bcon->assocresp_ies, + wil->channel, bcon, wil->hidden_ssid); } else { - rc = _wil_cfg80211_set_ies(wiphy, pr_ies_len, pr_ies, - bcon->assocresp_ies_len, - bcon->assocresp_ies); + rc = _wil_cfg80211_set_ies(wiphy, bcon); } return rc; @@ -882,10 +861,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, struct ieee80211_channel *channel = info->chandef.chan; struct cfg80211_beacon_data *bcon = &info->beacon; struct cfg80211_crypto_settings *crypto = &info->crypto; - struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; - size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); - const u8 *pr_ies = NULL; - size_t pr_ies_len = 0; u8 hidden_ssid; wil_dbg_misc(wil, "%s()\n", __func__); @@ -925,11 +900,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, wil_print_bcon_data(bcon); wil_print_crypto(wil, crypto); - if (bcon->probe_resp_len > hlen) { - pr_ies = f->u.probe_resp.variable; - pr_ies_len = bcon->probe_resp_len - hlen; - } - if (wil_fix_bcon(wil, bcon)) { wil_dbg_misc(wil, "Fixed bcon\n"); wil_print_bcon_data(bcon); @@ -938,10 +908,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, rc = _wil_cfg80211_start_ap(wiphy, ndev, info->ssid, info->ssid_len, info->privacy, info->beacon_interval, channel->hw_value, - pr_ies_len, pr_ies, - bcon->assocresp_ies_len, - bcon->assocresp_ies, - hidden_ssid); + bcon, hidden_ssid); return rc; } diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 75219a1b8805..613ca2b2527b 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -62,7 +62,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil, seq_printf(s, " swhead = %d\n", vring->swhead); seq_printf(s, " hwtail = [0x%08x] -> ", vring->hwtail); if (x) { - v = ioread32(x); + v = readl(x); seq_printf(s, "0x%08x = %d\n", v, v); } else { seq_puts(s, "???\n"); @@ -268,7 +268,7 @@ static const struct file_operations fops_mbox = { static int wil_debugfs_iomem_x32_set(void *data, u64 val) { - iowrite32(val, (void __iomem *)data); + writel(val, (void __iomem *)data); wmb(); /* make sure write propagated to HW */ return 0; @@ -276,7 +276,7 @@ static int wil_debugfs_iomem_x32_set(void *data, u64 val) static int wil_debugfs_iomem_x32_get(void *data, u64 *val) { - *val = ioread32((void __iomem *)data); + *val = readl((void __iomem *)data); return 0; } @@ -306,7 +306,7 @@ static int wil_debugfs_ulong_get(void *data, u64 *val) } DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get, - wil_debugfs_ulong_set, "%llu\n"); + wil_debugfs_ulong_set, "0x%llx\n"); static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode, struct dentry *parent, @@ -477,7 +477,7 @@ static int wil_memread_debugfs_show(struct seq_file *s, void *data) void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr)); if (a) - seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, ioread32(a)); + seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a)); else seq_printf(s, "[0x%08x] = INVALID\n", mem_addr); @@ -1344,6 +1344,7 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r) { int i; u16 index = 
((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size; + unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old; seq_printf(s, "([%2d] %3d TU) 0x%03x [", r->buf_size, r->timeout, r->head_seq_num); @@ -1353,7 +1354,10 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r) else seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_'); } - seq_printf(s, "] last drop 0x%03x\n", r->ssn_last_drop); + seq_printf(s, + "] total %llu drop %llu (dup %llu + old %llu) last 0x%03x\n", + r->total, drop_dup + drop_old, drop_dup, drop_old, + r->ssn_last_drop); } static int wil_sta_debugfs_show(struct seq_file *s, void *data) diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c index 0ea695ff98ad..7053b62ca8d3 100644 --- a/drivers/net/wireless/ath/wil6210/ethtool.c +++ b/drivers/net/wireless/ath/wil6210/ethtool.c @@ -50,19 +50,13 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev, wil_dbg_misc(wil, "%s()\n", __func__); - tx_itr_en = ioread32(wil->csr + - HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL)); + tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL); if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN) - tx_itr_val = - ioread32(wil->csr + - HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH)); + tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH); - rx_itr_en = ioread32(wil->csr + - HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL)); + rx_itr_en = wil_r(wil, RGF_DMA_ITR_RX_CNT_CTL); if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN) - rx_itr_val = - ioread32(wil->csr + - HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH)); + rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH); cp->tx_coalesce_usecs = tx_itr_val; cp->rx_coalesce_usecs = rx_itr_val; diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c index 4428345e5a47..82aae2d705b4 100644 --- a/drivers/net/wireless/ath/wil6210/fw.c +++ b/drivers/net/wireless/ath/wil6210/fw.c @@ -22,16 +22,6 @@ MODULE_FIRMWARE(WIL_FW_NAME); MODULE_FIRMWARE(WIL_FW2_NAME); -/* target operations */ -/* register read */ -#define R(a) ioread32(wil->csr + HOSTADDR(a)) -/* register write. 
wmb() to make sure it is completed */ -#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0) -/* register set = read, OR, write */ -#define S(a, v) W(a, R(a) | v) -/* register clear = read, AND with inverted, write */ -#define C(a, v) W(a, R(a) & ~v) - static void wil_memset_toio_32(volatile void __iomem *dst, u32 val, size_t count) diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c index 157f5ef384e0..d30657ee7e83 100644 --- a/drivers/net/wireless/ath/wil6210/fw_inc.c +++ b/drivers/net/wireless/ath/wil6210/fw_inc.c @@ -221,12 +221,12 @@ static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data, FW_ADDR_CHECK(dst, block[i].addr, "address"); - x = ioread32(dst); + x = readl(dst); y = (x & m) | (v & ~m); wil_dbg_fw(wil, "write [0x%08x] <== 0x%08x " "(old 0x%08x val 0x%08x mask 0x%08x)\n", le32_to_cpu(block[i].addr), y, x, v, m); - iowrite32(y, dst); + writel(y, dst); wmb(); /* finish before processing next record */ } @@ -239,18 +239,18 @@ static int gw_write(struct wil6210_priv *wil, void __iomem *gwa_addr, { unsigned delay = 0; - iowrite32(a, gwa_addr); - iowrite32(gw_cmd, gwa_cmd); + writel(a, gwa_addr); + writel(gw_cmd, gwa_cmd); wmb(); /* finish before activate gw */ - iowrite32(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */ + writel(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */ do { udelay(1); /* typical time is few usec */ if (delay++ > 100) { wil_err_fw(wil, "gw timeout\n"); return -EINVAL; } - } while (ioread32(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */ + } while (readl(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */ return 0; } @@ -305,7 +305,7 @@ static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data, wil_dbg_fw(wil, " gw write[%3d] [0x%08x] <== 0x%08x\n", i, a, v); - iowrite32(v, gwa_val); + writel(v, gwa_val); rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a); if (rc) return rc; @@ -372,7 +372,7 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data, sizeof(v), false); for (k = 0; k < ARRAY_SIZE(block->value); k++) - iowrite32(v[k], gwa_val[k]); + writel(v[k], gwa_val[k]); rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a); if (rc) return rc; diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 28ffc18466c4..a371f036d054 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -61,13 +61,13 @@ static inline void wil_icr_clear(u32 x, void __iomem *addr) static inline void wil_icr_clear(u32 x, void __iomem *addr) { - iowrite32(x, addr); + writel(x, addr); } #endif /* defined(CONFIG_WIL6210_ISR_COR) */ static inline u32 wil_ioread32_and_clear(void __iomem *addr) { - u32 x = ioread32(addr); + u32 x = readl(addr); wil_icr_clear(x, addr); @@ -76,54 +76,47 @@ static inline u32 wil_ioread32_and_clear(void __iomem *addr) static void wil6210_mask_irq_tx(struct wil6210_priv *wil) { - iowrite32(WIL6210_IRQ_DISABLE, wil->csr + - HOSTADDR(RGF_DMA_EP_TX_ICR) + - offsetof(struct RGF_ICR, IMS)); + wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMS), + WIL6210_IRQ_DISABLE); } static void wil6210_mask_irq_rx(struct wil6210_priv *wil) { - iowrite32(WIL6210_IRQ_DISABLE, wil->csr + - HOSTADDR(RGF_DMA_EP_RX_ICR) + - offsetof(struct RGF_ICR, IMS)); + wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS), + WIL6210_IRQ_DISABLE); } static void wil6210_mask_irq_misc(struct wil6210_priv *wil) { - 
iowrite32(WIL6210_IRQ_DISABLE, wil->csr + - HOSTADDR(RGF_DMA_EP_MISC_ICR) + - offsetof(struct RGF_ICR, IMS)); + wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS), + WIL6210_IRQ_DISABLE); } static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil) { wil_dbg_irq(wil, "%s()\n", __func__); - iowrite32(WIL6210_IRQ_DISABLE, wil->csr + - HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW)); + wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE); clear_bit(wil_status_irqen, wil->status); } void wil6210_unmask_irq_tx(struct wil6210_priv *wil) { - iowrite32(WIL6210_IMC_TX, wil->csr + - HOSTADDR(RGF_DMA_EP_TX_ICR) + - offsetof(struct RGF_ICR, IMC)); + wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMC), + WIL6210_IMC_TX); } void wil6210_unmask_irq_rx(struct wil6210_priv *wil) { - iowrite32(WIL6210_IMC_RX, wil->csr + - HOSTADDR(RGF_DMA_EP_RX_ICR) + - offsetof(struct RGF_ICR, IMC)); + wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC), + WIL6210_IMC_RX); } static void wil6210_unmask_irq_misc(struct wil6210_priv *wil) { - iowrite32(WIL6210_IMC_MISC, wil->csr + - HOSTADDR(RGF_DMA_EP_MISC_ICR) + - offsetof(struct RGF_ICR, IMC)); + wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC), + WIL6210_IMC_MISC); } static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil) @@ -132,8 +125,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil) set_bit(wil_status_irqen, wil->status); - iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr + - HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW)); + wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_PSEUDO_MASK); } void wil_mask_irq(struct wil6210_priv *wil) @@ -150,12 +142,12 @@ void wil_unmask_irq(struct wil6210_priv *wil) { wil_dbg_irq(wil, "%s()\n", __func__); - iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) + - offsetof(struct RGF_ICR, ICC)); - iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) + - offsetof(struct RGF_ICR, ICC)); - iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) + - offsetof(struct RGF_ICR, ICC)); + wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC), + WIL_ICR_ICC_VALUE); + wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC), + WIL_ICR_ICC_VALUE); + wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC), + WIL_ICR_ICC_VALUE); wil6210_unmask_irq_pseudo(wil); wil6210_unmask_irq_tx(wil); @@ -163,9 +155,6 @@ void wil_unmask_irq(struct wil6210_priv *wil) wil6210_unmask_irq_misc(wil); } -/* target write operation */ -#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0) - void wil_configure_interrupt_moderation(struct wil6210_priv *wil) { wil_dbg_irq(wil, "%s()\n", __func__); @@ -177,44 +166,42 @@ void wil_configure_interrupt_moderation(struct wil6210_priv *wil) return; /* Disable and clear tx counter before (re)configuration */ - W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR); - W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration); + wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR); + wil_w(wil, RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration); wil_info(wil, "set ITR_TX_CNT_TRSH = %d usec\n", wil->tx_max_burst_duration); /* Configure TX max burst duration timer to use usec units */ - W(RGF_DMA_ITR_TX_CNT_CTL, - BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL); + wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL, + BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL); /* Disable and clear tx idle counter before (re)configuration */ - W(RGF_DMA_ITR_TX_IDL_CNT_CTL, 
BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR); - W(RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout); + wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR); + wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout); wil_info(wil, "set ITR_TX_IDL_CNT_TRSH = %d usec\n", wil->tx_interframe_timeout); /* Configure TX max burst duration timer to use usec units */ - W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN | - BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL); + wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN | + BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL); /* Disable and clear rx counter before (re)configuration */ - W(RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR); - W(RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration); + wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR); + wil_w(wil, RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration); wil_info(wil, "set ITR_RX_CNT_TRSH = %d usec\n", wil->rx_max_burst_duration); /* Configure TX max burst duration timer to use usec units */ - W(RGF_DMA_ITR_RX_CNT_CTL, - BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL); + wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL, + BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL); /* Disable and clear rx idle counter before (re)configuration */ - W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR); - W(RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout); + wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR); + wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout); wil_info(wil, "set ITR_RX_IDL_CNT_TRSH = %d usec\n", wil->rx_interframe_timeout); /* Configure TX max burst duration timer to use usec units */ - W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN | - BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL); + wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN | + BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL); } -#undef W - static irqreturn_t wil6210_irq_rx(int irq, void *cookie) { struct wil6210_priv *wil = cookie; @@ -452,27 +439,24 @@ static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause) u32 icr_rx = wil_ioread32_and_clear(wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) + offsetof(struct RGF_ICR, ICR)); - u32 imv_rx = ioread32(wil->csr + - HOSTADDR(RGF_DMA_EP_RX_ICR) + - offsetof(struct RGF_ICR, IMV)); + u32 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR + + offsetof(struct RGF_ICR, IMV)); u32 icm_tx = wil_ioread32_and_clear(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) + offsetof(struct RGF_ICR, ICM)); u32 icr_tx = wil_ioread32_and_clear(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) + offsetof(struct RGF_ICR, ICR)); - u32 imv_tx = ioread32(wil->csr + - HOSTADDR(RGF_DMA_EP_TX_ICR) + - offsetof(struct RGF_ICR, IMV)); + u32 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR + + offsetof(struct RGF_ICR, IMV)); u32 icm_misc = wil_ioread32_and_clear(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) + offsetof(struct RGF_ICR, ICM)); u32 icr_misc = wil_ioread32_and_clear(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) + offsetof(struct RGF_ICR, ICR)); - u32 imv_misc = ioread32(wil->csr + - HOSTADDR(RGF_DMA_EP_MISC_ICR) + - offsetof(struct RGF_ICR, IMV)); + u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR + + offsetof(struct RGF_ICR, IMV)); wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n" "Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n" "Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n" @@ -492,7 +476,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie) { irqreturn_t rc = IRQ_HANDLED; struct wil6210_priv *wil = 
cookie; - u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE)); + u32 pseudo_cause = wil_r(wil, RGF_DMA_PSEUDO_CAUSE); /** * pseudo_cause is Clear-On-Read, no need to ACK @@ -541,48 +525,12 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie) return rc; } -static int wil6210_request_3msi(struct wil6210_priv *wil, int irq) -{ - int rc; - /* - * IRQ's are in the following order: - * - Tx - * - Rx - * - Misc - */ - - rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED, - WIL_NAME"_tx", wil); - if (rc) - return rc; - - rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED, - WIL_NAME"_rx", wil); - if (rc) - goto free0; - - rc = request_threaded_irq(irq + 2, wil6210_irq_misc, - wil6210_irq_misc_thread, - IRQF_SHARED, WIL_NAME"_misc", wil); - if (rc) - goto free1; - - return 0; - /* error branch */ -free1: - free_irq(irq + 1, wil); -free0: - free_irq(irq, wil); - - return rc; -} - /* can't use wil_ioread32_and_clear because ICC value is not set yet */ static inline void wil_clear32(void __iomem *addr) { - u32 x = ioread32(addr); + u32 x = readl(addr); - iowrite32(x, addr); + writel(x, addr); } void wil6210_clear_irq(struct wil6210_priv *wil) @@ -596,19 +544,16 @@ void wil6210_clear_irq(struct wil6210_priv *wil) wmb(); /* make sure write completed */ } -int wil6210_init_irq(struct wil6210_priv *wil, int irq) +int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi) { int rc; - wil_dbg_misc(wil, "%s() n_msi=%d\n", __func__, wil->n_msi); + wil_dbg_misc(wil, "%s(%s)\n", __func__, use_msi ? "MSI" : "INTx"); - if (wil->n_msi == 3) - rc = wil6210_request_3msi(wil, irq); - else - rc = request_threaded_irq(irq, wil6210_hardirq, - wil6210_thread_irq, - wil->n_msi ? 0 : IRQF_SHARED, - WIL_NAME, wil); + rc = request_threaded_irq(irq, wil6210_hardirq, + wil6210_thread_irq, + use_msi ? 
0 : IRQF_SHARED, + WIL_NAME, wil); return rc; } @@ -618,8 +563,4 @@ void wil6210_fini_irq(struct wil6210_priv *wil, int irq) wil_mask_irq(wil); free_irq(irq, wil); - if (wil->n_msi == 3) { - free_irq(irq + 1, wil); - free_irq(irq + 2, wil); - } } diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c index e9c0673819c6..f7f948621951 100644 --- a/drivers/net/wireless/ath/wil6210/ioctl.c +++ b/drivers/net/wireless/ath/wil6210/ioctl.c @@ -76,11 +76,11 @@ static int wil_ioc_memio_dword(struct wil6210_priv *wil, void __user *data) /* operation */ switch (io.op & wil_mmio_op_mask) { case wil_mmio_read: - io.val = ioread32(a); + io.val = readl(a); need_copy = true; break; case wil_mmio_write: - iowrite32(io.val, a); + writel(io.val, a); wmb(); /* make sure write propagated to HW */ break; default: diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 6ca6193ab8a6..2fb04c51da53 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -21,6 +21,7 @@ #include "wil6210.h" #include "txrx.h" #include "wmi.h" +#include "boot_loader.h" #define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000 #define WAIT_FOR_DISCONNECT_INTERVAL_MS 10 @@ -270,8 +271,7 @@ static void wil_scan_timer_fn(ulong x) clear_bit(wil_status_fwready, wil->status); wil_err(wil, "Scan timeout detected, start fw error recovery\n"); - wil->recovery_state = fw_recovery_pending; - schedule_work(&wil->fw_error_worker); + wil_fw_error_recovery(wil); } static int wil_wait_for_recovery(struct wil6210_priv *wil) @@ -528,26 +528,16 @@ void wil_priv_deinit(struct wil6210_priv *wil) destroy_workqueue(wil->wmi_wq); } -/* target operations */ -/* register read */ -#define R(a) ioread32(wil->csr + HOSTADDR(a)) -/* register write. 
wmb() to make sure it is completed */ -#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0) -/* register set = read, OR, write */ -#define S(a, v) W(a, R(a) | v) -/* register clear = read, AND with inverted, write */ -#define C(a, v) W(a, R(a) & ~v) - static inline void wil_halt_cpu(struct wil6210_priv *wil) { - W(RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST); - W(RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST); + wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST); + wil_w(wil, RGF_USER_MAC_CPU_0, BIT_USER_MAC_CPU_MAN_RST); } static inline void wil_release_cpu(struct wil6210_priv *wil) { /* Start CPU */ - W(RGF_USER_USER_CPU_0, 1); + wil_w(wil, RGF_USER_USER_CPU_0, 1); } static int wil_target_reset(struct wil6210_priv *wil) @@ -558,56 +548,60 @@ static int wil_target_reset(struct wil6210_priv *wil) wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name); /* Clear MAC link up */ - S(RGF_HP_CTRL, BIT(15)); - S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD); - S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST); + wil_s(wil, RGF_HP_CTRL, BIT(15)); + wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD); + wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST); wil_halt_cpu(wil); /* clear all boot loader "ready" bits */ - W(RGF_USER_BL + offsetof(struct RGF_BL, ready), 0); + wil_w(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v0, boot_loader_ready), 0); /* Clear Fw Download notification */ - C(RGF_USER_USAGE_6, BIT(0)); + wil_c(wil, RGF_USER_USAGE_6, BIT(0)); - S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN); + wil_s(wil, RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN); /* XTAL stabilization should take about 3ms */ usleep_range(5000, 7000); - x = R(RGF_CAF_PLL_LOCK_STATUS); + x = wil_r(wil, RGF_CAF_PLL_LOCK_STATUS); if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) { wil_err(wil, "Xtal stabilization timeout\n" "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x); return -ETIME; } /* switch 10k to XTAL*/ - C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF); + wil_c(wil, RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF); /* 40 MHz */ - C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL); + wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL); - W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f); - W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf); + wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f); + wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00); - W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0); - W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0); + wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0); + wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); /* 
reset A2 PCIE AHB */ + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003); + /* reset A2 PCIE AHB */ + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); - W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); + wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); /* wait until device ready. typical time is 20..80 msec */ do { msleep(RST_DELAY); - x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready)); + x = wil_r(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v0, + boot_loader_ready)); if (x1 != x) { wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x); x1 = x; @@ -617,13 +611,13 @@ static int wil_target_reset(struct wil6210_priv *wil) x); return -ETIME; } - } while (x != BIT_BL_READY); + } while (x != BL_READY); - C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); + wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); /* enable fix for HW bug related to the SA/DA swap in AP Rx */ - S(RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN | - BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC); + wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN | + BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC); wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY); return 0; @@ -641,29 +635,93 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r) static int wil_get_bl_info(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); - struct RGF_BL bl; + union { + struct bl_dedicated_registers_v0 bl0; + struct bl_dedicated_registers_v1 bl1; + } bl; + u32 bl_ver; + u8 *mac; + u16 rf_status; - wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), sizeof(bl)); - le32_to_cpus(&bl.ready); - le32_to_cpus(&bl.version); - le32_to_cpus(&bl.rf_type); - le32_to_cpus(&bl.baseband_type); + wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), + sizeof(bl)); + bl_ver = le32_to_cpu(bl.bl0.boot_loader_struct_version); + mac = bl.bl0.mac_address; - if (!is_valid_ether_addr(bl.mac_address)) { - wil_err(wil, "BL: Invalid MAC %pM\n", bl.mac_address); + if (bl_ver == 0) { + le32_to_cpus(&bl.bl0.rf_type); + le32_to_cpus(&bl.bl0.baseband_type); + rf_status = 0; /* actually, unknown */ + wil_info(wil, + "Boot Loader struct v%d: MAC = %pM RF = 0x%08x bband = 0x%08x\n", + bl_ver, mac, + bl.bl0.rf_type, bl.bl0.baseband_type); + wil_info(wil, "Boot Loader build unknown for struct v0\n"); + } else { + le16_to_cpus(&bl.bl1.rf_type); + rf_status = le16_to_cpu(bl.bl1.rf_status); + le32_to_cpus(&bl.bl1.baseband_type); + le16_to_cpus(&bl.bl1.bl_version_subminor); + le16_to_cpus(&bl.bl1.bl_version_build); + wil_info(wil, + "Boot Loader struct v%d: MAC = %pM RF = 0x%04x (status 0x%04x) bband = 0x%08x\n", + bl_ver, mac, + bl.bl1.rf_type, rf_status, + bl.bl1.baseband_type); + wil_info(wil, "Boot Loader build %d.%d.%d.%d\n", + bl.bl1.bl_version_major, bl.bl1.bl_version_minor, + bl.bl1.bl_version_subminor, bl.bl1.bl_version_build); + } + + if (!is_valid_ether_addr(mac)) { + wil_err(wil, "BL: Invalid MAC %pM\n", mac); return -EINVAL; } - ether_addr_copy(ndev->perm_addr, bl.mac_address); + ether_addr_copy(ndev->perm_addr, mac); if (!is_valid_ether_addr(ndev->dev_addr)) - ether_addr_copy(ndev->dev_addr, bl.mac_address); - wil_info(wil, - "Boot Loader: ver = %d MAC = %pM RF = 0x%08x bband = 0x%08x\n", - bl.version, bl.mac_address, bl.rf_type, bl.baseband_type); + ether_addr_copy(ndev->dev_addr, mac); + + if (rf_status) {/* bad RF cable? 
*/ + wil_err(wil, "RF communication error 0x%04x", + rf_status); + return -EAGAIN; + } return 0; } +static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err) +{ + u32 bl_assert_code, bl_assert_blink, bl_magic_number; + u32 bl_ver = wil_r(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v0, + boot_loader_struct_version)); + + if (bl_ver < 2) + return; + + bl_assert_code = wil_r(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v1, + bl_assert_code)); + bl_assert_blink = wil_r(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v1, + bl_assert_blink)); + bl_magic_number = wil_r(wil, RGF_USER_BL + + offsetof(struct bl_dedicated_registers_v1, + bl_magic_number)); + + if (is_err) { + wil_err(wil, + "BL assert code 0x%08x blink 0x%08x magic 0x%08x\n", + bl_assert_code, bl_assert_blink, bl_magic_number); + } else { + wil_dbg_misc(wil, + "BL assert code 0x%08x blink 0x%08x magic 0x%08x\n", + bl_assert_code, bl_assert_blink, bl_magic_number); + } +} + static int wil_wait_for_fw_ready(struct wil6210_priv *wil) { ulong to = msecs_to_jiffies(1000); @@ -690,9 +748,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) wil_dbg_misc(wil, "%s()\n", __func__); - if (wil->hw_version == HW_VER_UNKNOWN) - return -ENODEV; - WARN_ON(!mutex_is_locked(&wil->mutex)); WARN_ON(test_bit(wil_status_napi_en, wil->status)); @@ -707,6 +762,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) return 0; } + if (wil->hw_version == HW_VER_UNKNOWN) + return -ENODEV; + cancel_work_sync(&wil->disconnect_worker); wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); wil_bcast_fini(wil); @@ -729,12 +787,17 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) flush_workqueue(wil->wq_service); flush_workqueue(wil->wmi_wq); + wil_bl_crash_info(wil, false); rc = wil_target_reset(wil); wil_rx_fini(wil); - if (rc) + if (rc) { + wil_bl_crash_info(wil, true); return rc; + } rc = wil_get_bl_info(wil); + if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */ + rc = 0; if (rc) return rc; @@ -752,7 +815,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) return rc; /* Mark FW as loaded from host */ - S(RGF_USER_USAGE_6, 1); + wil_s(wil, RGF_USER_USAGE_6, 1); /* clear any interrupts which on-card-firmware * may have set @@ -760,8 +823,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) wil6210_clear_irq(wil); /* CAF_ICR - clear and mask */ /* it is W1C, clear by writing back same value */ - S(RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); - W(RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); + wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); + wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); wil_release_cpu(wil); } @@ -785,11 +848,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) return rc; } -#undef R -#undef W -#undef S -#undef C - void wil_fw_error_recovery(struct wil6210_priv *wil) { wil_dbg_misc(wil, "starting fw error recovery\n"); diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 8ef18ace110f..e3b3c8fb4605 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -173,7 +173,10 @@ void *wil_if_alloc(struct device *dev) wil_set_ethtoolops(ndev); ndev->ieee80211_ptr = wdev; ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM | - NETIF_F_SG | NETIF_F_GRO; + NETIF_F_SG | NETIF_F_GRO | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_RXHASH; + ndev->features |= ndev->hw_features; SET_NETDEV_DEV(ndev, 
wiphy_dev(wdev->wiphy)); wdev->netdev = ndev; diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index aa3ecc607ca3..feff1ef10fb3 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -21,16 +21,14 @@ #include "wil6210.h" -static int use_msi = 1; -module_param(use_msi, int, S_IRUGO); -MODULE_PARM_DESC(use_msi, - " Use MSI interrupt: " - "0 - don't, 1 - (default) - single, or 3"); +static bool use_msi = true; +module_param(use_msi, bool, S_IRUGO); +MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true"); static void wil_set_capabilities(struct wil6210_priv *wil) { - u32 rev_id = ioread32(wil->csr + HOSTADDR(RGF_USER_JTAG_DEV_ID)); + u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID); bitmap_zero(wil->hw_capabilities, hw_capability_last); @@ -50,24 +48,12 @@ void wil_set_capabilities(struct wil6210_priv *wil) void wil_disable_irq(struct wil6210_priv *wil) { - int irq = wil->pdev->irq; - - disable_irq(irq); - if (wil->n_msi == 3) { - disable_irq(irq + 1); - disable_irq(irq + 2); - } + disable_irq(wil->pdev->irq); } void wil_enable_irq(struct wil6210_priv *wil) { - int irq = wil->pdev->irq; - - enable_irq(irq); - if (wil->n_msi == 3) { - enable_irq(irq + 1); - enable_irq(irq + 2); - } + enable_irq(wil->pdev->irq); } /* Bus ops */ @@ -80,6 +66,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) * and only MSI should be used */ int msi_only = pdev->msi_enabled; + bool _use_msi = use_msi; wil_dbg_misc(wil, "%s()\n", __func__); @@ -87,41 +74,20 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil) pci_set_master(pdev); - /* - * how many MSI interrupts to request? - */ - switch (use_msi) { - case 3: - case 1: - wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi); - break; - case 0: - wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n"); - break; - default: - wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi); - use_msi = 1; - } + wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx"); - if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) { - wil_err(wil, "3 MSI mode failed, try 1 MSI\n"); - use_msi = 1; - } - - if (use_msi == 1 && pci_enable_msi(pdev)) { + if (use_msi && pci_enable_msi(pdev)) { wil_err(wil, "pci_enable_msi failed, use INTx\n"); - use_msi = 0; + _use_msi = false; } - wil->n_msi = use_msi; - - if ((wil->n_msi == 0) && msi_only) { + if (!_use_msi && msi_only) { wil_err(wil, "Interrupt pin not routed, unable to use INTx\n"); rc = -ENODEV; goto stop_master; } - rc = wil6210_init_irq(wil, pdev->irq); + rc = wil6210_init_irq(wil, pdev->irq, _use_msi); if (rc) goto stop_master; @@ -293,11 +259,80 @@ static const struct pci_device_id wil6210_pcie_ids[] = { }; MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids); +#ifdef CONFIG_PM + +static int wil6210_suspend(struct device *dev, bool is_runtime) +{ + int rc = 0; + struct pci_dev *pdev = to_pci_dev(dev); + struct wil6210_priv *wil = pci_get_drvdata(pdev); + + wil_dbg_pm(wil, "%s(%s)\n", __func__, + is_runtime ? "runtime" : "system"); + + rc = wil_can_suspend(wil, is_runtime); + if (rc) + goto out; + + rc = wil_suspend(wil, is_runtime); + if (rc) + goto out; + + /* TODO: how do I bring card in low power state? 
*/ + + /* disable bus mastering */ + pci_clear_master(pdev); + /* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */ + +out: + return rc; +} + +static int wil6210_resume(struct device *dev, bool is_runtime) +{ + int rc = 0; + struct pci_dev *pdev = to_pci_dev(dev); + struct wil6210_priv *wil = pci_get_drvdata(pdev); + + wil_dbg_pm(wil, "%s(%s)\n", __func__, + is_runtime ? "runtime" : "system"); + + /* allow master */ + pci_set_master(pdev); + + rc = wil_resume(wil, is_runtime); + if (rc) + pci_clear_master(pdev); + + return rc; +} + +#ifdef CONFIG_PM_SLEEP +static int wil6210_pm_suspend(struct device *dev) +{ + return wil6210_suspend(dev, false); +} + +static int wil6210_pm_resume(struct device *dev) +{ + return wil6210_resume(dev, false); +} +#endif /* CONFIG_PM_SLEEP */ + +#endif /* CONFIG_PM */ + +static const struct dev_pm_ops wil6210_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume) +}; + static struct pci_driver wil6210_driver = { .probe = wil_pcie_probe, .remove = wil_pcie_remove, .id_table = wil6210_pcie_ids, .name = WIL_NAME, + .driver = { + .pm = &wil6210_pm_ops, + }, }; static int __init wil6210_driver_init(void) diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c new file mode 100644 index 000000000000..0b7ecbcac19c --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wil6210.h" + +int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) +{ + int rc = 0; + struct wireless_dev *wdev = wil->wdev; + + wil_dbg_pm(wil, "%s(%s)\n", __func__, + is_runtime ? "runtime" : "system"); + + switch (wdev->iftype) { + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + break; + /* AP-like interface - can't suspend */ + default: + wil_dbg_pm(wil, "AP-like interface\n"); + rc = -EBUSY; + break; + } + + wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__, + is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc); + + return rc; +} + +int wil_suspend(struct wil6210_priv *wil, bool is_runtime) +{ + int rc = 0; + struct net_device *ndev = wil_to_ndev(wil); + + wil_dbg_pm(wil, "%s(%s)\n", __func__, + is_runtime ? "runtime" : "system"); + + /* if netif up, hardware is alive, shut it down */ + if (ndev->flags & IFF_UP) { + rc = wil_down(wil); + if (rc) { + wil_err(wil, "wil_down : %d\n", rc); + goto out; + } + } + + if (wil->platform_ops.suspend) + rc = wil->platform_ops.suspend(wil->platform_handle); + +out: + wil_dbg_pm(wil, "%s(%s) => %d\n", __func__, + is_runtime ? 
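The new sleep hooks bracket the PCI core's own work: suspend shuts the interface down and clears bus mastering before the core saves config space, while resume restores mastering before waking the hardware. A compact sketch of that wiring, assuming hypothetical toy_down()/toy_up() helpers:

#include <linux/pci.h>
#include <linux/pm.h>

static int toy_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int rc = toy_down(pci_get_drvdata(pdev));	/* quiesce DMA */

	if (rc == 0)
		pci_clear_master(pdev);	/* core saves state next */
	return rc;
}

static int toy_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	pci_set_master(pdev);		/* allow DMA again first */
	return toy_up(pci_get_drvdata(pdev));
}

static const struct dev_pm_ops toy_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(toy_pm_suspend, toy_pm_resume)
};

Routing both system-sleep callbacks through a common helper pair with an is_runtime flag, as the patch does, leaves room to add runtime PM later without duplicating the logic.
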
"runtime" : "system", rc); + return rc; +} + +int wil_resume(struct wil6210_priv *wil, bool is_runtime) +{ + int rc = 0; + struct net_device *ndev = wil_to_ndev(wil); + + wil_dbg_pm(wil, "%s(%s)\n", __func__, + is_runtime ? "runtime" : "system"); + + if (wil->platform_ops.resume) { + rc = wil->platform_ops.resume(wil->platform_handle); + if (rc) { + wil_err(wil, "platform_ops.resume : %d\n", rc); + goto out; + } + } + + /* if netif up, bring hardware up + * During open(), IFF_UP set after actual device method + * invocation. This prevent recursive call to wil_up() + */ + if (ndev->flags & IFF_UP) + rc = wil_up(wil); + +out: + wil_dbg_pm(wil, "%s(%s) => %d\n", __func__, + is_runtime ? "runtime" : "system", rc); + return rc; +} diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index ca10dcf0986e..9238c1ac23dd 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -121,6 +121,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) goto out; } + r->total++; hseq = r->head_seq_num; /** Due to the race between WMI events, where BACK establishment @@ -153,6 +154,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) /* frame with out of date sequence number */ if (seq_less(seq, r->head_seq_num)) { r->ssn_last_drop = seq; + r->drop_old++; + wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n", + seq, r->head_seq_num); dev_kfree_skb(skb); goto out; } @@ -173,6 +177,8 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) /* check if we already stored this frame */ if (r->reorder_buf[index]) { + r->drop_dup++; + wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq); dev_kfree_skb(skb); goto out; } diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index aa20af86e1d6..6229110d558a 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -509,7 +509,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count) break; } } - iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail)); + wil_w(wil, v->hwtail, v->swtail); return rc; } @@ -541,6 +541,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) [GRO_DROP] = "GRO_DROP", }; + if (ndev->features & NETIF_F_RXHASH) + /* fake L4 to ensure it won't be re-calculated later + * set hash to any non-zero value to activate rps + * mechanism, core will be chosen according + * to user-level rps configuration. + */ + skb_set_hash(skb, 1, PKT_HASH_TYPE_L4); + skb_orphan(skb); if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) { @@ -1058,14 +1066,52 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len, static inline void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags) { - d->mac.d[2] |= ((nr_frags + 1) << - MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS); + d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS); } -static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil, - struct vring_tx_desc *d, - struct sk_buff *skb) +/** + * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding + * @skb is used to obtain the protocol and headers length. + * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data, + * 2 - middle, 3 - last descriptor. 
+ */ + +static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d, + struct sk_buff *skb, + int tso_desc_type, bool is_ipv4, + int tcp_hdr_len, int skb_net_hdr_len) { + d->dma.b11 = ETH_HLEN; /* MAC header length */ + d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS; + + d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS); + /* L4 header len: TCP header length */ + d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK); + + /* Setup TSO: bit and desc type */ + d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) | + (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS); + d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS); + + d->dma.ip_length = skb_net_hdr_len; + /* Enable TCP/UDP checksum */ + d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS); + /* Calculate pseudo-header */ + d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS); +} + +/** + * Sets the descriptor @d up for csum. The corresponding + * @skb is used to obtain the protocol and headers length. + * Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6. + * Note, if d==NULL, the function only returns the protocol result. + * + * It is very similar to previous wil_tx_desc_offload_setup_tso. This + * is "if unrolling" to optimize the critical path. + */ + +static int wil_tx_desc_offload_setup(struct vring_tx_desc *d, + struct sk_buff *skb){ int protocol; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -1110,6 +1156,305 @@ static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil, return 0; } +static inline void wil_tx_last_desc(struct vring_tx_desc *d) +{ + d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) | + BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) | + BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS); +} + +static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d) +{ + d->dma.d0 |= wil_tso_type_lst << + DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS; +} + +static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring, + struct sk_buff *skb) +{ + struct device *dev = wil_to_dev(wil); + + /* point to descriptors in shared memory */ + volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc, + *_first_desc = NULL; + + /* pointers to shadow descriptors */ + struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem, + *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem, + *first_desc = &first_desc_mem; + + /* pointer to shadow descriptors' context */ + struct wil_ctx *hdr_ctx, *first_ctx = NULL; + + int descs_used = 0; /* total number of used descriptors */ + int sg_desc_cnt = 0; /* number of descriptors for current mss*/ + + u32 swhead = vring->swhead; + int used, avail = wil_vring_avail_tx(vring); + int nr_frags = skb_shinfo(skb)->nr_frags; + int min_desc_required = nr_frags + 1; + int mss = skb_shinfo(skb)->gso_size; /* payload size w/o headers */ + int f, len, hdrlen, headlen; + int vring_index = vring - wil->vring_tx; + struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index]; + uint i = swhead; + dma_addr_t pa; + const skb_frag_t *frag = NULL; + int rem_data = mss; + int lenmss; + int hdr_compensation_need = true; + int desc_tso_type = wil_tso_type_first; + bool is_ipv4; + int tcp_hdr_len; + int skb_net_hdr_len; + int gso_type; + + wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n", + __func__, skb->len, vring_index); + + if (unlikely(!txdata->enabled)) + return -EINVAL; + + /* A typical page 4K is 3-4 payloads, we assume each fragment + * is a full payload, that's how min_desc_required has been + * calculated. 
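+ * (Illustrative numbers: an skb with 16 page fragments must find at
+ * least 17 free descriptors at this point - one per fragment plus one
+ * for the headers.)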
In reality we might need more or fewer descriptors; + * this is only an initial check. + */ + if (unlikely(avail < min_desc_required)) { + wil_err_ratelimited(wil, + "TSO: Tx ring[%2d] full. No space for %d fragments\n", + vring_index, min_desc_required); + return -ENOMEM; + } + + /* Header Length = MAC header len + IP header len + TCP header len */ + hdrlen = ETH_HLEN + + (int)skb_network_header_len(skb) + + tcp_hdrlen(skb); + + gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4); + switch (gso_type) { + case SKB_GSO_TCPV4: + /* TCP v4, zero out the IP length and IPv4 checksum fields + * as required by the offloading doc + */ + ip_hdr(skb)->tot_len = 0; + ip_hdr(skb)->check = 0; + is_ipv4 = true; + break; + case SKB_GSO_TCPV6: + /* TCP v6, zero out the payload length */ + ipv6_hdr(skb)->payload_len = 0; + is_ipv4 = false; + break; + default: + /* types other than TCPv4 or TCPv6 are not supported for TSO. + * It is also illegal for both to be set simultaneously + */ + return -EINVAL; + } + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return -EINVAL; + + /* tcp header length and skb network header length are fixed for all + * packet's descriptors - read them once here + */ + tcp_hdr_len = tcp_hdrlen(skb); + skb_net_hdr_len = skb_network_header_len(skb); + + _hdr_desc = &vring->va[i].tx; + + pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, pa))) { + wil_err(wil, "TSO: Skb head DMA map error\n"); + goto err_exit; + } + + wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index); + wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4, + tcp_hdr_len, skb_net_hdr_len); + wil_tx_last_desc(hdr_desc); + + vring->ctx[i].mapped_as = wil_mapped_as_single; + hdr_ctx = &vring->ctx[i]; + + descs_used++; + headlen = skb_headlen(skb) - hdrlen; + + for (f = headlen ?
-1 : 0; f < nr_frags; f++) { + if (headlen) { + len = headlen; + wil_dbg_txrx(wil, "TSO: process skb head, len %u\n", + len); + } else { + frag = &skb_shinfo(skb)->frags[f]; + len = frag->size; + wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len); + } + + while (len) { + wil_dbg_txrx(wil, + "TSO: len %d, rem_data %d, descs_used %d\n", + len, rem_data, descs_used); + + if (descs_used == avail) { + wil_err(wil, "TSO: ring overflow\n"); + goto dma_error; + } + + lenmss = min_t(int, rem_data, len); + i = (swhead + descs_used) % vring->size; + wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i); + + if (!headlen) { + pa = skb_frag_dma_map(dev, frag, + frag->size - len, lenmss, + DMA_TO_DEVICE); + vring->ctx[i].mapped_as = wil_mapped_as_page; + } else { + pa = dma_map_single(dev, + skb->data + + skb_headlen(skb) - headlen, + lenmss, + DMA_TO_DEVICE); + vring->ctx[i].mapped_as = wil_mapped_as_single; + headlen -= lenmss; + } + + if (unlikely(dma_mapping_error(dev, pa))) + goto dma_error; + + _desc = &vring->va[i].tx; + + if (!_first_desc) { + _first_desc = _desc; + first_ctx = &vring->ctx[i]; + d = first_desc; + } else { + d = &desc_mem; + } + + wil_tx_desc_map(d, pa, lenmss, vring_index); + wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type, + is_ipv4, tcp_hdr_len, + skb_net_hdr_len); + + /* use tso_type_first only once */ + desc_tso_type = wil_tso_type_mid; + + descs_used++; /* desc used so far */ + sg_desc_cnt++; /* desc used for this segment */ + len -= lenmss; + rem_data -= lenmss; + + wil_dbg_txrx(wil, + "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n", + len, rem_data, descs_used, sg_desc_cnt); + + /* Close the segment if reached mss size or last frag*/ + if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) { + if (hdr_compensation_need) { + /* first segment include hdr desc for + * release + */ + hdr_ctx->nr_frags = sg_desc_cnt; + wil_tx_desc_set_nr_frags(first_desc, + sg_desc_cnt + + 1); + hdr_compensation_need = false; + } else { + wil_tx_desc_set_nr_frags(first_desc, + sg_desc_cnt); + } + first_ctx->nr_frags = sg_desc_cnt - 1; + + wil_tx_last_desc(d); + + /* first descriptor may also be the last + * for this mss - make sure not to copy + * it twice + */ + if (first_desc != d) + *_first_desc = *first_desc; + + /*last descriptor will be copied at the end + * of this TS processing + */ + if (f < nr_frags - 1 || len > 0) + *_desc = *d; + + rem_data = mss; + _first_desc = NULL; + sg_desc_cnt = 0; + } else if (first_desc != d) /* update mid descriptor */ + *_desc = *d; + } + } + + /* first descriptor may also be the last. 
+ * in this case d pointer is invalid + */ + if (_first_desc == _desc) + d = first_desc; + + /* Last data descriptor */ + wil_set_tx_desc_last_tso(d); + *_desc = *d; + + /* Fill the total number of descriptors in first desc (hdr)*/ + wil_tx_desc_set_nr_frags(hdr_desc, descs_used); + *_hdr_desc = *hdr_desc; + + /* hold reference to skb + * to prevent skb release before accounting + * in case of immediate "tx done" + */ + vring->ctx[i].skb = skb_get(skb); + + /* performance monitoring */ + used = wil_vring_used_tx(vring); + if (wil_val_in_range(vring_idle_trsh, + used, used + descs_used)) { + txdata->idle += get_cycles() - txdata->last_idle; + wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", + vring_index, used, used + descs_used); + } + + /* advance swhead */ + wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead); + wil_vring_advance_head(vring, descs_used); + + /* make sure all writes to descriptors (shared memory) are done before + * committing them to HW + */ + wmb(); + + wil_w(wil, vring->hwtail, vring->swhead); + return 0; + +dma_error: + wil_err(wil, "TSO: DMA map page error\n"); + while (descs_used > 0) { + struct wil_ctx *ctx; + + i = (swhead + descs_used) % vring->size; + d = (struct vring_tx_desc *)&vring->va[i].tx; + _desc = &vring->va[i].tx; + *d = *_desc; + _desc->dma.status = TX_DMA_STATUS_DU; + ctx = &vring->ctx[i]; + wil_txdesc_unmap(dev, d, ctx); + if (ctx->skb) + dev_kfree_skb_any(ctx->skb); + memset(ctx, 0, sizeof(*ctx)); + descs_used--; + } + +err_exit: + return -EINVAL; +} + static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, struct sk_buff *skb) { @@ -1128,7 +1473,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, bool mcast = (vring_index == wil->bcast_vring); uint len = skb_headlen(skb); - wil_dbg_txrx(wil, "%s()\n", __func__); + wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n", + __func__, skb->len, vring_index); if (unlikely(!txdata->enabled)) return -EINVAL; @@ -1159,14 +1505,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS); } /* Process TCP/UDP checksum offloading */ - if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) { + if (unlikely(wil_tx_desc_offload_setup(d, skb))) { wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n", vring_index); goto dma_error; } vring->ctx[i].nr_frags = nr_frags; - wil_tx_desc_set_nr_frags(d, nr_frags); + wil_tx_desc_set_nr_frags(d, nr_frags + 1); /* middle segments */ for (; f < nr_frags; f++) { @@ -1190,7 +1536,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, * if it succeeded for 1-st descriptor, * it will succeed here too */ - wil_tx_desc_offload_cksum_set(wil, d, skb); + wil_tx_desc_offload_setup(d, skb); } /* for the last seg only */ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS); @@ -1221,7 +1567,13 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead, vring->swhead); trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags); - iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail)); + + /* make sure all writes to descriptors (shared memory) are done before + * committing them to HW + */ + wmb(); + + wil_w(wil, vring->hwtail, vring->swhead); return 0; dma_error: @@ -1254,8 +1606,12 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, int rc; spin_lock(&txdata->lock); - rc = __wil_tx_vring(wil, vring, skb); + + rc = 
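Both transmit paths now separate the descriptor writes (normal cacheable stores into the shared ring) from the doorbell write with an explicit barrier, so the device can never fetch a half-written descriptor. The pattern in isolation, sketched with illustrative toy_* names:

#include <linux/io.h>

struct toy_ring {
	void __iomem *csr;	/* mapped CSR window */
	u32 hwtail_off;		/* doorbell register offset */
};

static void toy_ring_kick(struct toy_ring *r, u32 new_head)
{
	/* make all descriptor stores visible to the device before
	 * the MMIO doorbell write that triggers the DMA fetch
	 */
	wmb();
	writel(new_head, r->csr + r->hwtail_off);
}
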
(skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring) + (wil, vring, skb); + spin_unlock(&txdata->lock); + return rc; } @@ -1382,7 +1738,8 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) struct wil_ctx *ctx = &vring->ctx[vring->swtail]; /** * For the fragmented skb, HW will set DU bit only for the - * last fragment. look for it + * last fragment. look for it. + * In TSO the first DU will include hdr desc */ int lf = (vring->swtail + ctx->nr_frags) % vring->size; /* TODO: check we are not past head */ diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index 0c4638487c74..82a8f9a030e7 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -291,6 +291,14 @@ struct vring_tx_dma { __le16 length; } __packed; +/* TSO type used in dma descriptor d0 bits 11-12 */ +enum { + wil_tso_type_hdr = 0, + wil_tso_type_first = 1, + wil_tso_type_mid = 2, + wil_tso_type_lst = 3, +}; + /* Rx descriptor - MAC part * [dword 0] * bit 0.. 3 : tid:4 The QoS (b3-0) TID Field diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index c63e4a35eaa0..dd4ea926b8e3 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -127,16 +127,6 @@ struct RGF_ICR { u32 IMC; /* Mask Clear, write 1 to clear */ } __packed; -struct RGF_BL { - u32 ready; /* 0x880A3C bit [0] */ -#define BIT_BL_READY BIT(0) - u32 version; /* 0x880A40 version of the BL struct */ - u32 rf_type; /* 0x880A44 ID of the connected RF */ - u32 baseband_type; /* 0x880A48 ID of the baseband */ - u8 mac_address[ETH_ALEN]; /* 0x880A4C permanent MAC */ - u8 pad[2]; -} __packed; - /* registers - FW addresses */ #define RGF_USER_USAGE_1 (0x880004) #define RGF_USER_USAGE_6 (0x880018) @@ -262,9 +252,8 @@ enum { }; /* popular locations */ -#define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD) -#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \ - offsetof(struct RGF_ICR, ICS)) +#define RGF_MBOX RGF_USER_USER_SCRATCH_PAD +#define HOST_MBOX HOSTADDR(RGF_MBOX) #define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2 /* ISR register bits */ @@ -434,12 +423,12 @@ struct pci_dev; * @ssn: Starting Sequence Number expected to be aggregated. * @buf_size: buffer size for incoming A-MPDUs * @timeout: reset timer value (in TUs). + * @ssn_last_drop: SSN of the last dropped frame + * @total: total number of processed incoming frames + * @drop_dup: duplicate frames dropped for this reorder buffer + * @drop_old: old frames dropped for this reorder buffer * @dialog_token: dialog token for aggregation session - * @rcu_head: RCU head used for freeing this struct - * - * This structure's lifetime is managed by RCU, assignments to - * the array holding it must hold the aggregation mutex. - * + * @first_time: true when this buffer used 1-st time */ struct wil_tid_ampdu_rx { struct sk_buff **reorder_buf; @@ -453,6 +442,9 @@ struct wil_tid_ampdu_rx { u16 buf_size; u16 timeout; u16 ssn_last_drop; + unsigned long long total; /* frames processed */ + unsigned long long drop_dup; + unsigned long long drop_old; u8 dialog_token; bool first_time; /* is it 1-st time this buffer used? */ }; @@ -543,7 +535,6 @@ struct pmc_ctx { struct wil6210_priv { struct pci_dev *pdev; - int n_msi; struct wireless_dev *wdev; void __iomem *csr; DECLARE_BITMAP(status, wil_status_last); @@ -656,6 +647,33 @@ void wil_info(struct wil6210_priv *wil, const char *fmt, ...); #define wil_dbg_txrx(wil, fmt, arg...) 
wil_dbg(wil, "DBG[TXRX]" fmt, ##arg) #define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg) #define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg) +#define wil_dbg_pm(wil, fmt, arg...) wil_dbg(wil, "DBG[ PM ]" fmt, ##arg) + +/* target operations */ +/* register read */ +static inline u32 wil_r(struct wil6210_priv *wil, u32 reg) +{ + return readl(wil->csr + HOSTADDR(reg)); +} + +/* register write. wmb() to make sure it is completed */ +static inline void wil_w(struct wil6210_priv *wil, u32 reg, u32 val) +{ + writel(val, wil->csr + HOSTADDR(reg)); + wmb(); /* wait for write to propagate to the HW */ +} + +/* register set = read, OR, write */ +static inline void wil_s(struct wil6210_priv *wil, u32 reg, u32 val) +{ + wil_w(wil, reg, wil_r(wil, reg) | val); +} + +/* register clear = read, AND with inverted, write */ +static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val) +{ + wil_w(wil, reg, wil_r(wil, reg) & ~val); +} #if defined(CONFIG_DYNAMIC_DEBUG) #define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \ @@ -746,7 +764,7 @@ void wil_back_tx_worker(struct work_struct *work); void wil_back_tx_flush(struct wil6210_priv *wil); void wil6210_clear_irq(struct wil6210_priv *wil); -int wil6210_init_irq(struct wil6210_priv *wil, int irq); +int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi); void wil6210_fini_irq(struct wil6210_priv *wil, int irq); void wil_mask_irq(struct wil6210_priv *wil); void wil_unmask_irq(struct wil6210_priv *wil); @@ -798,4 +816,8 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type); int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd); int wil_request_firmware(struct wil6210_priv *wil, const char *name); +int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime); +int wil_suspend(struct wil6210_priv *wil, bool is_runtime); +int wil_resume(struct wil6210_priv *wil, bool is_runtime); + #endif /* __WIL6210_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c index de15f1422fe9..2e831bf20117 100644 --- a/drivers/net/wireless/ath/wil6210/wil_platform.c +++ b/drivers/net/wireless/ath/wil6210/wil_platform.c @@ -14,7 +14,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#include "linux/device.h" +#include #include "wil_platform.h" int __init wil_platform_modinit(void) diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index c759759afbb2..7a257360c420 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -228,8 +228,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head); /* wait till FW finish with previous command */ for (retry = 5; retry > 0; retry--) { - r->tail = ioread32(wil->csr + HOST_MBOX + - offsetof(struct wil6210_mbox_ctl, tx.tail)); + r->tail = wil_r(wil, RGF_MBOX + + offsetof(struct wil6210_mbox_ctl, tx.tail)); if (next_head != r->tail) break; msleep(20); @@ -254,16 +254,16 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) wil_memcpy_toio_32(dst, &cmd, sizeof(cmd)); wil_memcpy_toio_32(dst + sizeof(cmd), buf, len); /* mark entry as full */ - iowrite32(1, wil->csr + HOSTADDR(r->head) + - offsetof(struct wil6210_mbox_ring_desc, sync)); + wil_w(wil, r->head + offsetof(struct wil6210_mbox_ring_desc, sync), 1); /* advance next ptr */ - iowrite32(r->head = next_head, wil->csr + HOST_MBOX + - offsetof(struct wil6210_mbox_ctl, tx.head)); + wil_w(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, tx.head), + r->head = next_head); trace_wil6210_wmi_cmd(&cmd.wmi, buf, len); /* interrupt to FW */ - iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT); + wil_w(wil, RGF_USER_USER_ICR + offsetof(struct RGF_ICR, ICS), + SW_INT_MBOX); return 0; } @@ -312,22 +312,44 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) struct wiphy *wiphy = wil_to_wiphy(wil); struct ieee80211_mgmt *rx_mgmt_frame = (struct ieee80211_mgmt *)data->payload; - int ch_no = data->info.channel+1; - u32 freq = ieee80211_channel_to_frequency(ch_no, - IEEE80211_BAND_60GHZ); - struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq); - s32 signal = data->info.sqi; - __le16 fc = rx_mgmt_frame->frame_control; - u32 d_len = le32_to_cpu(data->info.len); - u16 d_status = le16_to_cpu(data->info.status); + int flen = len - offsetof(struct wmi_rx_mgmt_packet_event, payload); + int ch_no; + u32 freq; + struct ieee80211_channel *channel; + s32 signal; + __le16 fc; + u32 d_len; + u16 d_status; - wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d SQI %d%%\n", + if (flen < 0) { + wil_err(wil, "MGMT Rx: short event, len %d\n", len); + return; + } + + d_len = le32_to_cpu(data->info.len); + if (d_len != flen) { + wil_err(wil, + "MGMT Rx: length mismatch, d_len %d should be %d\n", + d_len, flen); + return; + } + + ch_no = data->info.channel + 1; + freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ); + channel = ieee80211_get_channel(wiphy, freq); + signal = data->info.sqi; + d_status = le16_to_cpu(data->info.status); + fc = rx_mgmt_frame->frame_control; + + wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d SNR %d SQI %d%%\n", data->info.channel, data->info.mcs, data->info.snr, data->info.sqi); wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len, le16_to_cpu(fc)); wil_dbg_wmi(wil, "qid %d mid %d cid %d\n", data->info.qid, data->info.mid, data->info.cid); + wil_hex_dump_wmi("MGMT Rx ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame, + d_len, true); if (!channel) { wil_err(wil, "Frame on unsupported channel\n"); @@ -363,6 +385,17 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) } } +static void 
wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) +{ + struct wmi_tx_mgmt_packet_event *data = d; + struct ieee80211_mgmt *mgmt_frame = + (struct ieee80211_mgmt *)data->payload; + int flen = len - offsetof(struct wmi_tx_mgmt_packet_event, payload); + + wil_hex_dump_wmi("MGMT Tx ", DUMP_PREFIX_OFFSET, 16, 1, mgmt_frame, + flen, true); +} + static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id, void *d, int len) { @@ -659,6 +692,7 @@ static const struct { {WMI_READY_EVENTID, wmi_evt_ready}, {WMI_FW_READY_EVENTID, wmi_evt_fw_ready}, {WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt}, + {WMI_TX_MGMT_PACKET_EVENTID, wmi_evt_tx_mgmt}, {WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete}, {WMI_CONNECT_EVENTID, wmi_evt_connect}, {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect}, @@ -695,8 +729,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil) u16 len; bool q; - r->head = ioread32(wil->csr + HOST_MBOX + - offsetof(struct wil6210_mbox_ctl, rx.head)); + r->head = wil_r(wil, RGF_MBOX + + offsetof(struct wil6210_mbox_ctl, rx.head)); if (r->tail == r->head) break; @@ -734,8 +768,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil) cmd = (void *)&evt->event.wmi; wil_memcpy_fromio_32(cmd, src, len); /* mark entry as empty */ - iowrite32(0, wil->csr + HOSTADDR(r->tail) + - offsetof(struct wil6210_mbox_ring_desc, sync)); + wil_w(wil, r->tail + + offsetof(struct wil6210_mbox_ring_desc, sync), 0); /* indicate */ if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) && (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { @@ -754,8 +788,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil) /* advance tail */ r->tail = r->base + ((r->tail - r->base + sizeof(struct wil6210_mbox_ring_desc)) % r->size); - iowrite32(r->tail, wil->csr + HOST_MBOX + - offsetof(struct wil6210_mbox_ctl, rx.tail)); + wil_w(wil, RGF_MBOX + + offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail); /* add to the pending list */ spin_lock_irqsave(&wil->wmi_ev_lock, flags); @@ -988,12 +1022,21 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie) { + static const char *const names[] = { + [WMI_FRAME_BEACON] = "BEACON", + [WMI_FRAME_PROBE_REQ] = "PROBE_REQ", + [WMI_FRAME_PROBE_RESP] = "WMI_FRAME_PROBE_RESP", + [WMI_FRAME_ASSOC_REQ] = "WMI_FRAME_ASSOC_REQ", + [WMI_FRAME_ASSOC_RESP] = "WMI_FRAME_ASSOC_RESP", + }; int rc; u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len; struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL); - if (!cmd) - return -ENOMEM; + if (!cmd) { + rc = -ENOMEM; + goto out; + } if (!ie) ie_len = 0; @@ -1003,6 +1046,12 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie) memcpy(cmd->ie_info, ie, ie_len); rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len); kfree(cmd); +out: + if (rc) { + const char *name = type < ARRAY_SIZE(names) ? 
+ names[type] : "??"; + wil_err(wil, "set_ie(%d %s) failed : %d\n", type, name, rc); + } return rc; } @@ -1129,15 +1178,42 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf) int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason) { + int rc; + u16 reason_code; struct wmi_disconnect_sta_cmd cmd = { .disconnect_reason = cpu_to_le16(reason), }; + struct { + struct wil6210_mbox_hdr_wmi wmi; + struct wmi_disconnect_event evt; + } __packed reply; ether_addr_copy(cmd.dst_mac, mac); wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason); - return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd)); + rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd), + WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000); + /* failure to disconnect in reasonable time treated as FW error */ + if (rc) { + wil_fw_error_recovery(wil); + return rc; + } + + /* call event handler manually after processing wmi_call, + * to avoid deadlock - disconnect event handler acquires wil->mutex + * while it is already held here + */ + reason_code = le16_to_cpu(reply.evt.protocol_reason_status); + + wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n", + reply.evt.bssid, reason_code, + reply.evt.disconnect_reason); + + wil->sinfo_gen++; + wil6210_disconnect(wil, reply.evt.bssid, reason_code, true); + + return 0; } int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout) @@ -1279,7 +1355,7 @@ static void wmi_event_handle(struct wil6210_priv *wil, /* search for handler */ if (!wmi_evt_call_handler(wil, id, evt_data, len - sizeof(*wmi))) { - wil_err(wil, "Unhandled event 0x%04x\n", id); + wil_info(wil, "Unhandled event 0x%04x\n", id); } } else { wil_err(wil, "Unknown event type\n"); diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c index 25d1cbd34306..b2f0d245bcf3 100644 --- a/drivers/net/wireless/b43/tables_nphy.c +++ b/drivers/net/wireless/b43/tables_nphy.c @@ -3728,7 +3728,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev) switch (phy->rev) { case 6: case 5: - if (sprom->fem.ghz5.extpa_gain == 3) + if (sprom->fem.ghz2.extpa_gain == 3) return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g; /* fall through */ case 4: diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c index d36f5f3d931b..f990e3d0e696 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c @@ -2564,15 +2564,6 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus) } } -static void atomic_orr(int val, atomic_t *v) -{ - int old_val; - - old_val = atomic_read(v); - while (atomic_cmpxchg(v, old_val, val | old_val) != old_val) - old_val = atomic_read(v); -} - static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus) { struct brcmf_core *buscore; @@ -2595,7 +2586,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus) if (val) { brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret); bus->sdcnt.f1regdata++; - atomic_orr(val, &bus->intstatus); + atomic_or(val, &bus->intstatus); } return ret; @@ -2712,7 +2703,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus) /* Keep still-pending events for next scheduling */ if (intstatus) - atomic_orr(intstatus, &bus->intstatus); + atomic_or(intstatus, &bus->intstatus); brcmf_sdio_clrintr(bus); diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index 01de1a3bf94e..80d4228ba754 100644 --- 
a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c @@ -865,7 +865,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local, switch(type) { case HOSTAP_INTERFACE_AP: - dev->tx_queue_len = 0; /* use main radio device queue */ + dev->priv_flags |= IFF_NO_QUEUE; /* use main radio device queue */ dev->netdev_ops = &hostap_mgmt_netdev_ops; dev->type = ARPHRD_IEEE80211; dev->header_ops = &hostap_80211_ops; @@ -874,7 +874,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local, dev->netdev_ops = &hostap_master_ops; break; default: - dev->tx_queue_len = 0; /* use main radio device queue */ + dev->priv_flags |= IFF_NO_QUEUE; /* use main radio device queue */ dev->netdev_ops = &hostap_netdev_ops; } diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h index c160dad03037..edc3dd42f8f8 100644 --- a/drivers/net/wireless/iwlwifi/dvm/agn.h +++ b/drivers/net/wireless/iwlwifi/dvm/agn.h @@ -122,9 +122,8 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd) void iwl_down(struct iwl_priv *priv); void iwl_cancel_deferred_work(struct iwl_priv *priv); void iwlagn_prepare_restart(struct iwl_priv *priv); -int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb); bool iwl_check_for_ct_kill(struct iwl_priv *priv); @@ -216,11 +215,9 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); -int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb); +void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb); static inline u32 iwl_tx_status_to_mac80211(u32 status) { @@ -277,9 +274,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv, /* bt coex */ void iwlagn_send_advance_bt_config(struct iwl_priv *priv); -int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv); void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv); void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv); @@ -332,8 +326,7 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct iwl_link_quality_cmd *lq, u8 flags, bool init); -int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb); int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct ieee80211_sta *sta); diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c index 0ffb6ff1a255..b15e44f8d1bd 100644 --- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c @@ -310,12 +310,8 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file, pos += scnprintf(buf + pos, buf_size - pos, "NVM version: 
0x%x\n", nvm_ver); for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { - pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); - hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos, - buf_size - pos, 0); - pos += strlen(buf + pos); - if (buf_size - pos > 0) - buf[pos++] = '\n'; + pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n", + ofs, ptr + ofs); } ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h index 3811878ab9cd..0ba3e56d6015 100644 --- a/drivers/net/wireless/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/iwlwifi/dvm/dev.h @@ -669,6 +669,8 @@ struct iwl_priv { /* ieee device used by generic ieee processing code */ struct ieee80211_hw *hw; + struct napi_struct *napi; + struct list_head calib_results; struct workqueue_struct *workqueue; @@ -678,9 +680,8 @@ struct iwl_priv { enum ieee80211_band band; u8 valid_contexts; - int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); + void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb); struct iwl_notif_wait_data notif_wait; diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c index 1d2223df5cb0..ab45819c1fbb 100644 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c @@ -659,9 +659,8 @@ static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv, return need_update; } -int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data; @@ -669,7 +668,7 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) { /* bt coex disabled */ - return 0; + return; } IWL_DEBUG_COEX(priv, "BT Coex notification:\n"); @@ -714,7 +713,6 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, /* FIXME: based on notification, adjust the prio_boost */ priv->bt_ci_compliance = coex->bt_ci_compliance; - return 0; } void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv) diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c index 7acaa266b704..453f7c315ab5 100644 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -250,12 +250,24 @@ static int __iwl_up(struct iwl_priv *priv) } } + ret = iwl_trans_start_hw(priv->trans); + if (ret) { + IWL_ERR(priv, "Failed to start HW: %d\n", ret); + goto error; + } + ret = iwl_run_init_ucode(priv); if (ret) { IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret); goto error; } + ret = iwl_trans_start_hw(priv->trans); + if (ret) { + IWL_ERR(priv, "Failed to start HW: %d\n", ret); + goto error; + } + ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR); if (ret) { IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret); @@ -432,7 +444,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw) u32 error_id; } err_info; struct iwl_notification_wait status_wait; - static const u8 status_cmd[] = { + static const u16 status_cmd[] = { REPLY_WOWLAN_GET_STATUS, }; struct iwlagn_wowlan_status status_data = {}; diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c index 234e30f498b2..e7616f0ee6e8 100644 
--- a/drivers/net/wireless/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/iwlwifi/dvm/main.c @@ -2029,17 +2029,6 @@ static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) return false; } -static void iwl_napi_add(struct iwl_op_mode *op_mode, - struct napi_struct *napi, - struct net_device *napi_dev, - int (*poll)(struct napi_struct *, int), - int weight) -{ - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - - ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight); -} - static const struct iwl_op_mode_ops iwl_dvm_ops = { .start = iwl_op_mode_dvm_start, .stop = iwl_op_mode_dvm_stop, @@ -2052,7 +2041,6 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = { .cmd_queue_full = iwl_cmd_queue_full, .nic_config = iwl_nic_config, .wimax_active = iwl_wimax_active, - .napi_add = iwl_napi_add, }; /***************************************************************************** diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c index 3bd7c86e90d9..cef921c1a623 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/iwlwifi/dvm/rs.c @@ -1416,11 +1416,11 @@ static int rs_switch_to_siso(struct iwl_priv *priv, /* * Try to switch to new modulation mode from legacy */ -static int rs_move_legacy_other(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, - int index) +static void rs_move_legacy_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + int index) { struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); struct iwl_scale_tbl_info *search_tbl = @@ -1575,7 +1575,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv, } search_tbl->lq_type = LQ_NONE; - return 0; + return; out: lq_sta->search_better_tbl = 1; @@ -1584,17 +1584,15 @@ out: tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; if (update_search_tbl_counter) search_tbl->action = tbl->action; - return 0; - } /* * Try to switch to new modulation mode from SISO */ -static int rs_move_siso_to_other(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, int index) +static void rs_move_siso_to_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) { u8 is_green = lq_sta->is_green; struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); @@ -1747,7 +1745,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv, break; } search_tbl->lq_type = LQ_NONE; - return 0; + return; out: lq_sta->search_better_tbl = 1; @@ -1756,17 +1754,15 @@ static int rs_move_siso_to_other(struct iwl_priv *priv, tbl->action = IWL_SISO_SWITCH_ANTENNA1; if (update_search_tbl_counter) search_tbl->action = tbl->action; - - return 0; } /* * Try to switch to new modulation mode from MIMO2 */ -static int rs_move_mimo2_to_other(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, int index) +static void rs_move_mimo2_to_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) { s8 is_green = lq_sta->is_green; struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); @@ -1917,7 +1913,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv, break; } search_tbl->lq_type = LQ_NONE; - return 0; + return; out: lq_sta->search_better_tbl = 1; tbl->action++; @@ -1926,17 
+1922,15 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv, if (update_search_tbl_counter) search_tbl->action = tbl->action; - return 0; - } /* * Try to switch to new modulation mode from MIMO3 */ -static int rs_move_mimo3_to_other(struct iwl_priv *priv, - struct iwl_lq_sta *lq_sta, - struct ieee80211_conf *conf, - struct ieee80211_sta *sta, int index) +static void rs_move_mimo3_to_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) { s8 is_green = lq_sta->is_green; struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); @@ -2093,7 +2087,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv, break; } search_tbl->lq_type = LQ_NONE; - return 0; + return; out: lq_sta->search_better_tbl = 1; tbl->action++; @@ -2101,9 +2095,6 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv, tbl->action = IWL_MIMO3_SWITCH_ANTENNA1; if (update_search_tbl_counter) search_tbl->action = tbl->action; - - return 0; - } /* diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c index debec963c610..4785203ae203 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/iwlwifi/dvm/rx.c @@ -123,9 +123,8 @@ const char *const iwl_dvm_cmd_strings[REPLY_MAX] = { * ******************************************************************************/ -static int iwlagn_rx_reply_error(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_reply_error(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_error_resp *err_resp = (void *)pkt->data; @@ -136,11 +135,9 @@ static int iwlagn_rx_reply_error(struct iwl_priv *priv, err_resp->cmd_id, le16_to_cpu(err_resp->bad_cmd_seq_num), le32_to_cpu(err_resp->error_info)); - return 0; } -static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_csa_notification *csa = (void *)pkt->data; @@ -152,7 +149,7 @@ static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, struct iwl_rxon_cmd *rxon = (void *)&ctx->active; if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) - return 0; + return; if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) { rxon->channel = csa->channel; @@ -165,13 +162,11 @@ static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, le16_to_cpu(csa->channel)); iwl_chswitch_done(priv, false); } - return 0; } -static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_spectrum_notification *report = (void *)pkt->data; @@ -179,17 +174,15 @@ static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv, if (!report->state) { IWL_DEBUG_11H(priv, "Spectrum Measure Notification: Start\n"); - return 0; + return; } memcpy(&priv->measure_report, report, sizeof(*report)); priv->measurement_status |= MEASUREMENT_READY; - return 0; } -static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv, + struct 
iwl_rx_cmd_buffer *rxb) { #ifdef CONFIG_IWLWIFI_DEBUG struct iwl_rx_packet *pkt = rxb_addr(rxb); @@ -197,24 +190,20 @@ static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv, IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n", sleep->pm_sleep_mode, sleep->pm_wakeup_src); #endif - return 0; } -static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u32 __maybe_unused len = iwl_rx_packet_len(pkt); IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " "notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len); iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len); - return 0; } -static int iwlagn_rx_beacon_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwlagn_beacon_notif *beacon = (void *)pkt->data; @@ -232,8 +221,6 @@ static int iwlagn_rx_beacon_notif(struct iwl_priv *priv, #endif priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); - - return 0; } /** @@ -448,9 +435,8 @@ iwlagn_accumulative_statistics(struct iwl_priv *priv, } #endif -static int iwlagn_rx_statistics(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { unsigned long stamp = jiffies; const int reg_recalib_period = 60; @@ -505,7 +491,7 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv, len, sizeof(struct iwl_bt_notif_statistics), sizeof(struct iwl_notif_statistics)); spin_unlock(&priv->statistics.lock); - return 0; + return; } change = common->temperature != priv->statistics.common.temperature || @@ -550,13 +536,10 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv, priv->lib->temperature(priv); spin_unlock(&priv->statistics.lock); - - return 0; } -static int iwlagn_rx_reply_statistics(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_reply_statistics(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_notif_statistics *stats = (void *)pkt->data; @@ -572,15 +555,14 @@ static int iwlagn_rx_reply_statistics(struct iwl_priv *priv, #endif IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); } - iwlagn_rx_statistics(priv, rxb, cmd); - return 0; + + iwlagn_rx_statistics(priv, rxb); } /* Handle notification from uCode that card's power state is changing * due to software, hardware, or critical temperature RFKILL */ -static int iwlagn_rx_card_state_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_card_state_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_card_state_notif *card_state_notif = (void *)pkt->data; @@ -627,12 +609,10 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv, test_bit(STATUS_RF_KILL_HW, &priv->status))) wiphy_rfkill_set_hw_state(priv->hw->wiphy, test_bit(STATUS_RF_KILL_HW, &priv->status)); - return 0; } -static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv, + struct 
iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); @@ -649,14 +629,12 @@ static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv, if (!test_bit(STATUS_SCANNING, &priv->status)) iwl_init_sensitivity(priv); } - return 0; } /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ -static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); @@ -664,7 +642,6 @@ static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, priv->ampdu_ref++; memcpy(&priv->last_phy_res, pkt->data, sizeof(struct iwl_rx_phy_res)); - return 0; } /* @@ -786,7 +763,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); - ieee80211_rx(priv->hw, skb); + ieee80211_rx_napi(priv->hw, skb, priv->napi); } static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) @@ -890,9 +867,8 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv, } /* Called for REPLY_RX_MPDU_CMD */ -static int iwlagn_rx_reply_rx(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct ieee80211_hdr *header; struct ieee80211_rx_status rx_status = {}; @@ -906,7 +882,7 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv, if (!priv->last_phy_res_valid) { IWL_ERR(priv, "MPDU frame without cached PHY data\n"); - return 0; + return; } phy_res = &priv->last_phy_res; amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data; @@ -919,14 +895,14 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv, if ((unlikely(phy_res->cfg_phy_cnt > 20))) { IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n", phy_res->cfg_phy_cnt); - return 0; + return; } if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status)); - return 0; + return; } /* This will be used in several places later */ @@ -998,12 +974,10 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv, iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status, rxb, &rx_status); - return 0; } -static int iwlagn_rx_noa_notification(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwlagn_rx_noa_notification(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_wipan_noa_data *new_data, *old_data; struct iwl_rx_packet *pkt = rxb_addr(rxb); @@ -1041,8 +1015,6 @@ static int iwlagn_rx_noa_notification(struct iwl_priv *priv, if (old_data) kfree_rcu(old_data, rcu_head); - - return 0; } /** @@ -1053,8 +1025,7 @@ static int iwlagn_rx_noa_notification(struct iwl_priv *priv, */ void iwl_setup_rx_handlers(struct iwl_priv *priv) { - int (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); + void (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb); handlers = priv->rx_handlers; @@ -1102,12 +1073,11 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv) iwlagn_bt_rx_handler_setup(priv); } -int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi, 
+ struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - int err = 0; /* * Do the notification wait before RX handlers so @@ -1121,12 +1091,11 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, * rx_handlers table. See iwl_setup_rx_handlers() */ if (priv->rx_handlers[pkt->hdr.cmd]) { priv->rx_handlers_stats[pkt->hdr.cmd]++; - err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd); + priv->rx_handlers[pkt->hdr.cmd](priv, rxb); } else { /* No handling needed */ IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n", iwl_dvm_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); } - return err; } diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index ed50de6362ed..85ceceb34fcc 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -123,7 +124,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv, __le32 old_filter = send->filter_flags; u8 old_dev_type = send->dev_type; int ret; - static const u8 deactivate_cmd[] = { + static const u16 deactivate_cmd[] = { REPLY_WIPAN_DEACTIVATION_COMPLETE }; diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c index 43bef901e8f9..648159495bbc 100644 --- a/drivers/net/wireless/iwlwifi/dvm/scan.c +++ b/drivers/net/wireless/iwlwifi/dvm/scan.c @@ -247,9 +247,8 @@ void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) } /* Service response to REPLY_SCAN_CMD (0x80) */ -static int iwl_rx_reply_scan(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwl_rx_reply_scan(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { #ifdef CONFIG_IWLWIFI_DEBUG struct iwl_rx_packet *pkt = rxb_addr(rxb); @@ -257,13 +256,11 @@ static int iwl_rx_reply_scan(struct iwl_priv *priv, IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status); #endif - return 0; } /* Service SCAN_START_NOTIFICATION (0x82) */ -static int iwl_rx_scan_start_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwl_rx_scan_start_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_scanstart_notification *notif = (void *)pkt->data; @@ -277,14 +274,11 @@ static int iwl_rx_scan_start_notif(struct iwl_priv *priv, le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer); - - return 0; } /* Service SCAN_RESULTS_NOTIFICATION (0x83) */ -static int iwl_rx_scan_results_notif(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwl_rx_scan_results_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { #ifdef CONFIG_IWLWIFI_DEBUG struct iwl_rx_packet *pkt = rxb_addr(rxb); @@ -303,13 +297,11 @@ static int iwl_rx_scan_results_notif(struct iwl_priv *priv, le32_to_cpu(notif->statistics[0]), le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf); #endif - return 0; } /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ -static int iwl_rx_scan_complete_notif(struct iwl_priv 
*priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data; @@ -356,7 +348,6 @@ static int iwl_rx_scan_complete_notif(struct iwl_priv *priv, queue_work(priv->workqueue, &priv->bt_traffic_change_work); } - return 0; } void iwl_setup_rx_scan_handlers(struct iwl_priv *priv) diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c index 6ec86adbe4a1..0fa67d3b7235 100644 --- a/drivers/net/wireless/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/iwlwifi/dvm/sta.c @@ -60,41 +60,28 @@ static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) return 0; } -static int iwl_process_add_sta_resp(struct iwl_priv *priv, - struct iwl_addsta_cmd *addsta, - struct iwl_rx_packet *pkt) +static void iwl_process_add_sta_resp(struct iwl_priv *priv, + struct iwl_rx_packet *pkt) { struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data; - u8 sta_id = addsta->sta.sta_id; - int ret = -EIO; - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", - pkt->hdr.flags); - return ret; - } - - IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n", - sta_id); + IWL_DEBUG_INFO(priv, "Processing response for adding station\n"); spin_lock_bh(&priv->sta_lock); switch (add_sta_resp->status) { case ADD_STA_SUCCESS_MSK: IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); - ret = iwl_sta_ucode_activate(priv, sta_id); break; case ADD_STA_NO_ROOM_IN_TABLE: - IWL_ERR(priv, "Adding station %d failed, no room in table.\n", - sta_id); + IWL_ERR(priv, "Adding station failed, no room in table.\n"); break; case ADD_STA_NO_BLOCK_ACK_RESOURCE: - IWL_ERR(priv, "Adding station %d failed, no block ack " - "resource.\n", sta_id); + IWL_ERR(priv, + "Adding station failed, no block ack resource.\n"); break; case ADD_STA_MODIFY_NON_EXIST_STA: - IWL_ERR(priv, "Attempting to modify non-existing station %d\n", - sta_id); + IWL_ERR(priv, "Attempting to modify non-existing station\n"); break; default: IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", @@ -102,37 +89,14 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv, break; } - IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n", - priv->stations[sta_id].sta.mode == - STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", - sta_id, priv->stations[sta_id].sta.sta.addr); - - /* - * XXX: The MAC address in the command buffer is often changed from - * the original sent to the device. That is, the MAC address - * written to the command buffer often is not the same MAC address - * read from the command buffer when the command returns. This - * issue has not yet been resolved and this debugging is left to - * observe the problem. - */ - IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n", - priv->stations[sta_id].sta.mode == - STA_CONTROL_MODIFY_MSK ? 
"Modified" : "Added", - addsta->sta.addr); spin_unlock_bh(&priv->sta_lock); - - return ret; } -int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); - if (!cmd) - return 0; - - return iwl_process_add_sta_resp(priv, (void *)cmd->payload, pkt); + iwl_process_add_sta_resp(priv, pkt); } int iwl_send_add_sta(struct iwl_priv *priv, @@ -146,6 +110,8 @@ int iwl_send_add_sta(struct iwl_priv *priv, .len = { sizeof(*sta), }, }; u8 sta_id __maybe_unused = sta->sta.sta_id; + struct iwl_rx_packet *pkt; + struct iwl_add_sta_resp *add_sta_resp; IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); @@ -159,16 +125,22 @@ int iwl_send_add_sta(struct iwl_priv *priv, if (ret || (flags & CMD_ASYNC)) return ret; - /*else the command was successfully sent in SYNC mode, need to free - * the reply page */ + + pkt = cmd.resp_pkt; + add_sta_resp = (void *)pkt->data; + + /* debug messages are printed in the handler */ + if (add_sta_resp->status == ADD_STA_SUCCESS_MSK) { + spin_lock_bh(&priv->sta_lock); + ret = iwl_sta_ucode_activate(priv, sta_id); + spin_unlock_bh(&priv->sta_lock); + } else { + ret = -EIO; + } iwl_free_resp(&cmd); - if (cmd.handler_status) - IWL_ERR(priv, "%s - error in the CMD response %d\n", __func__, - cmd.handler_status); - - return cmd.handler_status; + return ret; } bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, @@ -452,6 +424,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv, struct iwl_rx_packet *pkt; int ret; struct iwl_rem_sta_cmd rm_sta_cmd; + struct iwl_rem_sta_resp *rem_sta_resp; struct iwl_host_cmd cmd = { .id = REPLY_REMOVE_STA, @@ -471,29 +444,23 @@ static int iwl_send_remove_station(struct iwl_priv *priv, return ret; pkt = cmd.resp_pkt; - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", - pkt->hdr.flags); + rem_sta_resp = (void *)pkt->data; + + switch (rem_sta_resp->status) { + case REM_STA_SUCCESS_MSK: + if (!temporary) { + spin_lock_bh(&priv->sta_lock); + iwl_sta_ucode_deactivate(priv, sta_id); + spin_unlock_bh(&priv->sta_lock); + } + IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); + break; + default: ret = -EIO; + IWL_ERR(priv, "REPLY_REMOVE_STA failed\n"); + break; } - if (!ret) { - struct iwl_rem_sta_resp *rem_sta_resp = (void *)pkt->data; - switch (rem_sta_resp->status) { - case REM_STA_SUCCESS_MSK: - if (!temporary) { - spin_lock_bh(&priv->sta_lock); - iwl_sta_ucode_deactivate(priv, sta_id); - spin_unlock_bh(&priv->sta_lock); - } - IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); - break; - default: - ret = -EIO; - IWL_ERR(priv, "REPLY_REMOVE_STA failed\n"); - break; - } - } iwl_free_resp(&cmd); return ret; diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 275df12a6045..bddd19769035 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -1128,8 +1128,7 @@ static void iwl_check_abort_status(struct iwl_priv *priv, } } -int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 sequence = le16_to_cpu(pkt->hdr.sequence); @@ -1273,8 +1272,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct 
iwl_rx_cmd_buffer *rxb, skb = __skb_dequeue(&skbs); ieee80211_tx_status(priv->hw, skb); } - - return 0; } /** @@ -1283,9 +1280,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, * Handles block-acknowledge notification from device, which reports success * of frames sent via aggregation. */ -int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data; @@ -1306,7 +1302,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, if (scd_flow >= priv->cfg->base_params->num_of_queues) { IWL_ERR(priv, "BUG_ON scd_flow is bigger than number of queues\n"); - return 0; + return; } sta_id = ba_resp->sta_id; @@ -1319,7 +1315,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, if (unlikely(ba_resp->bitmap)) IWL_ERR(priv, "Received BA when not expected\n"); spin_unlock_bh(&priv->sta_lock); - return 0; + return; } if (unlikely(scd_flow != agg->txq_id)) { @@ -1333,7 +1329,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n", scd_flow, sta_id, tid, agg->txq_id); spin_unlock_bh(&priv->sta_lock); - return 0; + return; } __skb_queue_head_init(&reclaimed_skbs); @@ -1413,6 +1409,4 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, skb = __skb_dequeue(&reclaimed_skbs); ieee80211_tx_status(priv->hw, skb); } - - return 0; } diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c index 5244e43bfafb..931a8e4269ef 100644 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c @@ -3,6 +3,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -327,7 +328,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, const struct fw_img *fw; int ret; enum iwl_ucode_type old_type; - static const u8 alive_cmd[] = { REPLY_ALIVE }; + static const u16 alive_cmd[] = { REPLY_ALIVE }; fw = iwl_get_ucode_image(priv, ucode_type); if (WARN_ON(!fw)) @@ -406,7 +407,7 @@ static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait, int iwl_run_init_ucode(struct iwl_priv *priv) { struct iwl_notification_wait calib_wait; - static const u8 calib_complete[] = { + static const u16 calib_complete[] = { CALIBRATION_RES_NOTIFICATION, CALIBRATION_COMPLETE_NOTIFICATION }; diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index cc35f796d406..413b63e09717 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -69,14 +69,14 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL7260_UCODE_API_MAX 15 +#define IWL7260_UCODE_API_MAX 16 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 12 #define IWL3165_UCODE_API_OK 13 /* Lowest firmware API version supported */ -#define IWL7260_UCODE_API_MIN 10 +#define IWL7260_UCODE_API_MIN 12 #define IWL3165_UCODE_API_MIN 13 /* NVM versions */ diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index 72040cd0b979..8324bc8987a9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -69,13 +69,13 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 15 +#define IWL8000_UCODE_API_MAX 16 /* Oldest version we won't warn about */ #define IWL8000_UCODE_API_OK 12 /* Lowest firmware API version supported */ -#define IWL8000_UCODE_API_MIN 10 +#define IWL8000_UCODE_API_MIN 12 /* NVM versions */ #define IWL8000_NVM_VERSION 0x0a1d @@ -97,8 +97,9 @@ #define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B" #define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C" -/* Max SDIO RX aggregation size of the ADDBA request/response */ -#define MAX_RX_AGG_SIZE_8260_SDIO 28 +/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */ +#define MAX_RX_AGG_SIZE_8260_SDIO 21 +#define MAX_TX_AGG_SIZE_8260_SDIO 40 /* Max A-MPDU exponent for HT and VHT */ #define MAX_HT_AMPDU_EXPONENT_8260_SDIO IEEE80211_HT_MAX_AMPDU_32K @@ -154,6 +155,7 @@ static const struct iwl_tt_params iwl8000_tt_params = { .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \ .d0i3 = true, \ + .features = NETIF_F_RXCSUM, \ .non_shared_ant = ANT_A, \ .dccm_offset = IWL8260_DCCM_OFFSET, \ .dccm_len = IWL8260_DCCM_LEN, \ @@ -203,6 +205,7 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = { .nvm_ver = IWL8000_NVM_VERSION, .nvm_calib_ver = IWL8000_TX_POWER_VERSION, .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, + .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO, .disable_dummy_notification = true, .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO, .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO, @@ -216,6 +219,7 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = { .nvm_ver = IWL8000_NVM_VERSION, .nvm_calib_ver = IWL8000_TX_POWER_VERSION, .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO, + .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO, .bt_shared_single_ant = true, 
.disable_dummy_notification = true, .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO, diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h index 08c14afeb148..939fa229c038 100644 --- a/drivers/net/wireless/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/iwlwifi/iwl-config.h @@ -297,6 +297,7 @@ struct iwl_pwr_tx_backoff { * mode set * @d0i3: device uses d0i3 instead of d3 * @nvm_hw_section_num: the ID of the HW NVM section + * @features: hw features, any combination of feature_whitelist * @pwr_tx_backoffs: translation table between power limits and backoffs * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response @@ -348,6 +349,7 @@ struct iwl_cfg { bool no_power_up_nic_in_init; const char *default_nvm_file_B_step; const char *default_nvm_file_C_step; + netdev_features_t features; unsigned int max_rx_agg_size; bool disable_dummy_notification; unsigned int max_tx_agg_size; diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h index faa17f2e352a..543abeaffcf0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -200,6 +200,7 @@ #define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ #define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */ #define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ +#define CSR_INT_BIT_PAGING (1 << 24) /* SDIO PAGING */ #define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ #define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ #define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */ @@ -210,6 +211,7 @@ CSR_INT_BIT_HW_ERR | \ CSR_INT_BIT_FH_TX | \ CSR_INT_BIT_SW_ERR | \ + CSR_INT_BIT_PAGING | \ CSR_INT_BIT_RF_KILL | \ CSR_INT_BIT_SW_RX | \ CSR_INT_BIT_WAKEUP | \ @@ -422,6 +424,7 @@ enum { /* DRAM INT TABLE */ #define CSR_DRAM_INT_TBL_ENABLE (1 << 31) +#define CSR_DRAM_INIT_TBL_WRITE_POINTER (1 << 28) #define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) /* diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h index 04e6649340b8..71a78cede9b0 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h @@ -35,8 +35,8 @@ TRACE_EVENT(iwlwifi_dev_tx_data, TP_PROTO(const struct device *dev, struct sk_buff *skb, - void *data, size_t data_len), - TP_ARGS(dev, skb, data, data_len), + u8 hdr_len, size_t data_len), + TP_ARGS(dev, skb, hdr_len, data_len), TP_STRUCT__entry( DEV_ENTRY @@ -45,7 +45,8 @@ TRACE_EVENT(iwlwifi_dev_tx_data, TP_fast_assign( DEV_ASSIGN; if (iwl_trace_data(skb)) - memcpy(__get_dynamic_array(data), data, data_len); + skb_copy_bits(skb, hdr_len, + __get_dynamic_array(data), data_len); ), TP_printk("[%s] TX frame data", __get_str(dev)) ); diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h index 948ce0802fa7..eb4b99a1c8cd 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h @@ -36,7 +36,7 @@ TRACE_EVENT(iwlwifi_dev_hcmd, TP_PROTO(const struct device *dev, struct iwl_host_cmd *cmd, u16 total_size, - struct iwl_cmd_header *hdr), + struct iwl_cmd_header_wide *hdr), TP_ARGS(dev, cmd, total_size, hdr), TP_STRUCT__entry( DEV_ENTRY @@ -44,11 +44,14 @@ TRACE_EVENT(iwlwifi_dev_hcmd, __field(u32, flags) ), TP_fast_assign( - int i, 
offset = sizeof(*hdr); + int i, offset = sizeof(struct iwl_cmd_header); + + if (hdr->group_id) + offset = sizeof(struct iwl_cmd_header_wide); DEV_ASSIGN; __entry->flags = cmd->flags; - memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr)); + memcpy(__get_dynamic_array(hcmd), hdr, offset); for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { if (!cmd->len[i]) @@ -58,8 +61,9 @@ TRACE_EVENT(iwlwifi_dev_hcmd, offset += cmd->len[i]; } ), - TP_printk("[%s] hcmd %#.2x (%ssync)", - __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0], + TP_printk("[%s] hcmd %#.2x.%#.2x (%ssync)", + __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[1], + ((u8 *)__get_dynamic_array(hcmd))[0], __entry->flags & CMD_ASYNC ? "a" : "") ); diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 6685259927f8..a86aa5bcee7d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -372,6 +372,30 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len) return 0; } +static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data, + const u32 len) +{ + struct iwl_fw_gscan_capabilities *fw_capa = (void *)data; + struct iwl_gscan_capabilities *capa = &fw->gscan_capa; + + if (len < sizeof(*fw_capa)) + return -EINVAL; + + capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size); + capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets); + capa->max_ap_cache_per_scan = + le32_to_cpu(fw_capa->max_ap_cache_per_scan); + capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size); + capa->max_scan_reporting_threshold = + le32_to_cpu(fw_capa->max_scan_reporting_threshold); + capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps); + capa->max_significant_change_aps = + le32_to_cpu(fw_capa->max_significant_change_aps); + capa->max_bssid_history_entries = + le32_to_cpu(fw_capa->max_bssid_history_entries); + return 0; +} + /* * Gets uCode section from tlv. 
*/ @@ -573,13 +597,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, size_t len = ucode_raw->size; const u8 *data; u32 tlv_len; + u32 usniffer_img; enum iwl_ucode_tlv_type tlv_type; const u8 *tlv_data; char buildstr[25]; - u32 build; + u32 build, paging_mem_size; int num_of_cpus; bool usniffer_images = false; bool usniffer_req = false; + bool gscan_capa = false; if (len < sizeof(*ucode)) { IWL_ERR(drv, "uCode has invalid length: %zd\n", len); @@ -955,12 +981,46 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, IWL_UCODE_REGULAR_USNIFFER, tlv_len); break; + case IWL_UCODE_TLV_PAGING: + if (tlv_len != sizeof(u32)) + goto invalid_tlv_len; + paging_mem_size = le32_to_cpup((__le32 *)tlv_data); + + IWL_DEBUG_FW(drv, + "Paging: paging enabled (size = %u bytes)\n", + paging_mem_size); + + if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) { + IWL_ERR(drv, + "Paging: driver supports up to %lu bytes for paging image\n", + MAX_PAGING_IMAGE_SIZE); + return -EINVAL; + } + + if (paging_mem_size & (FW_PAGING_SIZE - 1)) { + IWL_ERR(drv, + "Paging: image size isn't a multiple of %lu\n", + FW_PAGING_SIZE); + return -EINVAL; + } + + drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size = + paging_mem_size; + usniffer_img = IWL_UCODE_REGULAR_USNIFFER; + drv->fw.img[usniffer_img].paging_mem_size = + paging_mem_size; + break; case IWL_UCODE_TLV_SDIO_ADMA_ADDR: if (tlv_len != sizeof(u32)) goto invalid_tlv_len; drv->fw.sdio_adma_addr = le32_to_cpup((__le32 *)tlv_data); break; + case IWL_UCODE_TLV_FW_GSCAN_CAPA: + if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len)) + goto invalid_tlv_len; + gscan_capa = true; + break; default: IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); break; @@ -979,6 +1039,16 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, return -EINVAL; } + /* + * If ucode advertises that it supports GSCAN but GSCAN + * capabilities TLV is not present, warn and continue without GSCAN.
+ */ + if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) && + WARN(!gscan_capa, + "GSCAN is supported but capabilities TLV is unavailable\n")) + __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT, + capa->_capa); + return 0; invalid_tlv_len: diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c index 21302b6f2bfd..acc3d186c5c1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c @@ -713,12 +713,12 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data, struct ieee80211_channel *chan = &data->channels[0]; int n = 0, idx = 0; - while (chan->band != band && idx < n_channels) + while (idx < n_channels && chan->band != band) chan = &data->channels[++idx]; sband->channels = &data->channels[idx]; - while (chan->band == band && idx < n_channels) { + while (idx < n_channels && chan->band == band) { chan = &data->channels[++idx]; n++; } diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h index d45dc021cda2..d56064861a9c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/iwlwifi/iwl-fh.h @@ -438,12 +438,6 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) #define RX_QUEUE_MASK 255 #define RX_QUEUE_SIZE_LOG 8 -/* - * RX related structures and functions - */ -#define RX_FREE_BUFFERS 64 -#define RX_LOW_WATERMARK 8 - /** * struct iwl_rb_status - reserve buffer status * host memory mapped FH registers
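The iwl-eeprom-parse.c hunk above is a short-circuit fix: the index bound has to be tested before the channel entry is dereferenced, otherwise the last iteration reads one element past the array when no channel of the requested band exists. A minimal standalone sketch of the same pattern (illustrative names, not driver code):

	#include <stddef.h>

	struct chan { int band; };

	static size_t first_of_band(const struct chan *ch, size_t n, int band)
	{
		size_t idx = 0;

		/* idx < n must come first so && short-circuits the
		 * dereference; the old operand order read ch[n].band
		 * once idx reached n. */
		while (idx < n && ch[idx].band != band)
			idx++;

		return idx; /* == n if the band is absent */
	}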
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h index e57dbd0ef2e1..af5b3201492c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h @@ -84,6 +84,8 @@ * @IWL_FW_ERROR_DUMP_MEM: chunk of memory * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump. * Structured as &struct iwl_fw_error_dump_trigger_desc. + * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as + * &struct iwl_fw_error_dump_rb */ enum iwl_fw_error_dump_type { /* 0 is deprecated */ @@ -97,6 +99,7 @@ enum iwl_fw_error_dump_type { IWL_FW_ERROR_DUMP_FH_REGS = 8, IWL_FW_ERROR_DUMP_MEM = 9, IWL_FW_ERROR_DUMP_ERROR_INFO = 10, + IWL_FW_ERROR_DUMP_RB = 11, IWL_FW_ERROR_DUMP_MAX, }; @@ -222,6 +225,20 @@ struct iwl_fw_error_dump_mem { u8 data[]; }; +/** + * struct iwl_fw_error_dump_rb - content of a Receive Buffer + * @index: the index of the Receive Buffer in the Rx queue + * @rxq: the RB's Rx queue + * @reserved: reserved + * @data: the content of the Receive Buffer + */ +struct iwl_fw_error_dump_rb { + __le32 index; + __le32 rxq; + __le32 reserved; + u8 data[]; +}; + /** * iwl_fw_error_next_data - advance fw error dump data pointer * @data: previous data block diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index a9b5ae4ebec0..75809abee759 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -132,12 +132,14 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_API_CHANGES_SET = 29, IWL_UCODE_TLV_ENABLED_CAPABILITIES = 30, IWL_UCODE_TLV_N_SCAN_CHANNELS = 31, + IWL_UCODE_TLV_PAGING = 32, IWL_UCODE_TLV_SEC_RT_USNIFFER = 34, IWL_UCODE_TLV_SDIO_ADMA_ADDR = 35, IWL_UCODE_TLV_FW_VERSION = 36, IWL_UCODE_TLV_FW_DBG_DEST = 38, IWL_UCODE_TLV_FW_DBG_CONF = 39, IWL_UCODE_TLV_FW_DBG_TRIGGER = 40, + IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, }; struct iwl_ucode_tlv { @@ -247,9 +249,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t; * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source. * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR * @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power. - * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, - * regardless of the band or the number of the probes. FW will calculate - * the actual dwell time. + * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header * @IWL_UCODE_TLV_API_SCD_CFG: This firmware can configure the scheduler * through the dedicated host command. * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too. @@ -266,7 +266,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9, IWL_UCODE_TLV_API_HDC_PHASE_0 = (__force iwl_ucode_tlv_api_t)10, IWL_UCODE_TLV_API_TX_POWER_DEV = (__force iwl_ucode_tlv_api_t)11, - IWL_UCODE_TLV_API_BASIC_DWELL = (__force iwl_ucode_tlv_api_t)13, + IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14, IWL_UCODE_TLV_API_SCD_CFG = (__force iwl_ucode_tlv_api_t)15, IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = (__force iwl_ucode_tlv_api_t)16, IWL_UCODE_TLV_API_ASYNC_DTM = (__force iwl_ucode_tlv_api_t)17,
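The _api and _capa spaces here are opaque bitmaps meant to be queried only through the fw_has_api()/fw_has_capa() accessors; this series itself gates GSCAN and the BT coex split on such bits. A hedged illustration of the pattern (the helper name is made up, the accessor and the bit are from this file):

	/* Sketch only: gating a driver feature on a TLV capability bit. */
	static bool iwl_example_has_gscan(const struct iwl_fw *fw)
	{
		return fw_has_capa(&fw->ucode_capa,
				   IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT);
	}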
@@ -284,6 +284,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan. * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer + * @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM) * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current * tx power value into TPC Report action frame and Link Measurement Report @@ -298,6 +299,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command + * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different @@ -305,12 +307,14 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR * is supported. * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC + * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan */ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0, IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1, IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2, IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3, + IWL_UCODE_TLV_CAPA_TOF_SUPPORT = (__force iwl_ucode_tlv_capa_t)5, IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6, IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8, IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9, @@ -320,10 +324,12 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13, IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18, IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19, + IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22, IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28, IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29, IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30, + IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31, }; /* The default calibrate table size if not specified by firmware file */ @@ -341,8 +347,9 @@ enum iwl_ucode_tlv_capa { * For 16.0 uCode and above, there is no differentiation between sections, * just an offset to the HW address.
*/ -#define IWL_UCODE_SECTION_MAX 12 +#define IWL_UCODE_SECTION_MAX 16 #define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC +#define PAGING_SEPARATOR_SECTION 0xAAAABBBB /* uCode version contains 4 values: Major/Minor/API/Serial */ #define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24) @@ -412,6 +419,12 @@ enum iwl_fw_dbg_reg_operator { PRPH_ASSIGN, PRPH_SETBIT, PRPH_CLEARBIT, + + INDIRECT_ASSIGN, + INDIRECT_SETBIT, + INDIRECT_CLEARBIT, + + PRPH_BLOCKBIT, }; /** @@ -485,10 +498,13 @@ struct iwl_fw_dbg_conf_hcmd { * * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data + * @IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to + * collect only monitor data */ enum iwl_fw_dbg_trigger_mode { IWL_FW_DBG_TRIGGER_START = BIT(0), IWL_FW_DBG_TRIGGER_STOP = BIT(1), + IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2), }; /** @@ -718,4 +734,28 @@ struct iwl_fw_dbg_conf_tlv { struct iwl_fw_dbg_conf_hcmd hcmd; } __packed; +/** + * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW + * @max_scan_cache_size: total space allocated for scan results (in bytes). + * @max_scan_buckets: maximum number of channel buckets. + * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan. + * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI. + * @max_scan_reporting_threshold: max possible report threshold, in percent. + * @max_hotlist_aps: maximum number of entries for hotlist APs. + * @max_significant_change_aps: maximum number of entries for significant + * change APs. + * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can + * hold. + */ +struct iwl_fw_gscan_capabilities { + __le32 max_scan_cache_size; + __le32 max_scan_buckets; + __le32 max_ap_cache_per_scan; + __le32 max_rssi_sample_size; + __le32 max_scan_reporting_threshold; + __le32 max_hotlist_aps; + __le32 max_significant_change_aps; + __le32 max_bssid_history_entries; +} __packed; + #endif /* __iwl_fw_file_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index 3e3c9d8b3c37..45e732150d28 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -133,6 +133,7 @@ struct fw_desc { struct fw_img { struct fw_desc sec[IWL_UCODE_SECTION_MAX]; bool is_dual_cpus; + u32 paging_mem_size; }; struct iwl_sf_region { @@ -140,6 +141,48 @@ u32 size; }; +/* + * Block paging calculations + */ +#define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */ +#define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */ +#define PAGE_PER_GROUP_2_EXP_SIZE 3 +/* 8 pages per group */ +#define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE) +/* don't change, support only 32KB size */ +#define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE) +/* 32K == 2^15 */ +#define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE) + +/* + * Image paging calculations + */ +#define BLOCK_PER_IMAGE_2_EXP_SIZE 5 +/* 2^5 == 32 blocks per image */ +#define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE) +/* maximum image size 1024KB */ +#define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE) + +/* Virtual address signature */ +#define PAGING_ADDR_SIG 0xAA000000 + +#define PAGING_CMD_IS_SECURED BIT(9) +#define PAGING_CMD_IS_ENABLED BIT(8) +#define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS 0 +#define PAGING_TLV_SECURE_MASK 1 +
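To make the paging constants above concrete: a firmware page is 4 KB (2^12), a block groups 8 pages into 32 KB, and an image may span at most 32 blocks, which yields the 1 MB MAX_PAGING_IMAGE_SIZE that iwl_parse_tlv_firmware() enforces. A small sketch of the resulting arithmetic (the helper name is hypothetical, not part of the patch):

	/* Hypothetical helper: how many 32 KB paging blocks a paging image
	 * of paging_mem_size bytes occupies; the TLV parser has already
	 * checked that the size is a multiple of FW_PAGING_SIZE. */
	static u32 iwl_example_num_paging_blocks(u32 paging_mem_size)
	{
		return DIV_ROUND_UP(paging_mem_size, PAGING_BLOCK_SIZE);
	}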
+/** + * struct iwl_fw_paging + * @fw_paging_phys: physical address of the paging block + * @fw_paging_block: pointer to the allocated block + * @fw_paging_size: page size + */ +struct iwl_fw_paging { + dma_addr_t fw_paging_phys; + struct page *fw_paging_block; + u32 fw_paging_size; +}; + /** * struct iwl_fw_cscheme_list - a cipher scheme list * @size: a number of entries @@ -150,6 +193,30 @@ struct iwl_fw_cscheme_list { struct iwl_fw_cipher_scheme cs[]; } __packed; +/** + * struct iwl_gscan_capabilities - gscan capabilities supported by FW + * @max_scan_cache_size: total space allocated for scan results (in bytes). + * @max_scan_buckets: maximum number of channel buckets. + * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan. + * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI. + * @max_scan_reporting_threshold: max possible report threshold, in percent. + * @max_hotlist_aps: maximum number of entries for hotlist APs. + * @max_significant_change_aps: maximum number of entries for significant + * change APs. + * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can + * hold. + */ +struct iwl_gscan_capabilities { + u32 max_scan_cache_size; + u32 max_scan_buckets; + u32 max_ap_cache_per_scan; + u32 max_rssi_sample_size; + u32 max_scan_reporting_threshold; + u32 max_hotlist_aps; + u32 max_significant_change_aps; + u32 max_bssid_history_entries; +}; + /** * struct iwl_fw - variables associated with the firmware * @@ -208,6 +275,7 @@ struct iwl_fw { struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX]; size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; u8 dbg_dest_reg_num; + struct iwl_gscan_capabilities gscan_capa; }; static inline const char *get_fw_dbg_mode_string(int mode) diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c index b5bc959b1dfe..6caf2affbbb5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c +++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -98,7 +99,8 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait, continue; for (i = 0; i < w->n_cmds; i++) { - if (w->cmds[i] == pkt->hdr.cmd) { + if (w->cmds[i] == + WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) { found = true; break; } @@ -136,7 +138,7 @@ IWL_EXPORT_SYMBOL(iwl_abort_notification_waits); void iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait, struct iwl_notification_wait *wait_entry, - const u8 *cmds, int n_cmds, + const u16 *cmds, int n_cmds, bool (*fn)(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data), void *fn_data) @@ -147,7 +149,7 @@ iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait, wait_entry->fn = fn; wait_entry->fn_data = fn_data; wait_entry->n_cmds = n_cmds; - memcpy(wait_entry->cmds, cmds, n_cmds); + memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16)); wait_entry->triggered = false; wait_entry->aborted = false;
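With the waiter ids widened to u16, an entry now encodes group and opcode and is matched against WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd) as in the hunk above; legacy ids simply keep group 0, and note that the memcpy now scales by sizeof(u16). A hedged sketch of a caller (EXAMPLE_GROUP and EXAMPLE_NOTIF are placeholder names, the two library calls are the real ones changed by this patch):

	static int iwl_example_wait(struct iwl_notif_wait_data *notif_wait)
	{
		/* u16 ids: wide ids via WIDE_ID(), legacy ids keep group 0 */
		static const u16 cmds[] = {
			WIDE_ID(EXAMPLE_GROUP, EXAMPLE_NOTIF),
		};
		struct iwl_notification_wait wait_entry;

		iwl_init_notification_wait(notif_wait, &wait_entry,
					   cmds, ARRAY_SIZE(cmds),
					   NULL, NULL);
		/* ... send the host command that triggers the notification ... */
		return iwl_wait_notification(notif_wait, &wait_entry, HZ);
	}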
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h index 95af97a6c2cf..dbe8234521de 100644 --- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h +++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -105,7 +106,7 @@ struct iwl_notification_wait { struct iwl_rx_packet *pkt, void *data); void *fn_data; - u8 cmds[MAX_NOTIF_CMDS]; + u16 cmds[MAX_NOTIF_CMDS]; u8 n_cmds; bool triggered, aborted; }; @@ -121,7 +122,7 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data); void __acquires(wait_entry) iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data, struct iwl_notification_wait *wait_entry, - const u8 *cmds, int n_cmds, + const u16 *cmds, int n_cmds, bool (*fn)(struct iwl_notif_wait_data *notif_data, struct iwl_rx_packet *pkt, void *data), void *fn_data); diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h index ce1cdd7604e8..b47fe9d6b97a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h @@ -116,10 +116,6 @@ struct iwl_cfg; * May sleep * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the * HCMD this Rx responds to. Can't sleep. - * @napi_add: NAPI initialization. The transport is fully responsible for NAPI, - * but the higher layers need to know about it (in particular mac80211 to - * to able to call the right NAPI RX functions); this function is needed - * to eventually call netif_napi_add() with higher layer involvement. * @queue_full: notifies that a HW queue is full. * Must be atomic and called with BH disabled. * @queue_not_full: notifies that a HW queue is not full any more. @@ -148,13 +144,8 @@ struct iwl_op_mode_ops { const struct iwl_fw *fw, struct dentry *dbgfs_dir); void (*stop)(struct iwl_op_mode *op_mode); - int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); - void (*napi_add)(struct iwl_op_mode *op_mode, - struct napi_struct *napi, - struct net_device *napi_dev, - int (*poll)(struct napi_struct *, int), - int weight); + void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb); void (*queue_full)(struct iwl_op_mode *op_mode, int queue); void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue); bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state); @@ -188,11 +179,11 @@ static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode) op_mode->ops->stop(op_mode); } -static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode, + struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb) { - return op_mode->ops->rx(op_mode, rxb, cmd); + return op_mode->ops->rx(op_mode, napi, rxb); } static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode, @@ -260,15 +251,4 @@ static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode) return op_mode->ops->exit_d0i3(op_mode); } -static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode, - struct napi_struct *napi, - struct net_device *napi_dev, - int (*poll)(struct napi_struct *, int), - int weight) -{ - if (!op_mode->ops->napi_add) - return; - op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight); -} - #endif /* __iwl_op_mode_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h index 5af1c776d2d4..3ab777f79e4f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++
b/drivers/net/wireless/iwlwifi/iwl-prph.h @@ -253,6 +253,7 @@ #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) #define SCD_GP_CTRL_ENABLE_31_QUEUES BIT(0) +#define SCD_GP_CTRL_AUTO_ACTIVE_MODE BIT(18) /* Context Data */ #define SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600) @@ -291,6 +292,9 @@ /*********************** END TX SCHEDULER *************************************/ +/* tcp checksum offload */ +#define RX_EN_CSUM (0x00a00d88) + /* Oscillator clock */ #define OSC_CLK (0xa04068) #define OSC_CLK_FORCE_CONTROL (0x8) @@ -379,6 +383,8 @@ enum aux_misc_master1_en { #define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800 #define RSA_ENABLE 0xA24B08 #define PREG_AUX_BUS_WPROT_0 0xA04CC0 +#define SB_CPU_1_STATUS 0xA01E30 +#define SB_CPU_2_STATUS 0xA01E34 /* FW chicken bits */ #define LMPM_CHICK 0xA01FF8 @@ -386,4 +392,10 @@ enum { LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0), }; +/* FW page pass notification */ +#define LMPM_PAGE_PASS_NOTIF 0xA03824 +enum { + LMPM_PAGE_PASS_NOTIF_POS = BIT(20), +}; + #endif /* __iwl_prph_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 87a230a7f4b6..c829c505e141 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -122,6 +122,40 @@ #define INDEX_TO_SEQ(i) ((i) & 0xff) #define SEQ_RX_FRAME cpu_to_le16(0x8000) +/* + * These functions retrieve specific information from the id field in + * the iwl_host_cmd struct: the command opcode, the group id and the + * version of the command; iwl_cmd_id() performs the inverse, packing + * the three into a single u32. +*/ +static inline u8 iwl_cmd_opcode(u32 cmdid) +{ + return cmdid & 0xFF; +} + +static inline u8 iwl_cmd_groupid(u32 cmdid) +{ + return ((cmdid & 0xFF00) >> 8); +} + +static inline u8 iwl_cmd_version(u32 cmdid) +{ + return ((cmdid & 0xFF0000) >> 16); +} + +static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version) +{ + return opcode + (groupid << 8) + (version << 16); +} + +/* make u16 wide id out of u8 group and opcode */ +#define WIDE_ID(grp, opcode) ((grp << 8) | opcode) + +/* due to the conversion, this group is special; new groups + * should be defined in the appropriate fw-api header files + */ +#define IWL_ALWAYS_LONG_GROUP 1 + /** * struct iwl_cmd_header * @@ -130,7 +164,7 @@ */ struct iwl_cmd_header { u8 cmd; /* Command ID: REPLY_RXON, etc. */ - u8 flags; /* 0:5 reserved, 6 abort, 7 internal */ + u8 group_id; /* * The driver sets up the sequence number to values of its choosing. * uCode does not use this value, but passes it back to the driver @@ -154,9 +188,22 @@ struct iwl_cmd_header { __le16 sequence; } __packed; -/* iwl_cmd_header flags value */ -#define IWL_CMD_FAILED_MSK 0x40 - +/** + * struct iwl_cmd_header_wide + * + * This header format appears in the beginning of each command sent from the + * driver, and each response/notification received from uCode. + * This is the wide version, which additionally carries the length, the + * version and the group id of the command. + */ +struct iwl_cmd_header_wide { + u8 cmd; + u8 group_id; + __le16 sequence; + __le16 length; + u8 reserved; + u8 version; +} __packed; #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */ #define FH_RSCSR_FRAME_INVALID 0x55550000 @@ -201,6 +248,8 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt) * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle. * @CMD_WAKE_UP_TRANS: The command response should wake up the trans * (i.e. mark it as non-idle).
+ * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to + * check that we leave enough room for the TBs bitmap which needs 20 bits. */ enum CMD_MODE { CMD_ASYNC = BIT(0), @@ -210,6 +259,8 @@ enum CMD_MODE { CMD_SEND_IN_IDLE = BIT(4), CMD_MAKE_TRANS_IDLE = BIT(5), CMD_WAKE_UP_TRANS = BIT(6), + + CMD_TB_BITMAP_POS = 11, }; #define DEF_CMD_PAYLOAD_SIZE 320 @@ -222,8 +273,18 @@ enum CMD_MODE { * aren't fully copied and use other TFD space. */ struct iwl_device_cmd { - struct iwl_cmd_header hdr; /* uCode API */ - u8 payload[DEF_CMD_PAYLOAD_SIZE]; + union { + struct { + struct iwl_cmd_header hdr; /* uCode API */ + u8 payload[DEF_CMD_PAYLOAD_SIZE]; + }; + struct { + struct iwl_cmd_header_wide hdr_wide; + u8 payload_wide[DEF_CMD_PAYLOAD_SIZE - + sizeof(struct iwl_cmd_header_wide) + + sizeof(struct iwl_cmd_header)]; + }; + }; } __packed; #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) @@ -261,24 +322,22 @@ enum iwl_hcmd_dataflag { * @resp_pkt: response packet, if %CMD_WANT_SKB was set * @_rx_page_order: (internally used to free response packet) * @_rx_page_addr: (internally used to free response packet) - * @handler_status: return value of the handler of the command - * (put in setup_rx_handlers) - valid for SYNC mode only * @flags: can be CMD_* * @len: array of the lengths of the chunks in data * @dataflags: IWL_HCMD_DFL_* - * @id: id of the host command + * @id: command id of the host command, for wide commands encoding the + * version and group as well */ struct iwl_host_cmd { const void *data[IWL_MAX_CMD_TBS_PER_TFD]; struct iwl_rx_packet *resp_pkt; unsigned long _rx_page_addr; u32 _rx_page_order; - int handler_status; u32 flags; + u32 id; u16 len[IWL_MAX_CMD_TBS_PER_TFD]; u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD]; - u8 id; }; static inline void iwl_free_resp(struct iwl_host_cmd *cmd) @@ -379,6 +438,7 @@ enum iwl_trans_status { * @bc_table_dword: set to true if the BC table expects the byte count to be * in DWORD (as opposed to bytes) * @scd_set_active: should the transport configure the SCD for HCMD queue + * @wide_cmd_header: firmware supports wide host command header * @command_names: array of command names, must be 256 entries * (one for each command); for debugging only * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until @@ -396,6 +456,7 @@ struct iwl_trans_config { bool rx_buf_size_8k; bool bc_table_dword; bool scd_set_active; + bool wide_cmd_header; const char *const *command_names; u32 sdio_adma_addr; @@ -544,10 +605,12 @@ struct iwl_trans_ops { u32 value); void (*ref)(struct iwl_trans *trans); void (*unref)(struct iwl_trans *trans); - void (*suspend)(struct iwl_trans *trans); + int (*suspend)(struct iwl_trans *trans); void (*resume)(struct iwl_trans *trans); - struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans); + struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, + struct iwl_fw_dbg_trigger_tlv + *trigger); }; /** @@ -584,6 +647,8 @@ enum iwl_d0i3_mode { * @cfg - pointer to the configuration * @status: a bit-mask of transport status flags * @dev - pointer to struct device * that represents the device + * @max_skb_frags: maximum number of fragments an SKB can have when transmitted. + * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported. * @hw_id: a u32 with the ID of the device / sub-device. * Set during transport allocation. * @hw_id_str: a string with info about HW ID. Set during transport allocation. 
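The id packing introduced in iwl-trans.h above is plain bit arithmetic: opcode in bits 0-7, group id in bits 8-15, version in bits 16-23, and WIDE_ID() is just the low 16 bits of the same layout. A quick self-contained illustration (the values are arbitrary, not from the patch):

	u32 id = iwl_cmd_id(0x01, 0x02, 0x03);	/* == 0x030201 */

	WARN_ON(iwl_cmd_opcode(id) != 0x01);
	WARN_ON(iwl_cmd_groupid(id) != 0x02);
	WARN_ON(iwl_cmd_version(id) != 0x03);
	WARN_ON(WIDE_ID(0x02, 0x01) != (id & 0xffff));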
@@ -603,6 +668,12 @@ enum iwl_d0i3_mode { * @dbg_conf_tlv: array of pointers to configuration TLVs for debug * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv + * @paging_req_addr: The location where the FW will upload / download the pages + * from. The address is set by the opmode + * @paging_db: Pointer to the opmode paging database, the pointer is set by + * the opmode. + * @paging_download_buf: Buffer used for copying all of the pages before + * downloading them to the FW. The buffer is allocated in the opmode */ struct iwl_trans { const struct iwl_trans_ops *ops; @@ -612,6 +683,7 @@ struct iwl_trans { unsigned long status; struct device *dev; + u32 max_skb_frags; u32 hw_rev; u32 hw_id; char hw_id_str[52]; @@ -639,6 +711,14 @@ struct iwl_trans { struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv; u8 dbg_dest_reg_num; + /* + * Paging parameters - All of the parameters should be set by the + * opmode when paging is enabled + */ + u32 paging_req_addr; + struct iwl_fw_paging *paging_db; + void *paging_download_buf; + enum iwl_d0i3_mode d0i3_mode; bool wowlan_d0i3; @@ -730,7 +810,8 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans) static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test) { might_sleep(); - trans->ops->d3_suspend(trans, test); + if (trans->ops->d3_suspend) + trans->ops->d3_suspend(trans, test); } static inline int iwl_trans_d3_resume(struct iwl_trans *trans, @@ -738,6 +819,9 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans, bool test) { might_sleep(); + if (!trans->ops->d3_resume) + return 0; + return trans->ops->d3_resume(trans, status, test); } @@ -753,10 +837,12 @@ static inline void iwl_trans_unref(struct iwl_trans *trans) trans->ops->unref(trans); } -static inline void iwl_trans_suspend(struct iwl_trans *trans) +static inline int iwl_trans_suspend(struct iwl_trans *trans) { - if (trans->ops->suspend) - trans->ops->suspend(trans); + if (!trans->ops->suspend) + return 0; + + return trans->ops->suspend(trans); } static inline void iwl_trans_resume(struct iwl_trans *trans) @@ -766,11 +852,12 @@ static inline void iwl_trans_resume(struct iwl_trans *trans) } static inline struct iwl_trans_dump_data * -iwl_trans_dump_data(struct iwl_trans *trans) +iwl_trans_dump_data(struct iwl_trans *trans, + struct iwl_fw_dbg_trigger_tlv *trigger) { if (!trans->ops->dump_data) return NULL; - return trans->ops->dump_data(trans); + return trans->ops->dump_data(trans, trigger); } static inline int iwl_trans_send_cmd(struct iwl_trans *trans, diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile index 2d7c3ea3c4f8..8c2c3d13b092 100644 --- a/drivers/net/wireless/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/iwlwifi/mvm/Makefile @@ -6,6 +6,7 @@ iwlmvm-y += power.o coex.o coex_legacy.o iwlmvm-y += tt.o offloading.o tdls.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o +iwlmvm-y += tof.o iwlmvm-$(CONFIG_PM_SLEEP) += d3.o ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../ diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index b4737e296c92..e290ac67d975 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -725,15 +725,17 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) } } -int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd
*dev_cmd) +void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd); + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + iwl_mvm_rx_bt_coex_notif_old(mvm, rxb); + return; + } IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); @@ -748,12 +750,6 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif)); iwl_mvm_bt_coex_notif_handle(mvm); - - /* - * This is an async handler for a notification, returning anything other - * than 0 doesn't make sense even if HCMD failed. - */ - return 0; } void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -947,9 +943,8 @@ void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm) iwl_mvm_bt_coex_notif_handle(mvm); } -int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *dev_cmd) +void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u32 ant_isolation = le32_to_cpup((void *)pkt->data); @@ -957,20 +952,23 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, u8 __maybe_unused lower_bound, upper_bound; u8 lut; - if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT)) - return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd); + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BT_COEX_SPLIT)) { + iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb); + return; + } if (!iwl_mvm_bt_is_plcr_supported(mvm)) - return 0; + return; lockdep_assert_held(&mvm->mutex); /* Ignore updates if we are in force mode */ if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return 0; + return; if (ant_isolation == mvm->last_ant_isol) - return 0; + return; for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++) if (ant_isolation < antenna_coupling_ranges[lut + 1].range) @@ -989,7 +987,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, mvm->last_ant_isol = ant_isolation; if (mvm->last_corun_lut == lut) - return 0; + return; mvm->last_corun_lut = lut; @@ -1000,6 +998,8 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, memcpy(&cmd.corun_lut40, antenna_coupling_ranges[lut].lut20, sizeof(cmd.corun_lut40)); - return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0, - sizeof(cmd), &cmd); + if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0, + sizeof(cmd), &cmd)) + IWL_ERR(mvm, + "failed to send BT_COEX_UPDATE_CORUN_LUT command\n"); } diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c index 6ac6de2af977..61c07b05fcaa 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c @@ -1058,9 +1058,8 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n"); } -int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *dev_cmd) +void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data; @@ -1083,12 
+1082,6 @@ int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old)); iwl_mvm_bt_coex_notif_handle(mvm); - - /* - * This is an async handler for a notification, returning anything other - * than 0 doesn't make sense even if HCMD failed. - */ - return 0; } static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac, @@ -1250,14 +1243,12 @@ void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm) iwl_mvm_bt_coex_notif_handle(mvm); } -int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *dev_cmd) +void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u32 ant_isolation = le32_to_cpup((void *)pkt->data); u8 __maybe_unused lower_bound, upper_bound; - int ret; u8 lut; struct iwl_bt_coex_cmd_old *bt_cmd; @@ -1268,16 +1259,16 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, }; if (!iwl_mvm_bt_is_plcr_supported(mvm)) - return 0; + return; lockdep_assert_held(&mvm->mutex); /* Ignore updates if we are in force mode */ if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return 0; + return; if (ant_isolation == mvm->last_ant_isol) - return 0; + return; for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++) if (ant_isolation < antenna_coupling_ranges[lut + 1].range) @@ -1296,13 +1287,13 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, mvm->last_ant_isol = ant_isolation; if (mvm->last_corun_lut == lut) - return 0; + return; mvm->last_corun_lut = lut; bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); if (!bt_cmd) - return 0; + return; cmd.data[0] = bt_cmd; bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD); @@ -1317,8 +1308,8 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20, sizeof(bt_cmd->bt4_corun_lut40)); - ret = iwl_mvm_send_cmd(mvm, &cmd); + if (iwl_mvm_send_cmd(mvm, &cmd)) + IWL_ERR(mvm, "failed to send BT_CONFIG command\n"); kfree(bt_cmd); - return ret; } diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h index beba375489f1..b8ee3121fbd2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/iwlwifi/mvm/constants.h @@ -102,6 +102,7 @@ #define IWL_MVM_QUOTA_THRESHOLD 4 #define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0 #define IWL_MVM_RS_DISABLE_P2P_MIMO 0 +#define IWL_MVM_TOF_IS_RESPONDER 0 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 4165d104e4c3..04264e417c1c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -1145,7 +1145,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm) { struct iwl_notification_wait wait_d3; - static const u8 d3_notif[] = { D3_CONFIG_CMD }; + static const u16 d3_notif[] = { D3_CONFIG_CMD }; int ret; iwl_init_notification_wait(&mvm->notif_wait, &wait_d3, @@ -1168,13 +1168,17 @@ remove_notif: int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + ret = iwl_trans_suspend(mvm->trans); + if (ret) + return ret; - iwl_trans_suspend(mvm->trans); mvm->trans->wowlan_d0i3 = wowlan->any; if 
(mvm->trans->wowlan_d0i3) { /* 'any' trigger means d0i3 usage */ if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) { - int ret = iwl_mvm_enter_d0i3_sync(mvm); + ret = iwl_mvm_enter_d0i3_sync(mvm); if (ret) return ret; @@ -1183,6 +1187,9 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) mutex_lock(&mvm->d0i3_suspend_mutex); __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); mutex_unlock(&mvm->d0i3_suspend_mutex); + + iwl_trans_d3_suspend(mvm->trans, false); + return 0; } @@ -1935,28 +1942,59 @@ out: return 1; } +static int iwl_mvm_resume_d3(struct iwl_mvm *mvm) +{ + iwl_trans_resume(mvm->trans); + + return __iwl_mvm_resume(mvm, false); +} + +static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm) +{ + bool exit_now; + enum iwl_d3_status d3_status; + + iwl_trans_d3_resume(mvm->trans, &d3_status, false); + + /* + * make sure to clear D0I3_DEFER_WAKEUP before + * calling iwl_trans_resume(), which might wait + * for d0i3 exit completion. + */ + mutex_lock(&mvm->d0i3_suspend_mutex); + __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); + exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP, + &mvm->d0i3_suspend_flags); + mutex_unlock(&mvm->d0i3_suspend_mutex); + if (exit_now) { + IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n"); + _iwl_mvm_exit_d0i3(mvm); + } + + iwl_trans_resume(mvm->trans); + + if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) { + int ret = iwl_mvm_exit_d0i3(mvm->hw->priv); + + if (ret) + return ret; + /* + * d0i3 exit will be deferred until reconfig_complete. + * make sure that we are out of d0i3. + */ + } + return 0; +} + int iwl_mvm_resume(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - iwl_trans_resume(mvm->trans); - - if (mvm->hw->wiphy->wowlan_config->any) { /* 'any' trigger means d0i3 usage */ - if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) { - int ret = iwl_mvm_exit_d0i3(hw->priv); - - if (ret) - return ret; - /* - * d0i3 exit will be deferred until reconfig_complete. - * make sure there we are out of d0i3. - */ - } - return 0; - } - - return __iwl_mvm_resume(mvm, false); + /* 'any' trigger means d0i3 was used */ + if (hw->wiphy->wowlan_config->any) + return iwl_mvm_resume_d0i3(mvm); + else + return iwl_mvm_resume_d3(mvm); } void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled) diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c index 5c8a65de0e77..383a3162046c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c @@ -63,6 +63,7 @@ * *****************************************************************************/ #include "mvm.h" +#include "fw-api-tof.h" #include "debugfs.h" static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, @@ -497,6 +498,731 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } +static inline char *iwl_dbgfs_is_match(char *name, char *buf) +{ + int len = strlen(name); + + return !strncmp(name, buf, len) ?
+static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif, + char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + int value, ret = -EINVAL; + char *data; + + mutex_lock(&mvm->mutex); + + data = iwl_dbgfs_is_match("tof_disabled=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.tof_cfg.tof_disabled = value; + goto out; + } + + data = iwl_dbgfs_is_match("one_sided_disabled=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.tof_cfg.one_sided_disabled = value; + goto out; + } + + data = iwl_dbgfs_is_match("is_debug_mode=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.tof_cfg.is_debug_mode = value; + goto out; + } + + data = iwl_dbgfs_is_match("is_buf=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.tof_cfg.is_buf_required = value; + goto out; + } + + data = iwl_dbgfs_is_match("send_tof_cfg=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0 && value) { + ret = iwl_mvm_tof_config_cmd(mvm); + goto out; + } + } + +out: + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_tof_enable_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + struct iwl_tof_config_cmd *cmd; + + cmd = &mvm->tof_data.tof_cfg; + + mutex_lock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n", + cmd->tof_disabled); + pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n", + cmd->one_sided_disabled); + pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n", + cmd->is_debug_mode); + pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n", + cmd->is_buf_required); + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif, + char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + int value, ret = 0; + char *data; + + mutex_lock(&mvm->mutex); + + data = iwl_dbgfs_is_match("burst_period=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (!ret) + mvm->tof_data.responder_cfg.burst_period = + cpu_to_le16(value); + goto out; + } + + data = iwl_dbgfs_is_match("min_delta_ftm=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.min_delta_ftm = value; + goto out; + } + + data = iwl_dbgfs_is_match("burst_duration=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.burst_duration = value; + goto out; + } + + data = iwl_dbgfs_is_match("num_of_burst_exp=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.num_of_burst_exp = value; + goto out; + } + + data = iwl_dbgfs_is_match("abort_responder=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.abort_responder = value; + goto out; + } + + data = iwl_dbgfs_is_match("get_ch_est=", buf); + if (data) { + ret =
kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.get_ch_est = value; + goto out; + } + + data = iwl_dbgfs_is_match("recv_sta_req_params=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.recv_sta_req_params = value; + goto out; + } + + data = iwl_dbgfs_is_match("channel_num=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.channel_num = value; + goto out; + } + + data = iwl_dbgfs_is_match("bandwidth=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.bandwidth = value; + goto out; + } + + data = iwl_dbgfs_is_match("rate=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.rate = value; + goto out; + } + + data = iwl_dbgfs_is_match("bssid=", buf); + if (data) { + u8 *mac = mvm->tof_data.responder_cfg.bssid; + + if (!mac_pton(data, mac)) { + ret = -EINVAL; + goto out; + } + } + + data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.tsf_timer_offset_msecs = + cpu_to_le16(value); + goto out; + } + + data = iwl_dbgfs_is_match("toa_offset=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.toa_offset = + cpu_to_le16(value); + goto out; + } + + data = iwl_dbgfs_is_match("ctrl_ch_position=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.ctrl_ch_position = value; + goto out; + } + + data = iwl_dbgfs_is_match("ftm_per_burst=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.ftm_per_burst = value; + goto out; + } + + data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value; + goto out; + } + + data = iwl_dbgfs_is_match("asap_mode=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.responder_cfg.asap_mode = value; + goto out; + } + + data = iwl_dbgfs_is_match("send_responder_cfg=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0 && value) { + ret = iwl_mvm_tof_responder_cmd(mvm, vif); + goto out; + } + } + +out: + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + struct iwl_tof_responder_config_cmd *cmd; + + cmd = &mvm->tof_data.responder_cfg; + + mutex_lock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n", + le16_to_cpu(cmd->burst_period)); + pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n", + cmd->burst_duration); + pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n", + cmd->bandwidth); + pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n", + cmd->channel_num); + pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n", + cmd->ctrl_ch_position); + pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n", + cmd->bssid); + pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n", + 
cmd->min_delta_ftm); + pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n", + cmd->num_of_burst_exp); + pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate); + pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n", + cmd->abort_responder); + pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n", + cmd->get_ch_est); + pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n", + cmd->recv_sta_req_params); + pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n", + cmd->ftm_per_burst); + pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n", + cmd->ftm_resp_ts_avail); + pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n", + cmd->asap_mode); + pos += scnprintf(buf + pos, bufsz - pos, + "tsf_timer_offset_msecs = %d\n", + le16_to_cpu(cmd->tsf_timer_offset_msecs)); + pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n", + le16_to_cpu(cmd->toa_offset)); + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + int value, ret = 0; + char *data; + + mutex_lock(&mvm->mutex); + + data = iwl_dbgfs_is_match("request_id=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.request_id = value; + goto out; + } + + data = iwl_dbgfs_is_match("initiator=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.initiator = value; + goto out; + } + + data = iwl_dbgfs_is_match("one_sided_los_disable=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.one_sided_los_disable = value; + goto out; + } + + data = iwl_dbgfs_is_match("req_timeout=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.req_timeout = value; + goto out; + } + + data = iwl_dbgfs_is_match("report_policy=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.report_policy = value; + goto out; + } + + data = iwl_dbgfs_is_match("macaddr_random=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.macaddr_random = value; + goto out; + } + + data = iwl_dbgfs_is_match("num_of_ap=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req.num_of_ap = value; + goto out; + } + + data = iwl_dbgfs_is_match("macaddr_template=", buf); + if (data) { + u8 mac[ETH_ALEN]; + + if (!mac_pton(data, mac)) { + ret = -EINVAL; + goto out; + } + memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN); + } + + data = iwl_dbgfs_is_match("macaddr_mask=", buf); + if (data) { + u8 mac[ETH_ALEN]; + + if (!mac_pton(data, mac)) { + ret = -EINVAL; + goto out; + } + memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN); + } + + data = iwl_dbgfs_is_match("ap=", buf); + if (data) { + struct iwl_tof_range_req_ap_entry ap; + int size = sizeof(struct iwl_tof_range_req_ap_entry); + u16 burst_period; + u8 *mac = ap.bssid; + unsigned int i; + + if (sscanf(data, "%u %hhd %hhx %hhx" + "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx" + "%hhx %hhx %hx" + "%hhx %hhx %x" + "%hhx %hhx %hhx %hhx", + &i, &ap.channel_num, &ap.bandwidth, + &ap.ctrl_ch_position, + mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5, + 
&ap.measure_type, &ap.num_of_bursts, + &burst_period, + &ap.samples_per_burst, &ap.retries_per_sample, + &ap.tsf_delta, &ap.location_req, &ap.asap_mode, + &ap.enable_dyn_ack, &ap.rssi) != 20) { + ret = -EINVAL; + goto out; + } + if (i >= IWL_MVM_TOF_MAX_APS) { + IWL_ERR(mvm, "Invalid AP index %d\n", i); + ret = -EINVAL; + goto out; + } + + ap.burst_period = cpu_to_le16(burst_period); + + memcpy(&mvm->tof_data.range_req.ap[i], &ap, size); + goto out; + } + + data = iwl_dbgfs_is_match("send_range_request=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0 && value) { + ret = iwl_mvm_tof_range_request_cmd(mvm, vif); + goto out; + } + } + +out: + mutex_unlock(&mvm->mutex); + return ret ?: count; +} + +static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[512]; + int pos = 0; + const size_t bufsz = sizeof(buf); + struct iwl_tof_range_req_cmd *cmd; + int i; + + cmd = &mvm->tof_data.range_req; + + mutex_lock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n", + cmd->request_id); + pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n", + cmd->initiator); + pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n", + cmd->one_sided_los_disable); + pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n", + cmd->req_timeout); + pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n", + cmd->report_policy); + pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n", + cmd->macaddr_random); + pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n", + cmd->macaddr_template); + pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n", + cmd->macaddr_mask); + pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n", + cmd->num_of_ap); + for (i = 0; i < cmd->num_of_ap; i++) { + struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i]; + + pos += scnprintf(buf + pos, bufsz - pos, + "ap %.2d: channel_num=%hhx bw=%hhx" + " control=%hhx bssid=%pM type=%hhx" + " num_of_bursts=%hhx burst_period=%hx ftm=%hhx" + " retries=%hhx tsf_delta=%x location_req=%hhx " + " asap=%hhx enable=%hhx rssi=%hhx\n", + i, ap->channel_num, ap->bandwidth, + ap->ctrl_ch_position, ap->bssid, + ap->measure_type, ap->num_of_bursts, + ap->burst_period, ap->samples_per_burst, + ap->retries_per_sample, ap->tsf_delta, + ap->location_req, ap->asap_mode, + ap->enable_dyn_ack, ap->rssi); + } + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif, + char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + int value, ret = 0; + char *data; + + mutex_lock(&mvm->mutex); + + data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req_ext.tsf_timer_offset_msec = + cpu_to_le16(value); + goto out; + } + + data = iwl_dbgfs_is_match("min_delta_ftm=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req_ext.min_delta_ftm = value; + goto out; + } + + data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + 
mvm->tof_data.range_req_ext.ftm_format_and_bw20M = + value; + goto out; + } + + data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req_ext.ftm_format_and_bw40M = + value; + goto out; + } + + data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.range_req_ext.ftm_format_and_bw80M = + value; + goto out; + } + + data = iwl_dbgfs_is_match("send_range_req_ext=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0 && value) { + ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif); + goto out; + } + } + +out: + mutex_unlock(&mvm->mutex); + return ret ?: count; +} + +static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + struct iwl_tof_range_req_ext_cmd *cmd; + + cmd = &mvm->tof_data.range_req_ext; + + mutex_lock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, + "tsf_timer_offset_msec = %hx\n", + cmd->tsf_timer_offset_msec); + pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhx\n", + cmd->min_delta_ftm); + pos += scnprintf(buf + pos, bufsz - pos, + "ftm_format_and_bw20M = %hhx\n", + cmd->ftm_format_and_bw20M); + pos += scnprintf(buf + pos, bufsz - pos, + "ftm_format_and_bw40M = %hhx\n", + cmd->ftm_format_and_bw40M); + pos += scnprintf(buf + pos, bufsz - pos, + "ftm_format_and_bw80M = %hhx\n", + cmd->ftm_format_and_bw80M); + + mutex_unlock(&mvm->mutex); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif, + char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + int value, ret = 0; + int abort_id; + char *data; + + mutex_lock(&mvm->mutex); + + data = iwl_dbgfs_is_match("abort_id=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0) + mvm->tof_data.last_abort_id = value; + goto out; + } + + data = iwl_dbgfs_is_match("send_range_abort=", buf); + if (data) { + ret = kstrtou32(data, 10, &value); + if (ret == 0 && value) { + abort_id = mvm->tof_data.last_abort_id; + ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id); + goto out; + } + } + +out: + mutex_unlock(&mvm->mutex); + return ret ?: count; +} + +static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[32]; + int pos = 0; + const size_t bufsz = sizeof(buf); + int last_abort_id; + + mutex_lock(&mvm->mutex); + last_abort_id = mvm->tof_data.last_abort_id; + mutex_unlock(&mvm->mutex); + + pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n", + last_abort_id); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char *buf; + 
int pos = 0; + const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256; + struct iwl_tof_range_rsp_ntfy *cmd; + int i, ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + mutex_lock(&mvm->mutex); + cmd = &mvm->tof_data.range_resp; + + pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n", + cmd->request_id); + pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n", + cmd->request_status); + pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n", + cmd->last_in_batch); + pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n", + cmd->num_of_aps); + for (i = 0; i < cmd->num_of_aps; i++) { + struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i]; + + pos += scnprintf(buf + pos, bufsz - pos, + "ap %.2d: bssid=%pM status=%hhx bw=%hhx" + " rtt=%x rtt_var=%x rtt_spread=%x" + " rssi=%hhx rssi_spread=%hhx" + " range=%x range_var=%x" + " time_stamp=%x\n", + i, ap->bssid, ap->measure_status, + ap->measure_bw, + ap->rtt, ap->rtt_variance, ap->rtt_spread, + ap->rssi, ap->rssi_spread, ap->range, + ap->range_variance, ap->timestamp); + } + mutex_unlock(&mvm->mutex); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, size_t count, loff_t *ppos) { @@ -628,6 +1354,12 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256); MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20); MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32); +MVM_DEBUGFS_READ_FILE_OPS(tof_range_response); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32); void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -671,6 +1403,25 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, S_IRUSR | S_IWUSR); + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) && + !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) { + if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP) + MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params, + mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + + MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir, + S_IRUSR); + } + /* * Create symlink for convenience pointing to interface specific * debugfs entries for the driver. 
For example, under diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index ffb4b5cef275..ca4a1f8f82a5 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -974,7 +974,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, if (ret) return ret; - iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, 0); + iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, NULL); iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE); @@ -1200,12 +1200,7 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf, if (ptr) { for (ofs = 0; ofs < len; ofs += 16) { pos += scnprintf(buf + pos, bufsz - pos, - "0x%.4x ", ofs); - hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos, - bufsz - pos, false); - pos += strlen(buf + pos); - if (bufsz - pos > 0) - buf[pos++] = '\n'; + "0x%.4x %16ph\n", ofs, ptr + ofs); } } else { pos += scnprintf(buf + pos, bufsz - pos, diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h index b1baa33cc19b..b86b1697d56f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h @@ -413,7 +413,7 @@ struct iwl_beacon_filter_cmd { #define IWL_BF_TEMP_FAST_FILTER_MIN 0 #define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5 -#define IWL_BF_TEMP_SLOW_FILTER_D0I3 5 +#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20 #define IWL_BF_TEMP_SLOW_FILTER_MAX 255 #define IWL_BF_TEMP_SLOW_FILTER_MIN 0 diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 737774a01c74..660cc1c93e19 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -87,41 +87,6 @@ struct iwl_ssid_ie { u8 ssid[IEEE80211_MAX_SSID_LEN]; } __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */ -/* How many statistics are gathered for each channel */ -#define SCAN_RESULTS_STATISTICS 1 - -/** - * enum iwl_scan_complete_status - status codes for scan complete notifications - * @SCAN_COMP_STATUS_OK: scan completed successfully - * @SCAN_COMP_STATUS_ABORT: scan was aborted by user - * @SCAN_COMP_STATUS_ERR_SLEEP: sending null sleep packet failed - * @SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT: timeout before channel is ready - * @SCAN_COMP_STATUS_ERR_PROBE: sending probe request failed - * @SCAN_COMP_STATUS_ERR_WAKEUP: sending null wakeup packet failed - * @SCAN_COMP_STATUS_ERR_ANTENNAS: invalid antennas chosen at scan command - * @SCAN_COMP_STATUS_ERR_INTERNAL: internal error caused scan abort - * @SCAN_COMP_STATUS_ERR_COEX: medium was lost ot WiMax - * @SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful - * (not an error!) 
- * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repetition the driver - * asked for - * @SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events -*/ -enum iwl_scan_complete_status { - SCAN_COMP_STATUS_OK = 0x1, - SCAN_COMP_STATUS_ABORT = 0x2, - SCAN_COMP_STATUS_ERR_SLEEP = 0x3, - SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT = 0x4, - SCAN_COMP_STATUS_ERR_PROBE = 0x5, - SCAN_COMP_STATUS_ERR_WAKEUP = 0x6, - SCAN_COMP_STATUS_ERR_ANTENNAS = 0x7, - SCAN_COMP_STATUS_ERR_INTERNAL = 0x8, - SCAN_COMP_STATUS_ERR_COEX = 0x9, - SCAN_COMP_STATUS_P2P_ACTION_OK = 0xA, - SCAN_COMP_STATUS_ITERATION_END = 0x0B, - SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C, -}; - /* scan offload */ #define IWL_SCAN_MAX_BLACKLIST_LEN 64 #define IWL_SCAN_SHORT_BLACKLIST_LEN 16 @@ -143,71 +108,6 @@ enum scan_framework_client { SCAN_CLIENT_ASSET_TRACKING = BIT(2), }; -/** - * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6 - * @scan_flags: see enum iwl_scan_flags - * @channel_count: channels in channel list - * @quiet_time: dwell time, in milliseconds, on quiet channel - * @quiet_plcp_th: quiet channel num of packets threshold - * @good_CRC_th: passive to active promotion threshold - * @rx_chain: RXON rx chain. - * @max_out_time: max TUs to be out of associated channel - * @suspend_time: pause scan this TUs when returning to service channel - * @flags: RXON flags - * @filter_flags: RXONfilter - * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz. - * @direct_scan: list of SSIDs for directed active scan - * @scan_type: see enum iwl_scan_type. - * @rep_count: repetition count for each scheduled scan iteration. - */ -struct iwl_scan_offload_cmd { - __le16 len; - u8 scan_flags; - u8 channel_count; - __le16 quiet_time; - __le16 quiet_plcp_th; - __le16 good_CRC_th; - __le16 rx_chain; - __le32 max_out_time; - __le32 suspend_time; - /* RX_ON_FLAGS_API_S_VER_1 */ - __le32 flags; - __le32 filter_flags; - struct iwl_tx_cmd tx_cmd[2]; - /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */ - struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; - __le32 scan_type; - __le32 rep_count; -} __packed; - -enum iwl_scan_offload_channel_flags { - IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE = BIT(0), - IWL_SCAN_OFFLOAD_CHANNEL_NARROW = BIT(22), - IWL_SCAN_OFFLOAD_CHANNEL_FULL = BIT(24), - IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL = BIT(25), -}; - -/* channel configuration for struct iwl_scan_offload_cfg. Each channels needs: - * __le32 type: bitmap; bits 1-20 are for directed scan to i'th ssid and - * see enum iwl_scan_offload_channel_flags. - * __le16 channel_number: channel number 1-13 etc. - * __le16 iter_count: repetition count for the channel. - * __le32 iter_interval: interval between two iterations on one channel. - * u8 active_dwell. - * u8 passive_dwell. 
- */ -#define IWL_SCAN_CHAN_SIZE 14 - -/** - * iwl_scan_offload_cfg - SCAN_OFFLOAD_CONFIG_API_S - * @scan_cmd: scan command fixed part - * @data: scan channel configuration and probe request frames - */ -struct iwl_scan_offload_cfg { - struct iwl_scan_offload_cmd scan_cmd; - u8 data[0]; -} __packed; - /** * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S * @ssid: MAC address to filter out @@ -297,35 +197,6 @@ enum iwl_scan_ebs_status { IWL_SCAN_EBS_INACTIVE, }; -/** - * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1 - * @last_schedule_line: last schedule line executed (fast or regular) - * @last_schedule_iteration: last scan iteration executed before scan abort - * @status: enum iwl_scan_offload_compleate_status - * @ebs_status: last EBS status, see IWL_SCAN_EBS_* - */ -struct iwl_scan_offload_complete { - u8 last_schedule_line; - u8 last_schedule_iteration; - u8 status; - u8 ebs_status; -} __packed; - -/** - * iwl_sched_scan_results - SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1 - * @ssid_bitmap: SSIDs indexes found in this iteration - * @client_bitmap: clients that are active and wait for this notification - */ -struct iwl_sched_scan_results { - __le16 ssid_bitmap; - u8 client_bitmap; - u8 reserved; -}; - -/* Unified LMAC scan API */ - -#define IWL_MVM_BASIC_PASSIVE_DWELL 110 - /** * iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S * @tx_flags: combination of TX_CMD_FLG_* @@ -550,18 +421,6 @@ struct iwl_periodic_scan_complete { /* UMAC Scan API */ -/** - * struct iwl_mvm_umac_cmd_hdr - Command header for UMAC commands - * @size: size of the command (not including header) - * @reserved0: for future use and alignment - * @ver: API version number - */ -struct iwl_mvm_umac_cmd_hdr { - __le16 size; - u8 reserved0; - u8 ver; -} __packed; - /* The maximum of either of these cannot exceed 8, because we use an * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h). 
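+ * (i.e. at most eight scans can be in flight at once, one bit per scan)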
*/ @@ -621,7 +480,6 @@ enum iwl_channel_flags { /** * struct iwl_scan_config - * @hdr: umac command header * @flags: enum scan_config_flags * @tx_chains: valid_tx antenna - ANT_* definitions * @rx_chains: valid_rx antenna - ANT_* definitions @@ -639,7 +497,6 @@ enum iwl_channel_flags { * @channel_array: default supported channels */ struct iwl_scan_config { - struct iwl_mvm_umac_cmd_hdr hdr; __le32 flags; __le32 tx_chains; __le32 rx_chains; @@ -735,7 +592,6 @@ struct iwl_scan_req_umac_tail { /** * struct iwl_scan_req_umac - * @hdr: umac command header * @flags: &enum iwl_umac_scan_flags * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority @@ -754,7 +610,6 @@ struct iwl_scan_req_umac_tail { * &struct iwl_scan_req_umac_tail */ struct iwl_scan_req_umac { - struct iwl_mvm_umac_cmd_hdr hdr; __le32 flags; __le32 uid; __le32 ooc_priority; @@ -776,12 +631,10 @@ struct iwl_scan_req_umac { /** * struct iwl_umac_scan_abort - * @hdr: umac command header * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @flags: reserved */ struct iwl_umac_scan_abort { - struct iwl_mvm_umac_cmd_hdr hdr; __le32 uid; __le32 flags; } __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h index 21dd5b771660..493a8bdfbc9e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h @@ -366,8 +366,8 @@ struct iwl_mvm_rm_sta_cmd { * ( MGMT_MCAST_KEY = 0x1f ) * @ctrl_flags: %iwl_sta_key_flag * @IGTK: - * @K1: IGTK master key - * @K2: IGTK sub key + * @K1: unused + * @K2: unused * @sta_id: station ID that support IGTK * @key_id: * @receive_seq_cnt: initial RSC/PN needed for replay check diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h new file mode 100644 index 000000000000..eed6271d01a3 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h @@ -0,0 +1,386 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __fw_api_tof_h__ +#define __fw_api_tof_h__ + +#include "fw-api.h" + +/* ToF sub-group command IDs */ +enum iwl_mvm_tof_sub_grp_ids { + TOF_RANGE_REQ_CMD = 0x1, + TOF_CONFIG_CMD = 0x2, + TOF_RANGE_ABORT_CMD = 0x3, + TOF_RANGE_REQ_EXT_CMD = 0x4, + TOF_RESPONDER_CONFIG_CMD = 0x5, + TOF_NW_INITIATED_RES_SEND_CMD = 0x6, + TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7, + TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC, + TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD, + TOF_RANGE_RESPONSE_NOTIF = 0xFE, + TOF_MCSI_DEBUG_NOTIF = 0xFB, +}; + +/** + * struct iwl_tof_config_cmd - ToF configuration + * @tof_disabled: 0 - enabled, 1 - disabled + * @one_sided_disabled: 0 - enabled, 1 - disabled + * @is_debug_mode: 1 - debug mode, 0 - otherwise + * @is_buf_required: 1 - channel estimation buffer required, 0 - otherwise + */ +struct iwl_tof_config_cmd { + __le32 sub_grp_cmd_id; + u8 tof_disabled; + u8 one_sided_disabled; + u8 is_debug_mode; + u8 is_buf_required; +} __packed; + +/** + * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug) + * @burst_period: future use: (currently hard coded in the LMAC) + * The interval between two sequential bursts. + * @min_delta_ftm: future use: (currently hard coded in the LMAC) + * The minimum delay between two sequential FTM Responses + * in the same burst. + * @burst_duration: future use: (currently hard coded in the LMAC) + * The total time for all FTM handshakes in the same burst. + * Affects the time event durations in the LMAC. + * @num_of_burst_exp: future use: (currently hard coded in the LMAC) + * The number of bursts for the current ToF request. Affects + * the number of event allocations in the current iteration. + * @get_ch_est: for xVT only, NA for driver + * @abort_responder: when set to '1' - Responder will terminate its activity + * (all other fields in the command are ignored) + * @recv_sta_req_params: 1 - Responder will ignore the other Responder's + * params and use the recommended Initiator params.
+ * 0 - otherwise + * @channel_num: current AP Channel + * @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz + * @rate: current AP rate + * @ctrl_ch_position: coding of the control channel position relative to + * the center frequency. + * 40MHz 0 below center, 1 above center + * 80MHz bits [0..1]: 0 the near 20MHz to the center, + * 1 the far 20MHz to the center + * bit[2] as above 40MHz + * @ftm_per_burst: FTMs per Burst + * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response, + * '1' - we measure over the Initial FTM Response + * @asap_mode: ASAP / Non ASAP mode for the current WLS station + * @sta_id: index of the AP STA when in AP mode + * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF + * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug + * purposes, simulating station movement by adding various values + * to this field + * @bssid: Current AP BSSID + */ +struct iwl_tof_responder_config_cmd { + __le32 sub_grp_cmd_id; + __le16 burst_period; + u8 min_delta_ftm; + u8 burst_duration; + u8 num_of_burst_exp; + u8 get_ch_est; + u8 abort_responder; + u8 recv_sta_req_params; + u8 channel_num; + u8 bandwidth; + u8 rate; + u8 ctrl_ch_position; + u8 ftm_per_burst; + u8 ftm_resp_ts_avail; + u8 asap_mode; + u8 sta_id; + __le16 tsf_timer_offset_msecs; + __le16 toa_offset; + u8 bssid[ETH_ALEN]; +} __packed; + +/** + * struct iwl_tof_range_request_ext_cmd - extended range req for WLS + * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF + * @min_delta_ftm: Minimal time between two consecutive measurements, + * in units of 100us. 0 means no preference by station + * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended + * value to be sent to the AP + * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended + * value to be sent to the AP + * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended + * value to be sent to the AP + */ +struct iwl_tof_range_req_ext_cmd { + __le32 sub_grp_cmd_id; + __le16 tsf_timer_offset_msec; + __le16 reserved; + u8 min_delta_ftm; + u8 ftm_format_and_bw20M; + u8 ftm_format_and_bw40M; + u8 ftm_format_and_bw80M; +} __packed; + +#define IWL_MVM_TOF_MAX_APS 21 + +/** + * struct iwl_tof_range_req_ap_entry - AP configuration parameters + * @channel_num: Current AP Channel + * @bandwidth: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz + * @tsf_delta_direction: TSF direction relative to the subject AP + * @ctrl_ch_position: Coding of the control channel position relative to the + * center frequency. + * 40MHz 0 below center, 1 above center + * 80MHz bits [0..1]: 0 the near 20MHz to the center, + * 1 the far 20MHz to the center + * bit[2] as above 40MHz + * @bssid: AP's bss id + * @measure_type: Measurement type: 0 - two sided, 1 - One sided + * @num_of_bursts: Recommended value to be sent to the AP. Base-2 exponent of + * the number of measurement iterations (min 2^0 = 1, max 2^14) + * @burst_period: Recommended value to be sent to the AP. Measurement + * periodicity in units of 100ms; ignored if num_of_bursts = 0
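+ * (for illustration: num_of_bursts = 2 with burst_period = 10 would + * request 2^2 = 4 bursts spaced 10 * 100ms = 1s apart)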
+ * @samples_per_burst: 2-sided: the number of FTM pairs in a single burst (1-31) + * 1-sided: how many rts/cts pairs should be used per burst. + * @retries_per_sample: Max number of retries that the LMAC should send + * in case of no replies by the AP. + * @tsf_delta: TSF Delta in units of microseconds. + * The difference between the AP TSF and the device local clock. + * @location_req: Location Request: Bit[0] - LCI should be sent in the FTMR, + * Bit[1] - Civic should be sent in the FTMR + * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided) + * @enable_dyn_ack: Enable Dynamic ACK BW. + * 0 - Initiator interacts with a regular AP + * 1 - Initiator interacts with a Responder machine: the + * Initiator ACKs need to be sent with HT 40MHz / 80MHz, since + * the Responder should use them for its ch est measurement (this + * flag will be set when we configure the opposite machine to be + * Responder). + * @rssi: Last received value + * legal values: -128-0 (0x7f); values above 0x0 indicate an invalid value. + */ +struct iwl_tof_range_req_ap_entry { + u8 channel_num; + u8 bandwidth; + u8 tsf_delta_direction; + u8 ctrl_ch_position; + u8 bssid[ETH_ALEN]; + u8 measure_type; + u8 num_of_bursts; + __le16 burst_period; + u8 samples_per_burst; + u8 retries_per_sample; + __le32 tsf_delta; + u8 location_req; + u8 asap_mode; + u8 enable_dyn_ack; + s8 rssi; +} __packed; + +/** + * enum iwl_tof_response_mode + * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as + * possible (not supported for this release) + * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon + * timeout expiration + * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the + * earlier of: measurements completion / timeout + * expiration. + */ +enum iwl_tof_response_mode { + IWL_MVM_TOF_RESPOSE_ASAP = 1, + IWL_MVM_TOF_RESPOSE_TIMEOUT, + IWL_MVM_TOF_RESPOSE_COMPLETE, +}; + +/** + * struct iwl_tof_range_req_cmd - start measurement cmd + * @request_id: A Token incremented per request. The same Token will be + * sent back in the range response + * @initiator: 0 - NW initiated, 1 - Client Initiated + * @one_sided_los_disable: '0' - run ML-Algo for both ToF/OneSided, + * '1' - run ML-Algo for ToF only + * @req_timeout: Requested timeout of the response in units of 100ms. + * This is equivalent to the session time configured to the + * LMAC in Initiator Request + * @report_policy: Supported partially for this release: For current release - + * the range report will be uploaded as a batch when ready or + * when the session is done (successfully / partially); + * one of iwl_tof_response_mode. + * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @macaddr_random: '0' Use default source MAC address (i.e. p2_p), + * '1' Use MAC Address randomization according to the below + * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC + */ +struct iwl_tof_range_req_cmd { + __le32 sub_grp_cmd_id; + u8 request_id; + u8 initiator; + u8 one_sided_los_disable; + u8 req_timeout; + u8 report_policy; + u8 los_det_disable; + u8 num_of_ap; + u8 macaddr_random; + u8 macaddr_template[ETH_ALEN]; + u8 macaddr_mask[ETH_ALEN]; + struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS]; +} __packed; + +/** + * struct iwl_tof_gen_resp_cmd - generic ToF response + */ +struct iwl_tof_gen_resp_cmd { + __le32 sub_grp_cmd_id; + u8 data[]; +} __packed; + +/** + * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response) + * @measure_status: the current AP's measurement status + * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz + * @rtt: The Round Trip Time measured for the current AP in the last + * measurement [nSec] + * @rtt_variance: The Variance of the RTT values measured for current AP + * @rtt_spread: The Difference between the maximum and the minimum RTT + * values measured for current AP in the current session [nsec] + * @rssi: RSSI as uploaded in the Channel Estimation notification + * @rssi_spread: The Difference between the maximum and the minimum RSSI values + * measured for current AP in the current session + * @range: Measured range [cm] + * @range_variance: Measured range variance [cm] + * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was + * uploaded by the LMAC + */ +struct iwl_tof_range_rsp_ap_entry_ntfy { + u8 bssid[ETH_ALEN]; + u8 measure_status; + u8 measure_bw; + __le32 rtt; + __le32 rtt_variance; + __le32 rtt_spread; + s8 rssi; + u8 rssi_spread; + __le16 reserved; + __le32 range; + __le32 range_variance; + __le32 timestamp; +} __packed; + +/** + * struct iwl_tof_range_rsp_ntfy - + * @request_id: A Token ID of the corresponding Range request + * @request_status: status of current measurement session + * @last_in_batch: report policy (when not all responses are uploaded at once) + * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + */ +struct iwl_tof_range_rsp_ntfy { + u8 request_id; + u8 request_status; + u8 last_in_batch; + u8 num_of_aps; + struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS]; +} __packed; + +#define IWL_MVM_TOF_MCSI_BUF_SIZE (245) +/** + * struct iwl_tof_mcsi_notif - used for debug + * @token: token ID for the current session + * @role: '0' - initiator, '1' - responder + * @initiator_bssid: initiator machine + * @responder_bssid: responder machine + * @mcsi_buffer: debug data + */ +struct iwl_tof_mcsi_notif { + u8 token; + u8 role; + __le16 reserved; + u8 initiator_bssid[ETH_ALEN]; + u8 responder_bssid[ETH_ALEN]; + u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4]; +} __packed; + +/** + * struct iwl_tof_neighbor_report + * @bssid: BSSID of the AP which sent the report + * @request_token: same token as the corresponding request + * @status: + * @report_ie_len: the length of the response frame starting from the Element ID + * @data: the IEs + */ +struct iwl_tof_neighbor_report { + u8 bssid[ETH_ALEN]; + u8 request_token; + u8 status; + __le16 report_ie_len; + u8 data[]; +} __packed; + +/** + * struct iwl_tof_range_abort_cmd + * @request_id: corresponds to a range request + */ +struct iwl_tof_range_abort_cmd { + __le32 sub_grp_cmd_id; + u8 request_id; + u8 reserved[3]; +} __packed; + +#endif diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h index 81c4ea3c6958..853698ab8b05 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h @@ -124,6 +124,18 @@ enum iwl_tx_flags { TX_CMD_FLG_HCCA_CHUNK = BIT(31) }; /* TX_FLAGS_BITS_API_S_VER_1 */ +/** + * enum iwl_tx_pm_timeouts - pm timeout values in TX command + * @PM_FRAME_NONE: no need to suspend sleep mode + * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU + * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec + */ +enum iwl_tx_pm_timeouts { + PM_FRAME_NONE = 0, + PM_FRAME_MGMT = 2, + PM_FRAME_ASSOC = 3, +}; + /* * TX command security control */ diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 16e9ef49397f..4af7513adda2 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -75,6 +75,7 @@ #include "fw-api-coex.h" #include "fw-api-scan.h" #include "fw-api-stats.h" +#include "fw-api-tof.h" /* Tx queue numbers */ enum { @@ -119,6 +120,9 @@ enum { ADD_STA = 0x18, REMOVE_STA = 0x19, + /* paging get item */ + FW_GET_ITEM_CMD = 0x1a, + /* TX */ TX_CMD = 0x1c, TXPATH_FLUSH = 0x1e, @@ -148,6 +152,9 @@ enum { LQ_CMD = 0x4e, + /* paging block to FW cpu2 */ + FW_PAGING_BLOCK_CMD = 0x4f, + /* Scan offload */ SCAN_OFFLOAD_REQUEST_CMD = 0x51, SCAN_OFFLOAD_ABORT_CMD = 0x52, @@ -163,6 +170,10 @@ enum { CALIB_RES_NOTIF_PHY_DB = 0x6b, /* PHY_DB_CMD = 0x6c, */ + /* ToF - 802.11mc FTM */ + TOF_CMD = 0x10, + TOF_NOTIFICATION = 0x11, + /* Power - legacy power table command */ POWER_TABLE_CMD = 0x77, PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78, @@ -365,6 +376,50 @@ struct iwl_nvm_access_cmd { u8 data[]; } __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */ +#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */ + +/* + * struct iwl_fw_paging_cmd - paging layout + * + * (FW_PAGING_BLOCK_CMD = 0x4f) + * + * Send to FW the paging layout in the driver. + * + * @flags: various flags for the command + * @block_size: the block size in powers of 2 + * @block_num: number of blocks specified in the command. 
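+ * (for illustration: a 32K block is described by block_size = 15, + * since 2^15 = 32768; block_num is bounded by NUM_OF_FW_PAGING_BLOCKS)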
+ * @device_phy_addr: virtual addresses from device side +*/ +struct iwl_fw_paging_cmd { + __le32 flags; + __le32 block_size; + __le32 block_num; + __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS]; +} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */ + +/* + * FW item IDs + * + * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload / + * download + */ +enum iwl_fw_item_id { + IWL_FW_ITEM_ID_PAGING = 3, +}; + +/* + * struct iwl_fw_get_item_cmd - get an item from the fw + */ +struct iwl_fw_get_item_cmd { + __le32 item_id; +} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */ + +struct iwl_fw_get_item_resp { + __le32 item_id; + __le32 item_byte_cnt; + __le32 item_val; +} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */ + /** * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD * @offset: offset in bytes into the section @@ -1080,10 +1135,33 @@ struct iwl_rx_phy_info { __le16 frame_time; } __packed; +/* + * TCP offload Rx assist info + * + * bits 0:3 - reserved + * bits 4:7 - MIC CRC length + * bits 8:12 - MAC header length + * bit 13 - Padding indication + * bit 14 - A-AMSDU indication + * bit 15 - Offload enabled + */ +enum iwl_csum_rx_assist_info { + CSUM_RXA_RESERVED_MASK = 0x000f, + CSUM_RXA_MICSIZE_MASK = 0x00f0, + CSUM_RXA_HEADERLEN_MASK = 0x1f00, + CSUM_RXA_PADD = BIT(13), + CSUM_RXA_AMSDU = BIT(14), + CSUM_RXA_ENA = BIT(15) +}; + +/** + * struct iwl_rx_mpdu_res_start - phy info + * @assist: see CSUM_RXA_* above + */ struct iwl_rx_mpdu_res_start { __le16 byte_count; - __le16 reserved; -} __packed; + __le16 assist; +} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */ /** * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags @@ -1136,6 +1214,8 @@ enum iwl_rx_phy_flags { * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP: * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT: * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame + * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw + * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors * @RX_MPDU_RES_STATUS_HASH_INDEX_MSK: * @RX_MPDU_RES_STATUS_STA_ID_MSK: * @RX_MPDU_RES_STATUS_RRF_KILL: @@ -1165,6 +1245,8 @@ enum iwl_mvm_rx_status { RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13), RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14), RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15), + RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16), + RX_MPDU_RES_STATUS_CSUM_OK = BIT(17), RX_MPDU_RES_STATUS_HASH_INDEX_MSK = (0x3F0000), RX_MPDU_RES_STATUS_STA_ID_MSK = (0x1f000000), RX_MPDU_RES_STATUS_RRF_KILL = BIT(29), diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index eb10c5ee4a14..4a0ce83315bd 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -106,6 +106,306 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant) sizeof(tx_ant_cmd), &tx_ant_cmd); } +static void iwl_free_fw_paging(struct iwl_mvm *mvm) +{ + int i; + + if (!mvm->fw_paging_db[0].fw_paging_block) + return; + + for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) { + if (!mvm->fw_paging_db[i].fw_paging_block) { + IWL_DEBUG_FW(mvm, + "Paging: block %d already freed, continue to next page\n", + i); + + continue; + } + + __free_pages(mvm->fw_paging_db[i].fw_paging_block, + get_order(mvm->fw_paging_db[i].fw_paging_size)); + } + kfree(mvm->trans->paging_download_buf); + memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db)); +} + +static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) +{ + int sec_idx, idx; + u32 offset = 0; + + /* + * find where the paging image starts:
+ * if CPU2 exists and it is in paging format, the image looks like: + * CPU1 sections (2 or more) + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2 + * CPU2 sections (not paged) + * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged + * CPU2 sections from the CPU2 paging section + * CPU2 paging CSS + * CPU2 paging image (including instruction and data) + */ + for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) { + if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) { + sec_idx++; + break; + } + } + + if (sec_idx >= IWL_UCODE_SECTION_MAX) { + IWL_ERR(mvm, "driver didn't find paging image\n"); + iwl_free_fw_paging(mvm); + return -EINVAL; + } + + /* copy the CSS block to the dram */ + IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n", + sec_idx); + + memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block), + image->sec[sec_idx].data, + mvm->fw_paging_db[0].fw_paging_size); + + IWL_DEBUG_FW(mvm, + "Paging: copied %d CSS bytes to first block\n", + mvm->fw_paging_db[0].fw_paging_size); + + sec_idx++; + + /* + * copy the paging blocks to the dram + * the loop index starts from 1 because the CSS block has already been + * copied to dram and occupies index 0. + * the loop stops at num_of_paging_blk since the last block is not + * full and is copied separately below. + */ + for (idx = 1; idx < mvm->num_of_paging_blk; idx++) { + memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block), + image->sec[sec_idx].data + offset, + mvm->fw_paging_db[idx].fw_paging_size); + + IWL_DEBUG_FW(mvm, + "Paging: copied %d paging bytes to block %d\n", + mvm->fw_paging_db[idx].fw_paging_size, + idx); + + offset += mvm->fw_paging_db[idx].fw_paging_size; + } + + /* copy the last paging block */ + if (mvm->num_of_pages_in_last_blk > 0) { + memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block), + image->sec[sec_idx].data + offset, + FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk); + + IWL_DEBUG_FW(mvm, + "Paging: copied %d pages in the last block %d\n", + mvm->num_of_pages_in_last_blk, idx); + } + + return 0; +} + +static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, + const struct fw_img *image) +{ + struct page *block; + dma_addr_t phys = 0; + int blk_idx = 0; + int order, num_of_pages; + int dma_enabled; + + if (mvm->fw_paging_db[0].fw_paging_block) + return 0; + + dma_enabled = is_device_dma_capable(mvm->trans->dev); + + /* ensure PAGING_BLOCK_SIZE is a power of 2 with exponent BLOCK_2_EXP_SIZE */ + BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE); + + num_of_pages = image->paging_mem_size / FW_PAGING_SIZE; + mvm->num_of_paging_blk = ((num_of_pages - 1) / + NUM_OF_PAGE_PER_GROUP) + 1; + + mvm->num_of_pages_in_last_blk = + num_of_pages - + NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1); + + IWL_DEBUG_FW(mvm, + "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n", + mvm->num_of_paging_blk, + mvm->num_of_pages_in_last_blk); + + /* allocate block of 4Kbytes for paging CSS */ + order = get_order(FW_PAGING_SIZE); + block = alloc_pages(GFP_KERNEL, order); + if (!block) { + /* free all the previous pages since we failed */ + iwl_free_fw_paging(mvm); + return -ENOMEM; + } + + mvm->fw_paging_db[blk_idx].fw_paging_block = block; + mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE; + + if (dma_enabled) { + phys = dma_map_page(mvm->trans->dev, block, 0, + PAGE_SIZE << order, DMA_BIDIRECTIONAL); + if (dma_mapping_error(mvm->trans->dev, phys)) { + /* + * free the previous pages and the current one since + * we failed to map_page.
+ */ + iwl_free_fw_paging(mvm); + return -ENOMEM; + } + mvm->fw_paging_db[blk_idx].fw_paging_phys = phys; + } else { + mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG | + blk_idx << BLOCK_2_EXP_SIZE; + } + + IWL_DEBUG_FW(mvm, + "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n", + order); + + /* + * allocate blocks in dram. + * since the CSS block is allocated in fw_paging_db[0], the loop + * starts from index 1 + */ + for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { + /* allocate block of PAGING_BLOCK_SIZE (32K) */ + order = get_order(PAGING_BLOCK_SIZE); + block = alloc_pages(GFP_KERNEL, order); + if (!block) { + /* free all the previous pages since we failed */ + iwl_free_fw_paging(mvm); + return -ENOMEM; + } + + mvm->fw_paging_db[blk_idx].fw_paging_block = block; + mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE; + + if (dma_enabled) { + phys = dma_map_page(mvm->trans->dev, block, 0, + PAGE_SIZE << order, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(mvm->trans->dev, phys)) { + /* + * free the previous pages and the current one + * since we failed to map_page. + */ + iwl_free_fw_paging(mvm); + return -ENOMEM; + } + mvm->fw_paging_db[blk_idx].fw_paging_phys = phys; + } else { + mvm->fw_paging_db[blk_idx].fw_paging_phys = + PAGING_ADDR_SIG | + blk_idx << BLOCK_2_EXP_SIZE; + } + + IWL_DEBUG_FW(mvm, + "Paging: allocated 32K bytes (order %d) for firmware paging.\n", + order); + } + + return 0; +} + +static int iwl_save_fw_paging(struct iwl_mvm *mvm, + const struct fw_img *fw) +{ + int ret; + + ret = iwl_alloc_fw_paging_mem(mvm, fw); + if (ret) + return ret; + + return iwl_fill_paging_mem(mvm, fw); +} + +/* send paging cmd to FW in case CPU2 has paging image */ +static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw) +{ + int blk_idx; + __le32 dev_phy_addr; + struct iwl_fw_paging_cmd fw_paging_cmd = { + .flags = + cpu_to_le32(PAGING_CMD_IS_SECURED | + PAGING_CMD_IS_ENABLED | + (mvm->num_of_pages_in_last_blk << + PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)), + .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE), + .block_num = cpu_to_le32(mvm->num_of_paging_blk), + }; + + /* loop over all paging blocks + the CSS block */ + for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { + dev_phy_addr = + cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >> + PAGE_2_EXP_SIZE); + fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr; + } + + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(fw_paging_cmd), &fw_paging_cmd); +} + +/* + * Send paging item cmd to FW in case CPU2 has paging image + */ +static int iwl_trans_get_paging_item(struct iwl_mvm *mvm) +{ + int ret; + struct iwl_fw_get_item_cmd fw_get_item_cmd = { + .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING), + }; + + struct iwl_fw_get_item_resp *item_resp; + struct iwl_host_cmd cmd = { + .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0), + .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, + .data = { &fw_get_item_cmd, }, + }; + + cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd); + + ret = iwl_mvm_send_cmd(mvm, &cmd); + if (ret) { + IWL_ERR(mvm, + "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n", + ret); + return ret; + } + + item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data; + if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) { + IWL_ERR(mvm, + "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n", + le32_to_cpu(item_resp->item_id)); + ret = -EIO; + goto exit; + } +
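+ /* + * The download buffer is used by the transport to copy pages + * to / from the smem when dma is not available; this assumes + * MAX_PAGING_IMAGE_SIZE is large enough for any paging image. + */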
+	mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
+						  GFP_KERNEL);
+	if (!mvm->trans->paging_download_buf) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
+	mvm->trans->paging_db = mvm->fw_paging_db;
+	IWL_DEBUG_FW(mvm,
+		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
+		     mvm->trans->paging_req_addr);
+
+exit:
+	iwl_free_resp(&cmd);
+
+	return ret;
+}
+
 static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
 			 struct iwl_rx_packet *pkt, void *data)
 {
@@ -213,7 +513,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 	const struct fw_img *fw;
 	int ret, i;
 	enum iwl_ucode_type old_type = mvm->cur_ucode;
-	static const u8 alive_cmd[] = { MVM_ALIVE };
+	static const u16 alive_cmd[] = { MVM_ALIVE };
 	struct iwl_sf_region st_fwrd_space;
 
 	if (ucode_type == IWL_UCODE_REGULAR &&
@@ -244,6 +544,11 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
 				    MVM_UCODE_ALIVE_TIMEOUT);
 	if (ret) {
+		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+			IWL_ERR(mvm,
+				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
+				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
 		mvm->cur_ucode = old_type;
 		return ret;
 	}
@@ -268,6 +573,40 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 
 	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
 
+	/*
+	 * Configure and operate the FW paging mechanism.
+	 * The driver configures the paging flow only once; the CPU2 paging
+	 * image is included in the IWL_UCODE_INIT image.
+	 */
+	if (fw->paging_mem_size) {
+		/*
+		 * When DMA is not enabled, the driver needs to copy / write
+		 * the downloaded / uploaded page to / from SMEM.
+		 * This gets the location where the pages are
+		 * stored.
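+		 * (iwl_trans_get_paging_item() below retrieves that address
+		 * via FW_GET_ITEM_CMD and keeps it in paging_req_addr).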
+ */ + if (!is_device_dma_capable(mvm->trans->dev)) { + ret = iwl_trans_get_paging_item(mvm); + if (ret) { + IWL_ERR(mvm, "failed to get FW paging item\n"); + return ret; + } + } + + ret = iwl_save_fw_paging(mvm, fw); + if (ret) { + IWL_ERR(mvm, "failed to save the FW paging image\n"); + return ret; + } + + ret = iwl_send_paging_cmd(mvm, fw); + if (ret) { + IWL_ERR(mvm, "failed to send the paging cmd\n"); + iwl_free_fw_paging(mvm); + return ret; + } + } + /* * Note: all the queues are enabled as part of the interface * initialization, but in firmware restart scenarios they @@ -314,7 +653,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) { struct iwl_notification_wait calib_wait; - static const u8 init_complete[] = { + static const u16 init_complete[] = { INIT_COMPLETE_NOTIF, CALIB_RES_NOTIF_PHY_DB }; @@ -444,12 +783,6 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) return; pkt = cmd.resp_pkt; - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(mvm, "Bad return from SHARED_MEM_CFG (0x%08X)\n", - pkt->hdr.flags); - goto exit; - } - mem_cfg = (void *)pkt->data; mvm->shared_mem_cfg.shared_mem_addr = @@ -473,14 +806,18 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) le32_to_cpu(mem_cfg->page_buff_size); IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n"); -exit: iwl_free_resp(&cmd); } int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, struct iwl_mvm_dump_desc *desc, - unsigned int delay) + struct iwl_fw_dbg_trigger_tlv *trigger) { + unsigned int delay = 0; + + if (trigger) + delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay)); + if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status)) return -EBUSY; @@ -491,6 +828,7 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, le32_to_cpu(desc->trig_desc.type)); mvm->fw_dump_desc = desc; + mvm->fw_dump_trig = trigger; queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay); @@ -498,7 +836,8 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, } int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, unsigned int delay) + const char *str, size_t len, + struct iwl_fw_dbg_trigger_tlv *trigger) { struct iwl_mvm_dump_desc *desc; @@ -510,14 +849,13 @@ int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, desc->trig_desc.type = cpu_to_le32(trig); memcpy(desc->trig_desc.data, str, len); - return iwl_mvm_fw_dbg_collect_desc(mvm, desc, delay); + return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger); } int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_tlv *trigger, const char *fmt, ...) 
{ - unsigned int delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay)); u16 occurrences = le16_to_cpu(trigger->occurrences); int ret, len = 0; char buf[64]; @@ -541,8 +879,9 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, len = strlen(buf) + 1; } - ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, - len, delay); + ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len, + trigger); + if (ret) return ret; @@ -676,8 +1015,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } - if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) - iwl_mvm_get_shared_mem_conf(mvm); + iwl_mvm_get_shared_mem_conf(mvm); ret = iwl_mvm_sf_update(mvm, NULL, false); if (ret) @@ -760,6 +1098,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } + if (iwl_mvm_is_csum_supported(mvm) && + mvm->cfg->features & NETIF_F_RXCSUM) + iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3); + /* allow FW/transport low power modes if not during restart */ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); @@ -815,9 +1157,8 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) return ret; } -int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_card_state_notif *card_state_notif = (void *)pkt->data; @@ -828,13 +1169,10 @@ int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, (flags & SW_CARD_DISABLED) ? "Kill" : "On", (flags & CT_KILL_CARD_DISABLED) ? "Reached" : "Not reached"); - - return 0; } -int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data; @@ -845,5 +1183,4 @@ int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, le32_to_cpu(mfuart_notif->external_ver), le32_to_cpu(mfuart_notif->status), le32_to_cpu(mfuart_notif->duration)); - return 0; } diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index 1812dd018af2..3424315dd876 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -1312,9 +1312,8 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm, } } -int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_extended_beacon_notif *beacon = (void *)pkt->data; @@ -1365,8 +1364,6 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); } } - - return 0; } static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, @@ -1415,9 +1412,8 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL); } -int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_missed_beacons_notif *mb = (void *)pkt->data; @@ -1434,5 +1430,4 @@ int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, IEEE80211_IFACE_ITER_NORMAL, 
 						   iwl_mvm_beacon_loss_iterator, mb);
-	return 0;
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index dfdab38e2d4a..537a15719d45 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -641,6 +641,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 			IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
 		IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
 		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+		ieee80211_hw_set(hw, TDLS_WIDER_BW);
 	}
 
 	if (fw_has_capa(&mvm->fw->ucode_capa,
@@ -649,6 +650,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 		hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
 	}
 
+	hw->netdev_features |= mvm->cfg->features;
+	if (!iwl_mvm_is_csum_supported(mvm))
+		hw->netdev_features &= ~NETIF_F_RXCSUM;
+
 	ret = ieee80211_register_hw(mvm->hw);
 	if (ret)
 		iwl_mvm_leds_exit(mvm);
@@ -1120,9 +1125,14 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 	u32 file_len, fifo_data_len = 0;
 	u32 smem_len = mvm->cfg->smem_len;
 	u32 sram2_len = mvm->cfg->dccm2_len;
+	bool monitor_dump_only = false;
 
 	lockdep_assert_held(&mvm->mutex);
 
+	if (mvm->fw_dump_trig &&
+	    mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
+		monitor_dump_only = true;
+
 	fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
 	if (!fw_error_dump)
 		return;
@@ -1174,6 +1184,20 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 		   fifo_data_len +
 		   sizeof(*dump_info);
 
+	/* Make room for the SMEM, if it exists */
+	if (smem_len)
+		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
+
+	/* Make room for the secondary SRAM, if it exists */
+	if (sram2_len)
+		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
+
+	/* If we only want a monitor dump, reset the file length */
+	if (monitor_dump_only) {
+		file_len = sizeof(*dump_file) + sizeof(*dump_data) +
+			   sizeof(*dump_info);
+	}
+
 	/*
 	 * In 8000 HW family B-step include the ICCM (which resides separately)
 	 */
@@ -1186,14 +1210,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
 			    mvm->fw_dump_desc->len;
 
-	/* Make room for the SMEM, if it exists */
-	if (smem_len)
-		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
-
-	/* Make room for the secondary SRAM, if it exists */
-	if (sram2_len)
-		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
-
 	dump_file = vzalloc(file_len);
 	if (!dump_file) {
 		kfree(fw_error_dump);
@@ -1239,6 +1255,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 		dump_data = iwl_fw_error_next_data(dump_data);
 	}
 
+	/* In case we only want a monitor dump, skip to dumping transport data */
+	if (monitor_dump_only)
+		goto dump_trans_data;
+
 	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
 	dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
 	dump_mem = (void *)dump_data->data;
@@ -1282,7 +1302,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 			       dump_mem->data, IWL8260_ICCM_LEN);
 	}
 
-	fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
+dump_trans_data:
+	fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
+						       mvm->fw_dump_trig);
 	fw_error_dump->op_mode_len = file_len;
 	if (fw_error_dump->trans_ptr)
 		file_len += fw_error_dump->trans_ptr->len;
@@ -1291,6 +1313,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 	dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
 		      GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
 
+	mvm->fw_dump_trig = NULL;
 	clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
 }
 
@@ -1433,22 +1456,9 @@ static void
iwl_mvm_restart_complete(struct iwl_mvm *mvm) static void iwl_mvm_resume_complete(struct iwl_mvm *mvm) { - bool exit_now; - if (!iwl_mvm_is_d0i3_supported(mvm)) return; - mutex_lock(&mvm->d0i3_suspend_mutex); - __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); - exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP, - &mvm->d0i3_suspend_flags); - mutex_unlock(&mvm->d0i3_suspend_mutex); - - if (exit_now) { - IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n"); - _iwl_mvm_exit_d0i3(mvm); - } - if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) if (!wait_event_timeout(mvm->d0i3_exit_waitq, !test_bit(IWL_MVM_STATUS_IN_D0I3, @@ -1664,6 +1674,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, goto out_unlock; } + mvmvif->features |= hw->netdev_features; + ret = iwl_mvm_mac_ctxt_add(mvm, vif); if (ret) goto out_release; @@ -2880,10 +2892,11 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; - /* fall-through */ - case WLAN_CIPHER_SUITE_CCMP: key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; break; + case WLAN_CIPHER_SUITE_CCMP: + key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; + break; case WLAN_CIPHER_SUITE_AES_CMAC: WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE)); break; @@ -3025,7 +3038,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, int res, time_reg = DEVICE_SYSTEM_TIME_REG; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; - static const u8 time_event_response[] = { HOT_SPOT_CMD }; + static const u16 time_event_response[] = { HOT_SPOT_CMD }; struct iwl_notification_wait wait_time_event; struct iwl_hs20_roc_req aux_roc_req = { .action = cpu_to_le32(FW_CTXT_ACTION_ADD), diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 2d4bad5fe825..b95a07ec9e36 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -80,6 +80,7 @@ #include "sta.h" #include "fw-api.h" #include "constants.h" +#include "tof.h" #define IWL_INVALID_MAC80211_QUEUE 0xff #define IWL_MVM_MAX_ADDRESSES 5 @@ -122,8 +123,7 @@ extern const struct ieee80211_ops iwl_mvm_hw_ops; * be up'ed after the INIT fw asserted. This is useful to be able to use * proprietary tools over testmode to debug the INIT fw. 
* @tfd_q_hang_detect: enabled the detection of hung transmit queues - * @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power - * Save)-2(default), LP(Low Power)-3 + * @power_scheme: one of enum iwl_power_scheme */ struct iwl_mvm_mod_params { bool init_dbg; @@ -357,6 +357,7 @@ struct iwl_mvm_vif_bf_data { * # of received beacons accumulated over FW restart, and the current * average signal of beacons retrieved from the firmware * @csa_failed: CSA failed to schedule time event, report an error later + * @features: hw features active for this vif */ struct iwl_mvm_vif { struct iwl_mvm *mvm; @@ -437,6 +438,9 @@ struct iwl_mvm_vif { /* Indicates that CSA countdown may be started */ bool csa_countdown; bool csa_failed; + + /* TCP Checksum Offload */ + netdev_features_t features; }; static inline struct iwl_mvm_vif * @@ -606,6 +610,11 @@ struct iwl_mvm { /* NVM sections */ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS]; + /* Paging section */ + struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS]; + u16 num_of_paging_blk; + u16 num_of_pages_in_last_blk; + /* EEPROM MAC addresses */ struct mac_address addresses[IWL_MVM_MAX_ADDRESSES]; @@ -686,6 +695,7 @@ struct iwl_mvm { * can hold 16 keys at most. Reflect this fact. */ unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)]; + u8 fw_key_deleted[STA_KEY_MAX_NUM]; /* references taken by the driver and spinlock protecting them */ spinlock_t refs_lock; @@ -698,6 +708,7 @@ struct iwl_mvm { u8 fw_dbg_conf; struct delayed_work fw_dump_wk; struct iwl_mvm_dump_desc *fw_dump_desc; + struct iwl_fw_dbg_trigger_tlv *fw_dump_trig; #ifdef CONFIG_IWLWIFI_LEDS struct led_classdev led; @@ -822,6 +833,7 @@ struct iwl_mvm { struct iwl_mvm_shared_mem_cfg shared_mem_cfg; u32 ciphers[6]; + struct iwl_mvm_tof_data tof_data; }; /* Extract MVM priv from op_mode and _hw */ @@ -941,6 +953,12 @@ static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm) IWL_MVM_BT_COEX_RRC; } +static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm) +{ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CSUM_SUPPORT); +} + extern const u8 iwl_mvm_ac_to_tx_fifo[]; struct iwl_rate_info { @@ -974,12 +992,12 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); /* Tx / Host Commands */ int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd); -int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id, +int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, u32 flags, u16 len, const void *data); int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, u32 *status); -int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, +int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, const void *data, u32 *status); int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, @@ -988,10 +1006,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb); void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, u8 sta_id); -void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, - struct ieee80211_tx_info *info, - struct iwl_tx_cmd *tx_cmd, - struct sk_buff *skb_frag); void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc); @@ -1003,6 +1017,17 @@ static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; } int 
iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync); void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm); +static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info, + struct iwl_tx_cmd *tx_cmd) +{ + struct ieee80211_key_conf *keyconf = info->control.hw_key; + + tx_cmd->sec_ctl = TX_CMD_SEC_CCM; + memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); + if (info->flags & IEEE80211_TX_CTL_AMPDU) + tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG); +} + static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm) { flush_work(&mvm->async_handlers_wk); @@ -1011,9 +1036,8 @@ static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm) /* Statistics */ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt); -int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear); void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm); @@ -1059,27 +1083,20 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, * FW notifications / CMD responses handlers * Convention: iwl_mvm_rx_ */ -int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); /* MVM PHY */ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, @@ -1106,12 +1123,10 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm 
*mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif); unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, @@ -1135,29 +1150,24 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); /* Scheduled scan */ -int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies, int type); -int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); /* UMAC scan */ int iwl_mvm_config_scan(struct iwl_mvm *mvm); -int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); -int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); /* MVM debugfs */ #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -1196,9 +1206,8 @@ int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif, char *buf, int bufsz); void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); #ifdef CONFIG_IWLWIFI_LEDS int iwl_mvm_leds_init(struct iwl_mvm *mvm); @@ -1254,9 +1263,8 @@ int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm); /* BT Coex */ int iwl_send_bt_init_conf(struct iwl_mvm *mvm); -int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum ieee80211_rssi_event_data); void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm); @@ -1274,9 +1282,8 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm); void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm); int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm); -int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum ieee80211_rssi_event_data); u16 
iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm, @@ -1285,9 +1292,8 @@ bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm, struct ieee80211_sta *sta); bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm, enum ieee80211_band band); -int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); /* beacon filtering */ #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -1376,9 +1382,8 @@ static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue, /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp); -int iwl_mvm_temp_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_temp_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tt_handler(struct iwl_mvm *mvm); void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff); void iwl_mvm_tt_exit(struct iwl_mvm *mvm); @@ -1390,9 +1395,8 @@ struct iwl_mcc_update_resp * iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, enum iwl_mcc_source src_id); int iwl_mvm_init_mcc(struct iwl_mvm *mvm); -int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, const char *alpha2, enum iwl_mcc_source src_id, @@ -1431,8 +1435,7 @@ void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tdls_ch_switch_work(struct work_struct *work); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); @@ -1442,10 +1445,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm); int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id); int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, unsigned int delay); + const char *str, size_t len, + struct iwl_fw_dbg_trigger_tlv *trigger); int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, struct iwl_mvm_dump_desc *desc, - unsigned int delay); + struct iwl_fw_dbg_trigger_tlv *trigger); void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm); int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, struct iwl_fw_dbg_trigger_tlv *trigger, diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c index 2a6be350704a..328187da7541 100644 --- a/drivers/net/wireless/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c @@ -139,12 +139,6 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section, return ret; pkt = cmd.resp_pkt; - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(mvm, "Bad return from NVM_ACCES_COMMAND (0x%08X)\n", - pkt->hdr.flags); - ret = -EIO; - goto exit; - } /* Extract NVM response */ nvm_resp = (void *)pkt->data; @@ -652,12 +646,6 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, return ERR_PTR(ret); pkt = cmd.resp_pkt; - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(mvm, "Bad return from 
MCC_UPDATE_COMMAND (0x%08X)\n", - pkt->hdr.flags); - ret = -EIO; - goto exit; - } /* Extract MCC response */ mcc_resp = (void *)pkt->data; @@ -839,9 +827,8 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm) return retval; } -int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mcc_chub_notif *notif = (void *)pkt->data; @@ -852,7 +839,7 @@ int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) - return 0; + return; mcc[0] = notif->mcc >> 8; mcc[1] = notif->mcc & 0xff; @@ -864,10 +851,8 @@ int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, mcc, src); regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL); if (IS_ERR_OR_NULL(regd)) - return 0; + return; regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); kfree(regd); - - return 0; } diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index e4fa50075ffd..07e68929b005 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -201,14 +201,15 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) } struct iwl_rx_handlers { - u8 cmd_id; + u16 cmd_id; bool async; - int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); + void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); }; #define RX_HANDLER(_cmd_id, _fn, _async) \ { .cmd_id = _cmd_id , .fn = _fn , .async = _async } +#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async) \ + { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async } /* * Handlers for fw notifications @@ -221,7 +222,6 @@ struct iwl_rx_handlers { * called from a worker with mvm->mutex held. */ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { - RX_HANDLER(REPLY_RX_MPDU_CMD, iwl_mvm_rx_rx_mpdu, false), RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false), RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false), RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false), @@ -261,9 +261,11 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif, true), RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false), + RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true), }; #undef RX_HANDLER +#undef RX_HANDLER_GRP #define CMD(x) [x] = #x static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = { @@ -286,8 +288,10 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(PHY_CONFIGURATION_CMD), CMD(CALIB_RES_NOTIF_PHY_DB), CMD(SET_CALIB_DEFAULT_CMD), + CMD(FW_PAGING_BLOCK_CMD), CMD(ADD_STA_KEY), CMD(ADD_STA), + CMD(FW_GET_ITEM_CMD), CMD(REMOVE_STA), CMD(LQ_CMD), CMD(SCAN_OFFLOAD_CONFIG_CMD), @@ -470,6 +474,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.no_reclaim_cmds = no_reclaim_cmds; trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K; + trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_WIDE_CMD_HDR); if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE) trans_cfg.bc_table_dword = true; @@ -576,6 +582,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, /* rpm starts with a taken ref. only set the appropriate bit here. 
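	 * (this IWL_MVM_REF_UCODE_DOWN reference is dropped again in
	 * iwl_mvm_up() once the firmware is running and low power modes
	 * are allowed)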
*/ mvm->refs[IWL_MVM_REF_UCODE_DOWN] = 1; + iwl_mvm_tof_init(mvm); + return op_mode; out_unregister: @@ -623,14 +631,15 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) kfree(mvm->nvm_sections[i].data); + iwl_mvm_tof_clean(mvm); + ieee80211_free_hw(mvm->hw); } struct iwl_async_handler_entry { struct list_head list; struct iwl_rx_cmd_buffer rxb; - int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); + void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); }; void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm) @@ -667,9 +676,7 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk) spin_unlock_bh(&mvm->async_handlers_lock); list_for_each_entry_safe(entry, tmp, &local_list, list) { - if (entry->fn(mvm, &entry->rxb, NULL)) - IWL_WARN(mvm, - "returned value from ASYNC handlers are ignored\n"); + entry->fn(mvm, &entry->rxb); iwl_free_rxb(&entry->rxb); list_del(&entry->list); kfree(entry); @@ -698,24 +705,30 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, if (!cmds_trig->cmds[i].cmd_id) break; - if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd) + if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd || + cmds_trig->cmds[i].group_id != pkt->hdr.group_id) continue; iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "CMD 0x%02x received", - pkt->hdr.cmd); + "CMD 0x%02x.%02x received", + pkt->hdr.group_id, pkt->hdr.cmd); break; } } -static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode, + struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); u8 i; + if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) { + iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); + return; + } + iwl_mvm_rx_check_trigger(mvm, pkt); /* @@ -729,16 +742,18 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode, const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i]; struct iwl_async_handler_entry *entry; - if (rx_h->cmd_id != pkt->hdr.cmd) + if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) continue; - if (!rx_h->async) - return rx_h->fn(mvm, rxb, cmd); + if (!rx_h->async) { + rx_h->fn(mvm, rxb); + return; + } entry = kzalloc(sizeof(*entry), GFP_ATOMIC); /* we can't do much... */ if (!entry) - return 0; + return; entry->rxb._page = rxb_steal_page(rxb); entry->rxb._offset = rxb->_offset; @@ -750,8 +765,6 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode, schedule_work(&mvm->async_handlers_wk); break; } - - return 0; } static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue) @@ -903,7 +916,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) * can't recover this since we're already half suspended. */ if (!mvm->restart_fw && fw_error) { - iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, 0); + iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, + NULL); } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; @@ -1100,9 +1114,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n"); - /* make sure we have no running tx while configuring the qos */ set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); - synchronize_net(); /* * iwl_mvm_ref_sync takes a reference before checking the flag. 
@@ -1130,6 +1142,9 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) mvm->d0i3_offloading = false; } + /* make sure we have no running tx while configuring the seqno */ + synchronize_net(); + iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data); ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags, sizeof(wowlan_config_cmd), @@ -1156,15 +1171,25 @@ static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac, iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags); } -static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac, - struct ieee80211_vif *vif) +struct iwl_mvm_wakeup_reason_iter_data { + struct iwl_mvm *mvm; + u32 wakeup_reasons; +}; + +static void iwl_mvm_d0i3_wakeup_reason_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) { - struct iwl_mvm *mvm = data; + struct iwl_mvm_wakeup_reason_iter_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc && - mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) - iwl_mvm_connection_loss(mvm, vif, "D0i3"); + data->mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) { + if (data->wakeup_reasons & + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH) + iwl_mvm_connection_loss(data->mvm, vif, "D0i3"); + else + ieee80211_beacon_loss(vif); + } } void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq) @@ -1232,7 +1257,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk) }; struct iwl_wowlan_status *status; int ret; - u32 disconnection_reasons, wakeup_reasons; + u32 handled_reasons, wakeup_reasons; __le16 *qos_seq = NULL; mutex_lock(&mvm->mutex); @@ -1249,13 +1274,18 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk) IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons); - disconnection_reasons = - IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | - IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH; - if (wakeup_reasons & disconnection_reasons) + handled_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH; + if (wakeup_reasons & handled_reasons) { + struct iwl_mvm_wakeup_reason_iter_data data = { + .mvm = mvm, + .wakeup_reasons = wakeup_reasons, + }; + ieee80211_iterate_active_interfaces( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_d0i3_disconnect_iter, mvm); + iwl_mvm_d0i3_wakeup_reason_iter, &data); + } out: iwl_mvm_d0i3_enable_tx(mvm, qos_seq); @@ -1308,17 +1338,6 @@ int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode) return _iwl_mvm_exit_d0i3(mvm); } -static void iwl_mvm_napi_add(struct iwl_op_mode *op_mode, - struct napi_struct *napi, - struct net_device *napi_dev, - int (*poll)(struct napi_struct *, int), - int weight) -{ - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - - ieee80211_napi_add(mvm->hw, napi, napi_dev, poll, weight); -} - static const struct iwl_op_mode_ops iwl_mvm_ops = { .start = iwl_op_mode_mvm_start, .stop = iwl_op_mode_mvm_stop, @@ -1332,5 +1351,4 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = { .nic_config = iwl_mvm_nic_config, .enter_d0i3 = iwl_mvm_enter_d0i3, .exit_d0i3 = iwl_mvm_exit_d0i3, - .napi_add = iwl_mvm_napi_add, }; diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index d2c6ba9d326b..4645877882a6 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -112,11 +112,12 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, static void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm, struct ieee80211_vif 
*vif, - struct iwl_beacon_filter_cmd *cmd) + struct iwl_beacon_filter_cmd *cmd, + bool d0i3) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - if (vif->bss_conf.cqm_rssi_thold) { + if (vif->bss_conf.cqm_rssi_thold && !d0i3) { cmd->bf_energy_delta = cpu_to_le32(vif->bss_conf.cqm_rssi_hyst); /* fw uses an absolute value for this */ @@ -287,27 +288,6 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, return true; } -static int iwl_mvm_power_get_skip_over_dtim(int dtimper, int bi) -{ - int numerator; - int dtim_interval = dtimper * bi; - - if (WARN_ON(!dtim_interval)) - return 0; - - if (dtimper == 1) { - if (bi > 100) - numerator = 408; - else - numerator = 510; - } else if (dtimper < 10) { - numerator = 612; - } else { - return 0; - } - return max(1, (numerator / dtim_interval)); -} - static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif) { struct ieee80211_chanctx_conf *chanctx_conf; @@ -357,8 +337,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); - if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) || - !mvmvif->pm_enabled) + if (!vif->bss_conf.ps || !mvmvif->pm_enabled || + (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p)) return; cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); @@ -377,11 +357,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, if (!radar_detect && (dtimper < 10) && (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP || mvm->cur_ucode == IWL_UCODE_WOWLAN)) { - cmd->skip_dtim_periods = - iwl_mvm_power_get_skip_over_dtim(dtimper, bi); - if (cmd->skip_dtim_periods) - cmd->flags |= - cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); + cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); + cmd->skip_dtim_periods = 3; } if (mvm->cur_ucode != IWL_UCODE_WOWLAN) { @@ -509,9 +486,8 @@ static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac, ETH_ALEN); } -int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data; @@ -520,8 +496,6 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id); - - return 0; } struct iwl_power_vifs { @@ -810,7 +784,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, vif->type != NL80211_IFTYPE_STATION || vif->p2p) return 0; - iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd); + iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3); if (!d0i3) iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd); ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags); diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index daff1d0a8e4a..5ae9c8aa868f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -177,7 +177,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, mvmsta = iwl_mvm_sta_from_mac80211(sta); mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p) + if (IWL_MVM_RS_DISABLE_P2P_MIMO && + iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p) return false; if (mvm->nvm_data->sku_cap_mimo_disabled) @@ -2403,7 
+2404,7 @@ struct rs_init_rate_info { u8 rate_idx; }; -static const struct rs_init_rate_info rs_init_rates_24ghz[] = { +static const struct rs_init_rate_info rs_optimal_rates_24ghz_legacy[] = { { -60, IWL_RATE_54M_INDEX }, { -64, IWL_RATE_48M_INDEX }, { -68, IWL_RATE_36M_INDEX }, @@ -2416,7 +2417,7 @@ static const struct rs_init_rate_info rs_init_rates_24ghz[] = { { S8_MIN, IWL_RATE_1M_INDEX }, }; -static const struct rs_init_rate_info rs_init_rates_5ghz[] = { +static const struct rs_init_rate_info rs_optimal_rates_5ghz_legacy[] = { { -60, IWL_RATE_54M_INDEX }, { -64, IWL_RATE_48M_INDEX }, { -72, IWL_RATE_36M_INDEX }, @@ -2427,6 +2428,124 @@ static const struct rs_init_rate_info rs_init_rates_5ghz[] = { { S8_MIN, IWL_RATE_6M_INDEX }, }; +static const struct rs_init_rate_info rs_optimal_rates_ht[] = { + { -60, IWL_RATE_MCS_7_INDEX }, + { -64, IWL_RATE_MCS_6_INDEX }, + { -68, IWL_RATE_MCS_5_INDEX }, + { -72, IWL_RATE_MCS_4_INDEX }, + { -80, IWL_RATE_MCS_3_INDEX }, + { -84, IWL_RATE_MCS_2_INDEX }, + { -85, IWL_RATE_MCS_1_INDEX }, + { S8_MIN, IWL_RATE_MCS_0_INDEX}, +}; + +static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = { + { -60, IWL_RATE_MCS_8_INDEX }, + { -64, IWL_RATE_MCS_7_INDEX }, + { -68, IWL_RATE_MCS_6_INDEX }, + { -72, IWL_RATE_MCS_5_INDEX }, + { -80, IWL_RATE_MCS_4_INDEX }, + { -84, IWL_RATE_MCS_3_INDEX }, + { -85, IWL_RATE_MCS_2_INDEX }, + { -87, IWL_RATE_MCS_1_INDEX }, + { S8_MIN, IWL_RATE_MCS_0_INDEX}, +}; + +static const struct rs_init_rate_info rs_optimal_rates_vht_40_80mhz[] = { + { -60, IWL_RATE_MCS_9_INDEX }, + { -64, IWL_RATE_MCS_8_INDEX }, + { -68, IWL_RATE_MCS_7_INDEX }, + { -72, IWL_RATE_MCS_6_INDEX }, + { -80, IWL_RATE_MCS_5_INDEX }, + { -84, IWL_RATE_MCS_4_INDEX }, + { -85, IWL_RATE_MCS_3_INDEX }, + { -87, IWL_RATE_MCS_2_INDEX }, + { -88, IWL_RATE_MCS_1_INDEX }, + { S8_MIN, IWL_RATE_MCS_0_INDEX }, +}; + +/* Init the optimal rate based on STA caps + * This combined with rssi is used to report the last tx rate + * to userspace when we haven't transmitted enough frames. + */ +static void rs_init_optimal_rate(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta) +{ + struct rs_rate *rate = &lq_sta->optimal_rate; + + if (lq_sta->max_mimo2_rate_idx != IWL_RATE_INVALID) + rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2; + else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID) + rate->type = lq_sta->is_vht ? 
LQ_VHT_SISO : LQ_HT_SISO; + else if (lq_sta->band == IEEE80211_BAND_5GHZ) + rate->type = LQ_LEGACY_A; + else + rate->type = LQ_LEGACY_G; + + rate->bw = rs_bw_from_sta_bw(sta); + rate->sgi = rs_sgi_allow(mvm, sta, rate, NULL); + + /* ANT/LDPC/STBC aren't relevant for the rate reported to userspace */ + + if (is_mimo(rate)) { + lq_sta->optimal_rate_mask = lq_sta->active_mimo2_rate; + } else if (is_siso(rate)) { + lq_sta->optimal_rate_mask = lq_sta->active_siso_rate; + } else { + lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate; + + if (lq_sta->band == IEEE80211_BAND_5GHZ) { + lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy; + lq_sta->optimal_nentries = + ARRAY_SIZE(rs_optimal_rates_5ghz_legacy); + } else { + lq_sta->optimal_rates = rs_optimal_rates_24ghz_legacy; + lq_sta->optimal_nentries = + ARRAY_SIZE(rs_optimal_rates_24ghz_legacy); + } + } + + if (is_vht(rate)) { + if (rate->bw == RATE_MCS_CHAN_WIDTH_20) { + lq_sta->optimal_rates = rs_optimal_rates_vht_20mhz; + lq_sta->optimal_nentries = + ARRAY_SIZE(rs_optimal_rates_vht_20mhz); + } else { + lq_sta->optimal_rates = rs_optimal_rates_vht_40_80mhz; + lq_sta->optimal_nentries = + ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz); + } + } else if (is_ht(rate)) { + lq_sta->optimal_rates = rs_optimal_rates_ht; + lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_ht); + } +} + +/* Compute the optimal rate index based on RSSI */ +static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta) +{ + struct rs_rate *rate = &lq_sta->optimal_rate; + int i; + + rate->index = find_first_bit(&lq_sta->optimal_rate_mask, + BITS_PER_LONG); + + for (i = 0; i < lq_sta->optimal_nentries; i++) { + int rate_idx = lq_sta->optimal_rates[i].rate_idx; + + if ((lq_sta->pers.last_rssi >= lq_sta->optimal_rates[i].rssi) && + (BIT(rate_idx) & lq_sta->optimal_rate_mask)) { + rate->index = rate_idx; + break; + } + } + + rs_dump_rate(mvm, rate, "OPTIMAL RATE"); + return rate; +} + /* Choose an initial legacy rate and antenna to use based on the RSSI * of last Rx */ @@ -2468,12 +2587,12 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, if (band == IEEE80211_BAND_5GHZ) { rate->type = LQ_LEGACY_A; - initial_rates = rs_init_rates_5ghz; - nentries = ARRAY_SIZE(rs_init_rates_5ghz); + initial_rates = rs_optimal_rates_5ghz_legacy; + nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy); } else { rate->type = LQ_LEGACY_G; - initial_rates = rs_init_rates_24ghz; - nentries = ARRAY_SIZE(rs_init_rates_24ghz); + initial_rates = rs_optimal_rates_24ghz_legacy; + nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy); } if (IWL_MVM_RS_RSSI_BASED_INIT_RATE) { @@ -2496,10 +2615,21 @@ void rs_update_last_rssi(struct iwl_mvm *mvm, struct iwl_lq_sta *lq_sta, struct ieee80211_rx_status *rx_status) { + int i; + lq_sta->pers.chains = rx_status->chains; lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0]; lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1]; lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2]; + lq_sta->pers.last_rssi = S8_MIN; + + for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) { + if (!(lq_sta->pers.chains & BIT(i))) + continue; + + if (lq_sta->pers.chain_signal[i] > lq_sta->pers.last_rssi) + lq_sta->pers.last_rssi = lq_sta->pers.chain_signal[i]; + } } /** @@ -2538,6 +2668,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, rate = &tbl->rate; rs_get_initial_rate(mvm, lq_sta, band, rate); + rs_init_optimal_rate(mvm, sta, lq_sta); WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B); if (rate->ant == ANT_A) @@ 
-2560,6 +2691,8 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct iwl_lq_sta *lq_sta = mvm_sta; + struct rs_rate *optimal_rate; + u32 last_ucode_rate; if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) { /* if vif isn't initialized mvm doesn't know about @@ -2583,8 +2716,18 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags, info->band, &info->control.rates[0]); - info->control.rates[0].count = 1; + + /* Report the optimal rate based on rssi and STA caps if we haven't + * converged yet (too little traffic) or exploring other modulations + */ + if (lq_sta->rs_state != RS_STATE_STAY_IN_COLUMN) { + optimal_rate = rs_get_optimal_rate(mvm, lq_sta); + last_ucode_rate = ucode_rate_from_rs_rate(mvm, + optimal_rate); + iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band, + &txrc->reported_rate); + } } static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, @@ -2605,6 +2748,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, #endif lq_sta->pers.chains = 0; memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal)); + lq_sta->pers.last_rssi = S8_MIN; return &sta_priv->lq_sta; } diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h index 2a3da314305a..81314ad9ebe0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/iwlwifi/mvm/rs.h @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -316,6 +317,14 @@ struct iwl_lq_sta { u8 max_siso_rate_idx; u8 max_mimo2_rate_idx; + /* Optimal rate based on RSSI and STA caps. + * Used only to reflect link speed to userspace. + */ + struct rs_rate optimal_rate; + unsigned long optimal_rate_mask; + const struct rs_init_rate_info *optimal_rates; + int optimal_nentries; + u8 missed_rate_counter; struct iwl_lq_cmd lq; @@ -341,6 +350,7 @@ struct iwl_lq_sta { #endif u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; + s8 last_rssi; struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT]; struct iwl_mvm *drv; } pers; diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index 8f1d93b7a13a..c37c10a423ce 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -61,6 +61,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ +#include #include "iwl-trans.h" #include "mvm.h" #include "fw-api.h" @@ -71,8 +72,7 @@ * Copies the phy information in mvm->last_phy_info, it will be used when the * actual data will come from the fw in the next packet. 
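 * Each new REPLY_RX_PHY_CMD overwrites mvm->last_phy_info, so only the
 * most recent phy info is kept.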
*/ -int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); @@ -86,8 +86,6 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, spin_unlock(&mvm->drv_stats_lock); } #endif - - return 0; } /* @@ -96,6 +94,7 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, * Adds the rxb to a new skb and give it to mac80211 */ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, + struct napi_struct *napi, struct sk_buff *skb, struct ieee80211_hdr *hdr, u16 len, u32 ampdu_status, u8 crypt_len, @@ -129,7 +128,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, fraglen, rxb->truesize); } - ieee80211_rx(mvm->hw, skb); + ieee80211_rx_napi(mvm->hw, skb, napi); } /* @@ -237,13 +236,26 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, return 0; } +static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, + struct sk_buff *skb, + u32 status) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); + + if (mvmvif->features & NETIF_F_RXCSUM && + status & RX_MPDU_RES_STATUS_CSUM_DONE && + status & RX_MPDU_RES_STATUS_CSUM_OK) + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + /* * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler * * Handles the actual data of the Rx packet from the fw */ -int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb) { struct ieee80211_hdr *hdr; struct ieee80211_rx_status *rx_status; @@ -271,7 +283,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, skb = alloc_skb(128, GFP_ATOMIC); if (!skb) { IWL_ERR(mvm, "alloc_skb failed\n"); - return 0; + return; } rx_status = IEEE80211_SKB_RXCB(skb); @@ -284,14 +296,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n", rx_pkt_status); kfree_skb(skb); - return 0; + return; } if ((unlikely(phy_info->cfg_phy_cnt > 20))) { IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n", phy_info->cfg_phy_cnt); kfree_skb(skb); - return 0; + return; } /* @@ -366,6 +378,9 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, } } + if (sta && ieee80211_is_data(hdr->frame_control)) + iwl_mvm_rx_csum(sta, skb, rx_pkt_status); + rcu_read_unlock(); /* set the preamble flag if appropriate */ @@ -429,9 +444,8 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, iwl_mvm_update_frame_stats(mvm, rate_n_flags, rx_status->flag & RX_FLAG_AMPDU_DETAILS); #endif - iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status, + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status, crypt_len, rxb); - return 0; } static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm, @@ -623,10 +637,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, iwl_rx_packet_payload_len(pkt)); } -int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb)); - return 0; } diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c 
b/drivers/net/wireless/iwlwifi/mvm/scan.c index 5000bfcded61..15055462cd11 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -72,10 +72,60 @@ #define IWL_DENSE_EBS_SCAN_RATIO 5 #define IWL_SPARSE_EBS_SCAN_RATIO 1 -struct iwl_mvm_scan_params { - u32 max_out_time; +enum iwl_mvm_scan_type { + IWL_SCAN_TYPE_UNASSOC, + IWL_SCAN_TYPE_WILD, + IWL_SCAN_TYPE_MILD, + IWL_SCAN_TYPE_FRAGMENTED, +}; + +enum iwl_mvm_traffic_load { + IWL_MVM_TRAFFIC_LOW, + IWL_MVM_TRAFFIC_MEDIUM, + IWL_MVM_TRAFFIC_HIGH, +}; + +struct iwl_mvm_scan_timing_params { + u32 dwell_active; + u32 dwell_passive; + u32 dwell_fragmented; u32 suspend_time; - bool passive_fragmented; + u32 max_out_time; +}; + +static struct iwl_mvm_scan_timing_params scan_timing[] = { + [IWL_SCAN_TYPE_UNASSOC] = { + .dwell_active = 10, + .dwell_passive = 110, + .dwell_fragmented = 44, + .suspend_time = 0, + .max_out_time = 0, + }, + [IWL_SCAN_TYPE_WILD] = { + .dwell_active = 10, + .dwell_passive = 110, + .dwell_fragmented = 44, + .suspend_time = 30, + .max_out_time = 120, + }, + [IWL_SCAN_TYPE_MILD] = { + .dwell_active = 10, + .dwell_passive = 110, + .dwell_fragmented = 44, + .suspend_time = 120, + .max_out_time = 120, + }, + [IWL_SCAN_TYPE_FRAGMENTED] = { + .dwell_active = 10, + .dwell_passive = 110, + .dwell_fragmented = 44, + .suspend_time = 95, + .max_out_time = 44, + }, +}; + +struct iwl_mvm_scan_params { + enum iwl_mvm_scan_type type; u32 n_channels; u16 delay; int n_ssids; @@ -90,15 +140,7 @@ struct iwl_mvm_scan_params { int n_match_sets; struct iwl_scan_probe_req preq; struct cfg80211_match_set *match_sets; - struct _dwell { - u16 passive; - u16 active; - u16 fragmented; - } dwell[IEEE80211_NUM_BANDS]; - struct { - u8 iterations; - u8 full_scan_mul; /* not used for UMAC */ - } schedule[2]; + u8 iterations[2]; }; static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) @@ -147,34 +189,6 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band, return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant); } -/* - * If req->n_ssids > 0, it means we should do an active scan. - * In case of active scan w/o directed scan, we receive a zero-length SSID - * just to notify that this scan is active and not passive. - * In order to notify the FW of the number of SSIDs we wish to scan (including - * the zero-length one), we need to set the corresponding bits in chan->type, - * one for each SSID, and set the active bit (first). If the first SSID is - * already included in the probe template, so we need to set only - * req->n_ssids - 1 bits in addition to the first bit. - */ -static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm, - enum ieee80211_band band, int n_ssids) -{ - if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL)) - return 10; - if (band == IEEE80211_BAND_2GHZ) - return 20 + 3 * (n_ssids + 1); - return 10 + 2 * (n_ssids + 1); -} - -static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm, - enum ieee80211_band band) -{ - if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL)) - return 110; - return band == IEEE80211_BAND_2GHZ ? 
100 + 20 : 100 + 10; -} - static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac, struct ieee80211_vif *vif) { @@ -186,90 +200,39 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac, *global_cnt += 1; } -static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_mvm_scan_params *params) +static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm) +{ + return IWL_MVM_TRAFFIC_LOW; +} + +static enum +iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct iwl_mvm_scan_params *params) { int global_cnt = 0; - enum ieee80211_band band; - u8 frag_passive_dwell = 0; + enum iwl_mvm_traffic_load load; + bool low_latency; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_scan_condition_iterator, &global_cnt); if (!global_cnt) - goto not_bound; + return IWL_SCAN_TYPE_UNASSOC; - params->suspend_time = 30; - params->max_out_time = 120; + load = iwl_mvm_get_traffic_load(mvm); + low_latency = iwl_mvm_low_latency(mvm); - if (iwl_mvm_low_latency(mvm)) { - if (fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) { + if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && + vif->type != NL80211_IFTYPE_P2P_DEVICE && + fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) + return IWL_SCAN_TYPE_FRAGMENTED; - params->suspend_time = 105; - /* - * If there is more than one active interface make - * passive scan more fragmented. - */ - frag_passive_dwell = 40; - params->max_out_time = frag_passive_dwell; - } else { - params->suspend_time = 120; - params->max_out_time = 120; - } - } + if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency) + return IWL_SCAN_TYPE_MILD; - if (frag_passive_dwell && - fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) { - /* - * P2P device scan should not be fragmented to avoid negative - * impact on P2P device discovery. Configure max_out_time to be - * equal to dwell time on passive channel. 
Take a longest - * possible value, one that corresponds to 2GHz band - */ - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - u32 passive_dwell = - iwl_mvm_get_passive_dwell(mvm, - IEEE80211_BAND_2GHZ); - params->max_out_time = passive_dwell; - } else { - params->passive_fragmented = true; - } - } - - if ((params->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) && - (params->max_out_time > 200)) - params->max_out_time = 200; - -not_bound: - - for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { - if (params->passive_fragmented) - params->dwell[band].fragmented = frag_passive_dwell; - - params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm, - band); - params->dwell[band].active = - iwl_mvm_get_active_dwell(mvm, band, params->n_ssids); - } - - IWL_DEBUG_SCAN(mvm, - "scan parameters: max_out_time %d, suspend_time %d, passive_fragmented %d\n", - params->max_out_time, params->suspend_time, - params->passive_fragmented); - IWL_DEBUG_SCAN(mvm, - "dwell[IEEE80211_BAND_2GHZ]: passive %d, active %d, fragmented %d\n", - params->dwell[IEEE80211_BAND_2GHZ].passive, - params->dwell[IEEE80211_BAND_2GHZ].active, - params->dwell[IEEE80211_BAND_2GHZ].fragmented); - IWL_DEBUG_SCAN(mvm, - "dwell[IEEE80211_BAND_5GHZ]: passive %d, active %d, fragmented %d\n", - params->dwell[IEEE80211_BAND_5GHZ].passive, - params->dwell[IEEE80211_BAND_5GHZ].active, - params->dwell[IEEE80211_BAND_5GHZ].fragmented); + return IWL_SCAN_TYPE_WILD; } static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm) @@ -327,9 +290,8 @@ static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res, return buf; } -int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data; @@ -341,17 +303,13 @@ int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, iwl_mvm_dump_channel_list(notif->results, notif->scanned_channels, buf, sizeof(buf))); - return 0; } -int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n"); ieee80211_sched_scan_results(mvm->hw); - - return 0; } static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status) @@ -368,9 +326,8 @@ static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status) } } -int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data; @@ -392,9 +349,13 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) { WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR); - IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n", + IWL_DEBUG_SCAN(mvm, + "Scheduled scan %s, EBS status %s, Last line %d, Last iteration %d, Time after last iteration %d\n", aborted ? 
"aborted" : "completed", - iwl_mvm_ebs_status_str(scan_notif->ebs_status)); + iwl_mvm_ebs_status_str(scan_notif->ebs_status), + scan_notif->last_schedule_line, + scan_notif->last_schedule_iteration, + __le32_to_cpu(scan_notif->time_after_last_iter)); mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED; } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) { @@ -406,9 +367,13 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) { WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR); - IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n", + IWL_DEBUG_SCAN(mvm, + "Scheduled scan %s, EBS status %s, Last line %d, Last iteration %d, Time after last iteration %d (FW)\n", aborted ? "aborted" : "completed", - iwl_mvm_ebs_status_str(scan_notif->ebs_status)); + iwl_mvm_ebs_status_str(scan_notif->ebs_status), + scan_notif->last_schedule_line, + scan_notif->last_schedule_iteration, + __le32_to_cpu(scan_notif->time_after_last_iter)); mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; ieee80211_sched_scan_stopped(mvm->hw); @@ -426,8 +391,6 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, mvm->last_ebs_successful = scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS || scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE; - - return 0; } static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list) @@ -751,13 +714,11 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_lmac *cmd, struct iwl_mvm_scan_params *params) { - cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active; - cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive; - if (params->passive_fragmented) - cmd->fragmented_dwell = - params->dwell[IEEE80211_BAND_2GHZ].fragmented; - cmd->max_out_time = cpu_to_le32(params->max_out_time); - cmd->suspend_time = cpu_to_le32(params->suspend_time); + cmd->active_dwell = scan_timing[params->type].dwell_active; + cmd->passive_dwell = scan_timing[params->type].dwell_passive; + cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented; + cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); + cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); } @@ -794,7 +755,7 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params) { - return params->schedule[0].iterations + params->schedule[1].iterations; + return params->iterations[0] + params->iterations[1]; } static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, @@ -808,7 +769,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; - if (params->passive_fragmented) + if (params->type == IWL_SCAN_TYPE_FRAGMENTED) flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED; if (iwl_mvm_rrm_scan_needed(mvm)) @@ -861,11 +822,11 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ssid_bitmap <<= 1; cmd->schedule[0].delay = cpu_to_le16(params->interval); - cmd->schedule[0].iterations = params->schedule[0].iterations; - cmd->schedule[0].full_scan_mul = params->schedule[0].full_scan_mul; + cmd->schedule[0].iterations = params->iterations[0]; + cmd->schedule[0].full_scan_mul = 1; cmd->schedule[1].delay = cpu_to_le16(params->interval); - cmd->schedule[1].iterations = params->schedule[1].iterations; - 
cmd->schedule[1].full_scan_mul = params->schedule[1].iterations; + cmd->schedule[1].iterations = params->iterations[1]; + cmd->schedule[1].full_scan_mul = 1; if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) { cmd->channel_opt[0].flags = @@ -937,9 +898,9 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) int num_channels = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels + mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; - int ret, i, j = 0, cmd_size, data_size; + int ret, i, j = 0, cmd_size; struct iwl_host_cmd cmd = { - .id = SCAN_CFG_CMD, + .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), }; if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) @@ -951,8 +912,6 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) if (!scan_config) return -ENOMEM; - data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr); - scan_config->hdr.size = cpu_to_le16(data_size); scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE | SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS | SCAN_CONFIG_FLAG_SET_TX_CHAINS | @@ -1013,17 +972,15 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, struct iwl_scan_req_umac *cmd, struct iwl_mvm_scan_params *params) { - cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active; - cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive; - if (params->passive_fragmented) - cmd->fragmented_dwell = - params->dwell[IEEE80211_BAND_2GHZ].fragmented; - cmd->max_out_time = cpu_to_le32(params->max_out_time); - cmd->suspend_time = cpu_to_le32(params->suspend_time); + cmd->active_dwell = scan_timing[params->type].dwell_active; + cmd->passive_dwell = scan_timing[params->type].dwell_passive; + cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented; + cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time); + cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time); cmd->scan_priority = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); - if (iwl_mvm_scan_total_iterations(params) == 0) + if (iwl_mvm_scan_total_iterations(params) == 1) cmd->ooc_priority = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); else @@ -1059,7 +1016,7 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT; - if (params->passive_fragmented) + if (params->type == IWL_SCAN_TYPE_FRAGMENTED) flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED; if (iwl_mvm_rrm_scan_needed(mvm)) @@ -1099,8 +1056,6 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return uid; memset(cmd, 0, ksize(cmd)); - cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) - - sizeof(struct iwl_mvm_umac_cmd_hdr)); iwl_mvm_scan_umac_dwell(mvm, cmd, params); @@ -1230,17 +1185,15 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, params.n_match_sets = 0; params.match_sets = NULL; - params.schedule[0].iterations = 1; - params.schedule[0].full_scan_mul = 0; - params.schedule[1].iterations = 0; - params.schedule[1].full_scan_mul = 0; + params.iterations[0] = 1; + params.iterations[1] = 0; - iwl_mvm_scan_calc_dwell(mvm, vif, ¶ms); + params.type = iwl_mvm_get_scan_type(mvm, vif, ¶ms); iwl_mvm_build_scan_probe(mvm, vif, ies, ¶ms); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { - hcmd.id = SCAN_REQ_UMAC; + hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0); ret = iwl_mvm_scan_umac(mvm, vif, ¶ms, IWL_MVM_SCAN_REGULAR); } else { @@ -1313,10 +1266,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm 
*mvm, params.n_match_sets = req->n_match_sets; params.match_sets = req->match_sets; - params.schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS; - params.schedule[0].full_scan_mul = 1; - params.schedule[1].iterations = 0xff; - params.schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER; + params.iterations[0] = 0; + params.iterations[1] = 0xff; + + params.type = iwl_mvm_get_scan_type(mvm, vif, ¶ms); if (req->interval > U16_MAX) { IWL_DEBUG_SCAN(mvm, @@ -1339,8 +1292,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, params.delay = req->delay; } - iwl_mvm_scan_calc_dwell(mvm, vif, ¶ms); - ret = iwl_mvm_config_sched_scan_profiles(mvm, req); if (ret) return ret; @@ -1348,7 +1299,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, iwl_mvm_build_scan_probe(mvm, vif, ies, ¶ms); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { - hcmd.id = SCAN_REQ_UMAC; + hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0); ret = iwl_mvm_scan_umac(mvm, vif, ¶ms, IWL_MVM_SCAN_SCHED); } else { hcmd.id = SCAN_OFFLOAD_REQUEST_CMD; @@ -1374,9 +1325,8 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, return ret; } -int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_umac_scan_complete *notif = (void *)pkt->data; @@ -1384,7 +1334,7 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED); if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status))) - return 0; + return; /* if the scan is already stopping, we don't need to notify mac80211 */ if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) { @@ -1395,26 +1345,24 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, } mvm->scan_status &= ~mvm->scan_uid_status[uid]; - IWL_DEBUG_SCAN(mvm, - "Scan completed, uid %u type %u, status %s, EBS status %s\n", + "Scan completed, uid %u type %u, status %s, EBS status %s, Last line %d, Last iteration %d, Time from last iteration %d\n", uid, mvm->scan_uid_status[uid], notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? 
"completed" : "aborted", - iwl_mvm_ebs_status_str(notif->ebs_status)); + iwl_mvm_ebs_status_str(notif->ebs_status), + notif->last_schedule, notif->last_iter, + __le32_to_cpu(notif->time_from_last_iter)); if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS && notif->ebs_status != IWL_SCAN_EBS_INACTIVE) mvm->last_ebs_successful = false; mvm->scan_uid_status[uid] = 0; - - return 0; } -int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data; @@ -1426,15 +1374,11 @@ int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, iwl_mvm_dump_channel_list(notif->results, notif->scanned_channels, buf, sizeof(buf))); - return 0; } static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) { - struct iwl_umac_scan_abort cmd = { - .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) - - sizeof(struct iwl_mvm_umac_cmd_hdr)), - }; + struct iwl_umac_scan_abort cmd = {}; int uid, ret; lockdep_assert_held(&mvm->mutex); @@ -1451,7 +1395,10 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid); - ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, + iwl_cmd_id(SCAN_ABORT_UMAC, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(cmd), &cmd); if (!ret) mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT; @@ -1461,7 +1408,7 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) { struct iwl_notification_wait wait_scan_done; - static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, + static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC, SCAN_OFFLOAD_COMPLETE, }; int ret; diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 26f076e82149..df216cd0c98f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -1148,18 +1148,31 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) { - int i; + int i, max = -1, max_offs = -1; lockdep_assert_held(&mvm->mutex); - i = find_first_zero_bit(mvm->fw_key_table, STA_KEY_MAX_NUM); + /* Pick the unused key offset with the highest 'deleted' + * counter. Every time a key is deleted, all the counters + * are incremented and the one that was just deleted is + * reset to zero. Thus, the highest counter is the one + * that was deleted longest ago. Pick that one. 
+ */ + for (i = 0; i < STA_KEY_MAX_NUM; i++) { + if (test_bit(i, mvm->fw_key_table)) + continue; + if (mvm->fw_key_deleted[i] > max) { + max = mvm->fw_key_deleted[i]; + max_offs = i; + } + } - if (i == STA_KEY_MAX_NUM) + if (max_offs < 0) return STA_KEY_IDX_INVALID; - __set_bit(i, mvm->fw_key_table); + __set_bit(max_offs, mvm->fw_key_table); - return i; + return max_offs; } static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, @@ -1277,8 +1290,6 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, const u8 *pn; memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen); - ieee80211_aes_cmac_calculate_k1_k2(keyconf, - igtk_cmd.K1, igtk_cmd.K2); ieee80211_get_key_rx_seq(keyconf, 0, &seq); pn = seq.aes_cmac.pn; igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) | @@ -1479,7 +1490,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); u8 sta_id; - int ret; + int ret, i; lockdep_assert_held(&mvm->mutex); @@ -1498,6 +1509,13 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, return -ENOENT; } + /* track which key was deleted last */ + for (i = 0; i < STA_KEY_MAX_NUM; i++) { + if (mvm->fw_key_deleted[i] < U8_MAX) + mvm->fw_key_deleted[i]++; + } + mvm->fw_key_deleted[keyconf->hw_key_idx] = 0; + if (sta_id == IWL_MVM_STATION_COUNT) { IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n"); return 0; @@ -1661,9 +1679,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } -int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_eosp_notification *notif = (void *)pkt->data; @@ -1671,15 +1688,13 @@ int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, u32 sta_id = le32_to_cpu(notif->sta_id); if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT)) - return 0; + return; rcu_read_lock(); sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (!IS_ERR_OR_NULL(sta)) ieee80211_sta_eosp(sta); rcu_read_unlock(); - - return 0; } void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h index 748f5dc3f9f4..eedb215eba3f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/iwlwifi/mvm/sta.h @@ -378,9 +378,8 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key); -int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); /* AMPDU */ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/iwlwifi/mvm/tdls.c b/drivers/net/wireless/iwlwifi/mvm/tdls.c index a87b506c8c72..fe2fa5650443 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/iwlwifi/mvm/tdls.c @@ -169,18 +169,11 @@ static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return; pkt = cmd.resp_pkt; - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - IWL_ERR(mvm, "Bad return from TDLS_CONFIG_COMMAND (0x%08X)\n", - pkt->hdr.flags); - goto exit; - } - if (WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp))) - goto exit; + WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)); /* we don't really care about the response at this 
point */ -exit: iwl_free_resp(&cmd); } @@ -261,8 +254,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm, mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT; } -int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data; @@ -277,17 +269,17 @@ int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, /* can fail sometimes */ if (!le32_to_cpu(notif->status)) { iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); - goto out; + return; } if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT)) - goto out; + return; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); /* the station may not be here, but if it is, it must be a TDLS peer */ if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls)) - goto out; + return; mvmsta = iwl_mvm_sta_from_mac80211(sta); vif = mvmsta->vif; @@ -301,9 +293,6 @@ int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, msecs_to_jiffies(delay)); iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE); - -out: - return 0; } static int @@ -471,13 +460,19 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2); info = IEEE80211_SKB_CB(skb); - if (info->control.hw_key) - iwl_mvm_set_tx_cmd_crypto(mvm, info, &cmd.frame.tx_cmd, skb); + hdr = (void *)skb->data; + if (info->control.hw_key) { + if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) { + rcu_read_unlock(); + ret = -EINVAL; + goto out; + } + iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd); + } iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info, mvmsta->sta_id); - hdr = (void *)skb->data; iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta, hdr->frame_control); rcu_read_unlock(); diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index e472729e5f14..dbd7d544575d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -410,9 +410,8 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm, /* * The Rx handler for time event notifications */ -int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_time_event_notif *notif = (void *)pkt->data; @@ -433,8 +432,6 @@ int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, } unlock: spin_unlock_bh(&mvm->time_event_lock); - - return 0; } static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait, @@ -503,7 +500,7 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data, struct iwl_time_event_cmd *te_cmd) { - static const u8 time_event_response[] = { TIME_EVENT_CMD }; + static const u16 time_event_response[] = { TIME_EVENT_CMD }; struct iwl_notification_wait wait_time_event; int ret; @@ -566,7 +563,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; - const u8 te_notif_response[] = { TIME_EVENT_NOTIFICATION }; + const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION }; struct 
iwl_notification_wait wait_te_notif; struct iwl_time_event_cmd time_cmd = {}; @@ -599,8 +596,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC); - time_cmd.apply_time = - cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG)); + time_cmd.apply_time = cpu_to_le32(0); time_cmd.max_frags = TE_V2_FRAG_NONE; time_cmd.max_delay = cpu_to_le32(max_delay); diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h index de4fbc6d57f1..cbdf8e52a5f1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.h +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h @@ -157,9 +157,8 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm, /* * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION. */ -int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd); +void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); /** * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.c b/drivers/net/wireless/iwlwifi/mvm/tof.c new file mode 100644 index 000000000000..380972f8fb82 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/tof.c @@ -0,0 +1,304 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include "mvm.h" +#include "fw-api-tof.h" + +#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256 + +void iwl_mvm_tof_init(struct iwl_mvm *mvm) +{ + struct iwl_mvm_tof_data *tof_data = &mvm->tof_data; + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return; + + memset(tof_data, 0, sizeof(*tof_data)); + + tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (IWL_MVM_TOF_IS_RESPONDER) { + tof_data->responder_cfg.sub_grp_cmd_id = + cpu_to_le32(TOF_RESPONDER_CONFIG_CMD); + tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT; + } +#endif + + tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD); + tof_data->range_req.req_timeout = 1; + tof_data->range_req.initiator = 1; + tof_data->range_req.report_policy = 3; + + tof_data->range_req_ext.sub_grp_cmd_id = + cpu_to_le32(TOF_RANGE_REQ_EXT_CMD); + + mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; +} + +void iwl_mvm_tof_clean(struct iwl_mvm *mvm) +{ + struct iwl_mvm_tof_data *tof_data = &mvm->tof_data; + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return; + + memset(tof_data, 0, sizeof(*tof_data)); + mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; +} + +static void iwl_tof_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + bool *enabled = _data; + + /* non bss vif exists */ + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) + *enabled = false; +} + +int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm) +{ + struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg; + bool enabled; + + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return -EINVAL; + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_tof_iterator, &enabled); + if (!enabled) { + IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n"); + return -EINVAL; + } + + mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(*cmd), cmd); +} + +int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id) +{ + struct iwl_tof_range_abort_cmd cmd = { + .sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD), + .request_id = id, + }; + + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return -EINVAL; + + if (id != mvm->tof_data.active_range_request) { + IWL_ERR(mvm, "Invalid range request id %d (active %d)\n", + id, mvm->tof_data.active_range_request); + return -EINVAL; + } + + /* after abort 
is sent there's no active request anymore */ + mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; + + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(cmd), &cmd); +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return -EINVAL; + + if (vif->p2p || vif->type != NL80211_IFTYPE_AP) { + IWL_ERR(mvm, "Cannot start responder, not in AP mode\n"); + return -EIO; + } + + cmd->sta_id = mvmvif->bcast_sta.sta_id; + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(*cmd), cmd); +} +#endif + +int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_host_cmd cmd = { + .id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0), + .len = { sizeof(mvm->tof_data.range_req), }, + /* no copy because of the command size */ + .dataflags = { IWL_HCMD_DFL_NOCOPY, }, + }; + + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return -EINVAL; + + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) { + IWL_ERR(mvm, "Cannot send range request, not STA mode\n"); + return -EIO; + } + + /* nesting of range requests is not supported in FW */ + if (mvm->tof_data.active_range_request != + IWL_MVM_TOF_RANGE_REQ_MAX_ID) { + IWL_ERR(mvm, "Cannot send range req, already active req %d\n", + mvm->tof_data.active_range_request); + return -EIO; + } + + mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id; + + cmd.data[0] = &mvm->tof_data.range_req; + return iwl_mvm_send_cmd(mvm, &cmd); +} + +int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + lockdep_assert_held(&mvm->mutex); + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) + return -EINVAL; + + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) { + IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n"); + return -EIO; + } + + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, + IWL_ALWAYS_LONG_GROUP, 0), + 0, sizeof(mvm->tof_data.range_req_ext), + &mvm->tof_data.range_req_ext); +} + +static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data) +{ + struct iwl_tof_range_rsp_ntfy *resp = (void *)data; + + if (resp->request_id != mvm->tof_data.active_range_request) { + IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n", + resp->request_id, mvm->tof_data.active_range_request); + return -EIO; + } + + memcpy(&mvm->tof_data.range_resp, resp, + sizeof(struct iwl_tof_range_rsp_ntfy)); + mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; + + return 0; +} + +static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data) +{ + struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data; + + IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token); + return 0; +} + +static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data) +{ + struct iwl_tof_neighbor_report *report = + (struct iwl_tof_neighbor_report *)data; + + IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n", + report->bssid, report->request_token, report->status); + return 0; +} + +void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm, + 
struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data; + + lockdep_assert_held(&mvm->mutex); + + switch (le32_to_cpu(resp->sub_grp_cmd_id)) { + case TOF_RANGE_RESPONSE_NOTIF: + iwl_mvm_tof_range_resp(mvm, resp->data); + break; + case TOF_MCSI_DEBUG_NOTIF: + iwl_mvm_tof_mcsi_notif(mvm, resp->data); + break; + case TOF_NEIGHBOR_REPORT_RSP_NOTIF: + iwl_mvm_tof_nb_report_notif(mvm, resp->data); + break; + default: + IWL_ERR(mvm, "Unknown sub-group command 0x%x\n", + resp->sub_grp_cmd_id); + break; + } +} diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.h b/drivers/net/wireless/iwlwifi/mvm/tof.h new file mode 100644 index 000000000000..50ae8adaaa6e --- /dev/null +++ b/drivers/net/wireless/iwlwifi/mvm/tof.h @@ -0,0 +1,94 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __tof_h__ +#define __tof_h__ + +#include "fw-api-tof.h" + +struct iwl_mvm_tof_data { + struct iwl_tof_config_cmd tof_cfg; + struct iwl_tof_range_req_cmd range_req; + struct iwl_tof_range_req_ext_cmd range_req_ext; +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct iwl_tof_responder_config_cmd responder_cfg; +#endif + struct iwl_tof_range_rsp_ntfy range_resp; + u8 last_abort_id; + u16 active_range_request; +}; + +void iwl_mvm_tof_init(struct iwl_mvm *mvm); +void iwl_mvm_tof_clean(struct iwl_mvm *mvm); +int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm); +int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id); +int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +#ifdef CONFIG_IWLWIFI_DEBUGFS +int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +#endif +#endif /* __tof_h__ */ diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c index 80d07db6e7e8..fe7145c2c98a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/iwlwifi/mvm/tt.c @@ -33,6 +33,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * All rights reserved.
* * Redistribution and use in source and binary forms, with or without @@ -154,24 +155,20 @@ static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait, return true; } -int iwl_mvm_temp_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); int temp; /* the notification is handled synchronously in ctkill, so skip here */ if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) - return 0; + return; temp = iwl_mvm_temp_notif_parse(mvm, pkt); if (temp < 0) - return 0; + return; iwl_mvm_tt_temp_changed(mvm, temp); - - return 0; } static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) @@ -187,7 +184,7 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) int iwl_mvm_get_temp(struct iwl_mvm *mvm) { struct iwl_notification_wait wait_temp_notif; - static const u8 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION }; + static const u16 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION }; int ret, temp; lockdep_assert_held(&mvm->mutex); diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 89116864d2a0..6df5aada4f16 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -153,18 +153,20 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, if (ieee80211_is_mgmt(fc)) { if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) - tx_cmd->pm_frame_timeout = cpu_to_le16(3); + tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC); + else if (ieee80211_is_action(fc)) + tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); else - tx_cmd->pm_frame_timeout = cpu_to_le16(2); + tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); /* The spec allows Action frames in A-MPDU, we don't support * it */ WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU); } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { - tx_cmd->pm_frame_timeout = cpu_to_le16(2); + tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT); } else { - tx_cmd->pm_frame_timeout = 0; + tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE); } if (ieee80211_is_data(fc) && len > mvm->rts_threshold && @@ -268,19 +270,29 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, /* * Sets the fields in the Tx cmd that are crypto related */ -void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, - struct ieee80211_tx_info *info, - struct iwl_tx_cmd *tx_cmd, - struct sk_buff *skb_frag) +static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, + struct iwl_tx_cmd *tx_cmd, + struct sk_buff *skb_frag, + int hdrlen) { struct ieee80211_key_conf *keyconf = info->control.hw_key; + u8 *crypto_hdr = skb_frag->data + hdrlen; + u64 pn; switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_CCMP: - tx_cmd->sec_ctl = TX_CMD_SEC_CCM; - memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); - if (info->flags & IEEE80211_TX_CTL_AMPDU) - tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG); + case WLAN_CIPHER_SUITE_CCMP_256: + iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd); + pn = atomic64_inc_return(&keyconf->tx_pn); + crypto_hdr[0] = pn; + crypto_hdr[2] = 0; + crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6); + crypto_hdr[1] = pn >> 8; + crypto_hdr[4] = pn >> 16; + crypto_hdr[5] = pn >> 24; + crypto_hdr[6] = pn >> 32; + crypto_hdr[7] = pn >> 40; break; case WLAN_CIPHER_SUITE_TKIP: @@ -308,7 +320,7 @@ void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, */ 
static struct iwl_device_cmd * iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_sta *sta, u8 sta_id) + int hdrlen, struct ieee80211_sta *sta, u8 sta_id) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -325,7 +337,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; if (info->control.hw_key) - iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb); + iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen); iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id); @@ -346,6 +358,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) struct iwl_device_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; u8 sta_id; + int hdrlen = ieee80211_hdrlen(hdr->frame_control); if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU)) return -1; @@ -366,23 +379,34 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; /* - * If the interface on which frame is sent is the P2P_DEVICE + * If the interface on which the frame is sent is the P2P_DEVICE * or an AP/GO interface use the broadcast station associated - * with it; otherwise use the AUX station. + * with it; otherwise if the interface is a managed interface + * use the AP station associated with it for multicast traffic + * (this is not possible for unicast packets as TDLS discovery + * responses are sent without a station entry); otherwise use the + * AUX station. */ - if (info->control.vif && - (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE || - info->control.vif->type == NL80211_IFTYPE_AP)) { + sta_id = mvm->aux_sta.sta_id; + if (info->control.vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); - sta_id = mvmvif->bcast_sta.sta_id; - } else { - sta_id = mvm->aux_sta.sta_id; + + if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE || + info->control.vif->type == NL80211_IFTYPE_AP) + sta_id = mvmvif->bcast_sta.sta_id; + else if (info->control.vif->type == NL80211_IFTYPE_STATION && + is_multicast_ether_addr(hdr->addr1)) { + u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); + + if (ap_sta_id != IWL_MVM_STATION_COUNT) + sta_id = ap_sta_id; + } } IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue); - dev_cmd = iwl_mvm_set_tx_params(mvm, skb, NULL, sta_id); + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id); if (!dev_cmd) return -1; @@ -390,7 +414,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control)); + memcpy(tx_cmd->hdr, hdr, hdrlen); if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); @@ -416,9 +440,11 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, u8 tid = IWL_MAX_TID_COUNT; u8 txq_id = info->hw_queue; bool is_data_qos = false, is_ampdu = false; + int hdrlen; mvmsta = iwl_mvm_sta_from_mac80211(sta); fc = hdr->frame_control; + hdrlen = ieee80211_hdrlen(fc); if (WARN_ON_ONCE(!mvmsta)) return -1; @@ -426,7 +452,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) return -1; - dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id); + dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id); if (!dev_cmd) goto
drop; @@ -458,7 +484,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, } /* Copy MAC header from skb into command buffer */ - memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(fc)); + memcpy(tx_cmd->hdr, hdr, hdrlen); WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); @@ -911,8 +937,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, rcu_read_unlock(); } -int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data; @@ -921,8 +946,6 @@ int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, iwl_mvm_rx_tx_cmd_single(mvm, pkt); else iwl_mvm_rx_tx_cmd_agg(mvm, pkt); - - return 0; } static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info, @@ -942,8 +965,7 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info, (void *)(uintptr_t)tid_data->rate_n_flags; } -int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data; @@ -965,7 +987,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT || tid >= IWL_MAX_TID_COUNT, "sta_id %d tid %d", sta_id, tid)) - return 0; + return; rcu_read_lock(); @@ -974,7 +996,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, /* Reclaiming frames for a station that has been deleted ? */ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); - return 0; + return; } mvmsta = iwl_mvm_sta_from_mac80211(sta); @@ -985,7 +1007,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, "invalid BA notification: Q %d, tid %d, flow %d\n", tid_data->txq_id, tid, scd_flow); rcu_read_unlock(); - return 0; + return; } spin_lock_bh(&mvmsta->lock); @@ -1072,8 +1094,6 @@ out: skb = __skb_dequeue(&reclaimed_skbs); ieee80211_tx_status(mvm->hw, skb); } - - return 0; } /* diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index 03f8e06dded7..a7d434256423 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -108,7 +108,7 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd) return ret; } -int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id, +int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id, u32 flags, u16 len, const void *data) { struct iwl_host_cmd cmd = { @@ -166,11 +166,6 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, goto out_free_resp; } - if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { - ret = -EIO; - goto out_free_resp; - } - resp_len = iwl_rx_packet_payload_len(pkt); if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { ret = -EIO; @@ -187,7 +182,7 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd, /* * We assume that the caller set the status to the sucess value */ -int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, u16 len, +int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, const void *data, u32 *status) { struct iwl_host_cmd cmd = { @@ -243,8 +238,7 @@ u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx) return fw_rate_idx_to_plcp[rate_idx]; } -int 
iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, - struct iwl_device_cmd *cmd) +void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_error_resp *err_resp = (void *)pkt->data; @@ -256,7 +250,6 @@ int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, le32_to_cpu(err_resp->error_service)); IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n", le64_to_cpu(err_resp->timestamp)); - return 0; } /* diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 9f65c1cff1b1..b0825c402c73 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -614,6 +614,7 @@ static int iwl_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct iwl_trans *trans = pci_get_drvdata(pdev); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; /* Before you put code here, think about WoWLAN. You cannot check here @@ -631,20 +632,16 @@ static int iwl_pci_resume(struct device *device) return 0; /* - * On suspend, ict is disabled, and the interrupt mask - * gets cleared. Reconfigure them both in case of d0i3 - * image. Otherwise, only enable rfkill interrupt (in - * order to keep track of the rfkill status) + * Enable rfkill interrupt (in order to keep track of + * the rfkill status) */ - if (trans->wowlan_d0i3) { - iwl_pcie_reset_ict(trans); - iwl_enable_interrupts(trans); - } else { - iwl_enable_rfkill_int(trans); - } + iwl_enable_rfkill_int(trans); hw_rfkill = iwl_is_rfkill_set(trans); + + mutex_lock(&trans_pcie->mutex); iwl_trans_pcie_rf_kill(trans, hw_rfkill); + mutex_unlock(&trans_pcie->mutex); return 0; } diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 376b84e54ad7..feb2f7e81134 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -44,6 +44,21 @@ #include "iwl-io.h" #include "iwl-op-mode.h" +/* We need 2 entries for the TX command and header, and another one might + * be needed for potential data in the SKB's head. The remaining ones can + * be used for frags. 
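+ *
+ * Illustrative arithmetic (assumption, not part of the patch): with
+ * IWL_NUM_OF_TBS == 20, as defined in iwl-fh.h, IWL_PCIE_MAX_FRAGS
+ * works out to 17, i.e. at most 17 of a TFD's 20 TBs can point at
+ * skb fragments.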
+ */ +#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3) + +/* + * RX related structures and functions + */ +#define RX_NUM_QUEUES 1 +#define RX_POST_REQ_ALLOC 2 +#define RX_CLAIM_REQ_ALLOC 8 +#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) +#define RX_LOW_WATERMARK 8 + struct iwl_host_cmd; /*This file includes the declaration that are internal to the @@ -77,29 +92,29 @@ struct isr_statistics { * struct iwl_rxq - Rx queue * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) - * @pool: - * @queue: * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet * @free_count: Number of pre-allocated buffers in rx_free + * @used_count: Number of RBDs handed to the allocator to use for allocation * @write_actual: - * @rx_free: list of free SKBs for use - * @rx_used: List of Rx buffers with no SKB + * @rx_free: list of RBDs with allocated RB ready for use + * @rx_used: list of RBDs with no RB attached * @need_update: flag to indicate we need to update read/write index * @rb_stts: driver's pointer to receive buffer status * @rb_stts_dma: bus address of receive buffer status * @lock: + * @pool: initial pool of iwl_rx_mem_buffer for the queue + * @queue: actual rx queue * * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers */ struct iwl_rxq { __le32 *bd; dma_addr_t bd_dma; - struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; - struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; u32 read; u32 write; u32 free_count; + u32 used_count; u32 write_actual; struct list_head rx_free; struct list_head rx_used; @@ -107,6 +122,32 @@ struct iwl_rxq { struct iwl_rb_status *rb_stts; dma_addr_t rb_stts_dma; spinlock_t lock; + struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE]; + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; +}; + +/** + * struct iwl_rb_allocator - Rx allocator + * @pool: initial pool of allocator + * @req_pending: number of requests the allocator had not processed yet + * @req_ready: number of requests honored and ready for claiming + * @rbd_allocated: RBDs with pages allocated and ready to be handed to + * the queue. This is a list of &struct iwl_rx_mem_buffer + * @rbd_empty: RBDs with no page attached for allocator use.
This is a list + * of &struct iwl_rx_mem_buffer + * @lock: protects the rbd_allocated and rbd_empty lists + * @alloc_wq: work queue for background calls + * @rx_alloc: work struct for background calls + */ +struct iwl_rb_allocator { + struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; + atomic_t req_pending; + atomic_t req_ready; + struct list_head rbd_allocated; + struct list_head rbd_empty; + spinlock_t lock; + struct workqueue_struct *alloc_wq; + struct work_struct rx_alloc; }; struct iwl_dma_ptr { @@ -250,7 +291,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data - * @rx_replenish: work that will be called when buffers need to be allocated + * @rba: allocator for RX replenishing * @drv - pointer to iwl_drv * @trans: pointer to the generic transport area * @scd_base_addr: scheduler sram base address in SRAM @@ -264,8 +305,10 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) * @rx_buf_size_8k: 8 kB RX buffer size * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) * @scd_set_active: should the transport configure the SCD for HCMD queue + * @wide_cmd_header: true when ucode supports wide command header format * @rx_page_order: page order for receive buffer size * @reg_lock: protect hw register access + * @mutex: to protect stop_device / start_fw / start_hw * @cmd_in_flight: true when we have a host command in flight * @fw_mon_phys: physical address of the buffer for the firmware monitor * @fw_mon_page: points to the first page of the buffer for the firmware monitor @@ -273,7 +316,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) */ struct iwl_trans_pcie { struct iwl_rxq rxq; - struct work_struct rx_replenish; + struct iwl_rb_allocator rba; struct iwl_trans *trans; struct iwl_drv *drv; @@ -285,9 +328,11 @@ struct iwl_trans_pcie { dma_addr_t ict_tbl_dma; int ict_index; bool use_ict; + bool is_down; struct isr_statistics isr_stats; spinlock_t irq_lock; + struct mutex mutex; u32 inta_mask; u32 scd_base_addr; struct iwl_dma_ptr scd_bc_tbls; @@ -314,6 +359,7 @@ struct iwl_trans_pcie { bool rx_buf_size_8k; bool bc_table_dword; bool scd_set_active; + bool wide_cmd_header; u32 rx_page_order; const char *const *command_names; @@ -385,7 +431,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_pcie_hcmd_complete(struct iwl_trans *trans, - struct iwl_rx_cmd_buffer *rxb, int handler_status); + struct iwl_rx_cmd_buffer *rxb); void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs); void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index adad8d0fae7f..e06591f625c4 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -1,7 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -74,16 +74,29 @@ * resets the Rx queue buffers with new memory. 
 * * The management in the driver is as follows: - * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When - * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled - * to replenish the iwl->rxq->rx_free. - * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the - * iwl->rxq is replenished and the READ INDEX is updated (updating the - * 'processed' and 'read' driver indexes as well) + * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. + * When the interrupt handler is called, the request is processed. + * The page is either stolen - transferred to the upper layer + * or reused - added immediately to the iwl->rxq->rx_free list. + * + When the page is stolen - the driver updates the matching queue's used + * count, detaches the RBD and transfers it to the queue used list. + * When there are two used RBDs - they are transferred to the allocator empty + * list. Work is then scheduled for the allocator to start allocating + * eight buffers. + * When there are 6 more used RBDs - they are transferred to the allocator + * empty list and the driver tries to claim the pre-allocated buffers and + * add them to iwl->rxq->rx_free. If it fails - it continues to claim them + * until ready. + * When there are 8+ buffers in the free list - either from allocation or from + * 8 reused unstolen pages - restock is called to update the FW and indexes. + * + In order to make sure the allocator always has RBDs to use for allocation + * the allocator has an initial pool of size num_queues*(8-2) - the + * maximum number of missing RBDs per allocation request (a request is posted + * with 2 empty RBDs, and there is no guarantee when the other 6 RBDs are + * supplied). + * The queues supply the recycling of the rest of the RBDs. * + A received packet is processed and handed to the kernel network stack, * detached from the iwl->rxq. The driver 'processed' index is updated. - * + The Host/Firmware iwl->rxq is replenished at irq thread time from the - * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free, + * + If there are no allocated buffers in iwl->rxq->rx_free, * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. * If there were enough free buffers and RX_STALLED is set it is cleared. * @@ -92,18 +105,32 @@ * * iwl_rxq_alloc() Allocates rx_free * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls - * iwl_pcie_rxq_restock + * iwl_pcie_rxq_restock. + * Used only during initialization. * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx * queue, updates firmware pointers, and updates - * the WRITE index. If insufficient rx_free buffers - * are available, schedules iwl_pcie_rx_replenish + * the WRITE index. + * iwl_pcie_rx_allocator() Background work for allocating pages. * * -- enable interrupts -- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the * READ INDEX, detaching the SKB from the pool. * Moves the packet buffer from queue to rx_used. + * Posts requests to the allocator and claims completed ones. * Calls iwl_pcie_rxq_restock to refill any empty * slots. + * + * RBD life-cycle: + * + * Init: + * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue + * + * Regular Receive interrupt: + * Page Stolen: + * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> + * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue + * Page not Stolen: + * rxq.queue -> rxq.rx_free -> rxq.queue * ...
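 + * + * To make the pool arithmetic above concrete (values taken from the RX_* defines in internal.h, single queue): a request is posted once RX_POST_REQ_ALLOC = 2 used RBDs have accumulated, and is completed with RX_CLAIM_REQ_ALLOC = 8 allocated RBDs, so the allocator must bridge a gap of 8 - 2 = 6 RBDs per request - hence RX_POOL_SIZE = (8 - 2) * RX_NUM_QUEUES = 6 entries.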
 * */ @@ -240,10 +267,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) rxq->free_count--; } spin_unlock(&rxq->lock); - /* If the pre-allocated buffer pool is dropping low, schedule to - * refill it */ - if (rxq->free_count <= RX_LOW_WATERMARK) - schedule_work(&trans_pcie->rx_replenish); /* If we've added more space for the firmware to place data, tell it. * Increment device's write pointer in multiples of 8. */ @@ -254,6 +277,45 @@ } } +/* + * iwl_pcie_rx_alloc_page - allocates and returns a page. + * + */ +static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, + gfp_t priority) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rxq *rxq = &trans_pcie->rxq; + struct page *page; + gfp_t gfp_mask = priority; + + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (trans_pcie->rx_page_order > 0) + gfp_mask |= __GFP_COMP; + + /* Alloc a new receive buffer */ + page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", + trans_pcie->rx_page_order); + /* Issue an error if the hardware has consumed more than half + * of its free buffer list and we don't have enough + * pre-allocated buffers. + */ + if (rxq->free_count <= RX_LOW_WATERMARK && + iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) && + net_ratelimit()) + IWL_CRIT(trans, + "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n", + rxq->free_count); + return NULL; + } + return page; +} + /* * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD * @@ -269,7 +331,6 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rx_mem_buffer *rxb; struct page *page; - gfp_t gfp_mask = priority; while (1) { spin_lock(&rxq->lock); @@ -279,32 +340,10 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) } spin_unlock(&rxq->lock); - if (rxq->free_count > RX_LOW_WATERMARK) - gfp_mask |= __GFP_NOWARN; - - if (trans_pcie->rx_page_order > 0) - gfp_mask |= __GFP_COMP; - /* Alloc a new receive buffer */ - page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); - if (!page) { - if (net_ratelimit()) - IWL_DEBUG_INFO(trans, "alloc_pages failed, " - "order: %d\n", - trans_pcie->rx_page_order); - - if ((rxq->free_count <= RX_LOW_WATERMARK) && - net_ratelimit()) - IWL_CRIT(trans, "Failed to alloc_pages with %s." - "Only %u free buffers remaining.\n", - priority == GFP_ATOMIC ? - "GFP_ATOMIC" : "GFP_KERNEL", - rxq->free_count); - /* We don't reschedule replenish work here -- we will - * call the restock method and if it still needs - * more buffers it will schedule replenish */ + page = iwl_pcie_rx_alloc_page(trans, priority); + if (!page) return; - } spin_lock(&rxq->lock); @@ -355,7 +394,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) lockdep_assert_held(&rxq->lock); - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + for (i = 0; i < RX_QUEUE_SIZE; i++) { if (!rxq->pool[i].page) continue; dma_unmap_page(trans->dev, rxq->pool[i].page_dma, @@ -372,32 +411,164 @@ * When moving to rx_free an page is allocated for the slot. * * Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called as a scheduled work item (except for during initialization) + * This is called only during initialization */ -static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp) +static void iwl_pcie_rx_replenish(struct iwl_trans *trans) { - iwl_pcie_rxq_alloc_rbs(trans, gfp); + iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL); iwl_pcie_rxq_restock(trans); } -static void iwl_pcie_rx_replenish_work(struct work_struct *data) +/* + * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues + * + * Allocates 8 pages for each received request. + * Called as a scheduled work item. + */ +static void iwl_pcie_rx_allocator(struct iwl_trans *trans) { - struct iwl_trans_pcie *trans_pcie = - container_of(data, struct iwl_trans_pcie, rx_replenish); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + struct list_head local_empty; + int pending = atomic_xchg(&rba->req_pending, 0); - iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL); + IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending); + + /* If we were scheduled - there is at least one request */ + spin_lock(&rba->lock); + /* swap out the rba->rbd_empty to a local list */ + list_replace_init(&rba->rbd_empty, &local_empty); + spin_unlock(&rba->lock); + + while (pending) { + int i; + struct list_head local_allocated; + + INIT_LIST_HEAD(&local_allocated); + + for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { + struct iwl_rx_mem_buffer *rxb; + struct page *page; + + /* List should never be empty - each reused RBD is + * returned to the list, and the initial pool covers any + * possible gap from the time the page is allocated + * to the time the RBD is added. + */ + BUG_ON(list_empty(&local_empty)); + /* Get the first rxb from the rbd list */ + rxb = list_first_entry(&local_empty, + struct iwl_rx_mem_buffer, list); + BUG_ON(rxb->page); + + /* Alloc a new receive buffer */ + page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL); + if (!page) + continue; + rxb->page = page; + + /* Get physical address of the RB */ + rxb->page_dma = dma_map_page(trans->dev, page, 0, + PAGE_SIZE << trans_pcie->rx_page_order, + DMA_FROM_DEVICE); + if (dma_mapping_error(trans->dev, rxb->page_dma)) { + rxb->page = NULL; + __free_pages(page, trans_pcie->rx_page_order); + continue; + } + /* dma address must be no more than 36 bits */ + BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); + /* and also 256 byte aligned! */ + BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); + + /* move the allocated entry to the out list */ + list_move(&rxb->list, &local_allocated); + i++; + } + + pending--; + if (!pending) { + pending = atomic_xchg(&rba->req_pending, 0); + IWL_DEBUG_RX(trans, + "Pending allocation requests = %d\n", + pending); + } + + spin_lock(&rba->lock); + /* add the allocated rbds to the allocator allocated list */ + list_splice_tail(&local_allocated, &rba->rbd_allocated); + /* get more empty RBDs for current pending requests */ + list_splice_tail_init(&rba->rbd_empty, &local_empty); + spin_unlock(&rba->lock); + + atomic_inc(&rba->req_ready); + } + + spin_lock(&rba->lock); + /* return unused rbds to the allocator empty list */ + list_splice_tail(&local_empty, &rba->rbd_empty); + spin_unlock(&rba->lock); +} + +/* + * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages + * + * Called by the queue when it has posted an allocation request and + * has freed 8 RBDs in order to restock itself.
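 + * + * Returns 0 and fills @out with RX_CLAIM_REQ_ALLOC page-backed RBDs when a + * completed request is ready, or -ENOMEM when no completed request is ready + * yet - in which case the caller simply retries on a later pass of + * iwl_pcie_rx_handle.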
+ */ +static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans, + struct iwl_rx_mem_buffer + *out[RX_CLAIM_REQ_ALLOC]) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i; + + /* + * atomic_dec_if_positive returns req_ready - 1 for any scenario. + * If req_ready is 0 atomic_dec_if_positive will return -1 and this + * function will return -ENOMEM, as there are no ready requests. + * atomic_dec_if_positive will perform the *actual* decrement only if + * req_ready > 0, i.e. - there are ready requests and the function + * hands one request to the caller. + */ + if (atomic_dec_if_positive(&rba->req_ready) < 0) + return -ENOMEM; + + spin_lock(&rba->lock); + for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { + /* Get next free Rx buffer, remove it from free list */ + out[i] = list_first_entry(&rba->rbd_allocated, + struct iwl_rx_mem_buffer, list); + list_del(&out[i]->list); + } + spin_unlock(&rba->lock); + + return 0; +} + +static void iwl_pcie_rx_allocator_work(struct work_struct *data) +{ + struct iwl_rb_allocator *rba_p = + container_of(data, struct iwl_rb_allocator, rx_alloc); + struct iwl_trans_pcie *trans_pcie = + container_of(rba_p, struct iwl_trans_pcie, rba); + + iwl_pcie_rx_allocator(trans_pcie->trans); } static int iwl_pcie_rx_alloc(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; struct device *dev = trans->dev; memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); spin_lock_init(&rxq->lock); + spin_lock_init(&rba->lock); if (WARN_ON(rxq->bd || rxq->rb_stts)) return -EINVAL; @@ -487,15 +658,49 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) INIT_LIST_HEAD(&rxq->rx_free); INIT_LIST_HEAD(&rxq->rx_used); rxq->free_count = 0; + rxq->used_count = 0; - for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) + for (i = 0; i < RX_QUEUE_SIZE; i++) list_add(&rxq->pool[i].list, &rxq->rx_used); } +static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) +{ + int i; + + lockdep_assert_held(&rba->lock); + + INIT_LIST_HEAD(&rba->rbd_allocated); + INIT_LIST_HEAD(&rba->rbd_empty); + + for (i = 0; i < RX_POOL_SIZE; i++) + list_add(&rba->pool[i].list, &rba->rbd_empty); +} + +static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i; + + lockdep_assert_held(&rba->lock); + + for (i = 0; i < RX_POOL_SIZE; i++) { + if (!rba->pool[i].page) + continue; + dma_unmap_page(trans->dev, rba->pool[i].page_dma, + PAGE_SIZE << trans_pcie->rx_page_order, + DMA_FROM_DEVICE); + __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); + rba->pool[i].page = NULL; + } +} + int iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; int i, err; if (!rxq->bd) { @@ -503,11 +708,21 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) if (err) return err; } + if (!rba->alloc_wq) + rba->alloc_wq = alloc_workqueue("rb_allocator", + WQ_HIGHPRI | WQ_UNBOUND, 1); + INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); + + spin_lock(&rba->lock); + atomic_set(&rba->req_pending, 0); + atomic_set(&rba->req_ready, 0); + /* free all first - we might be reconfigured for a different size */ + iwl_pcie_rx_free_rba(trans); + 
 iwl_pcie_rx_init_rba(rba); + spin_unlock(&rba->lock); spin_lock(&rxq->lock); - INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work); - /* free all first - we might be reconfigured for a different size */ iwl_pcie_rxq_free_rbs(trans); iwl_pcie_rx_init_rxb_lists(rxq); @@ -522,7 +737,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); spin_unlock(&rxq->lock); - iwl_pcie_rx_replenish(trans, GFP_KERNEL); + iwl_pcie_rx_replenish(trans); iwl_pcie_rx_hw_init(trans, rxq); @@ -537,6 +752,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rb_allocator *rba = &trans_pcie->rba; /*if rxq->bd is NULL, it means that nothing has been allocated, * exit now */ @@ -545,7 +761,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) return; } - cancel_work_sync(&trans_pcie->rx_replenish); + cancel_work_sync(&rba->rx_alloc); + if (rba->alloc_wq) { + destroy_workqueue(rba->alloc_wq); + rba->alloc_wq = NULL; + } + + spin_lock(&rba->lock); + iwl_pcie_rx_free_rba(trans); + spin_unlock(&rba->lock); spin_lock(&rxq->lock); iwl_pcie_rxq_free_rbs(trans); @@ -566,8 +790,49 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) rxq->rb_stts = NULL; } +/* + * iwl_pcie_rx_reuse_rbd - Recycle used RBDs + * + * Called when an RBD can be reused. The RBD is transferred to the allocator. + * When there are 2 empty RBDs - a request for allocation is posted + */ +static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, + struct iwl_rx_mem_buffer *rxb, + struct iwl_rxq *rxq, bool emergency) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + + /* Move the RBD to the used list, will be moved to allocator in batches + * before claiming or posting a request */ + list_add_tail(&rxb->list, &rxq->rx_used); + + if (unlikely(emergency)) + return; + + /* Count the allocator owned RBDs */ + rxq->used_count++; + + /* If we have RX_POST_REQ_ALLOC newly released rx buffers - + * issue a request for the allocator. The modulo RX_CLAIM_REQ_ALLOC + * covers the case where we failed to claim RX_CLAIM_REQ_ALLOC + * buffers but still need to post another request. + */ + if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { + /* Move the 2 RBDs to the allocator ownership.
+ * The allocator has another 6 from the pool for the request completion */ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); + spin_unlock(&rba->lock); + + atomic_inc(&rba->req_pending); + queue_work(rba->alloc_wq, &rba->rx_alloc); + } +} + static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, - struct iwl_rx_mem_buffer *rxb) + struct iwl_rx_mem_buffer *rxb, + bool emergency) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; @@ -583,10 +848,9 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { struct iwl_rx_packet *pkt; - struct iwl_device_cmd *cmd; u16 sequence; bool reclaim; - int index, cmd_index, err, len; + int index, cmd_index, len; struct iwl_rx_cmd_buffer rxcb = { ._offset = offset, ._rx_page_order = trans_pcie->rx_page_order, @@ -634,12 +898,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, index = SEQ_TO_INDEX(sequence); cmd_index = get_cmd_index(&txq->q, index); - if (reclaim) - cmd = txq->entries[cmd_index].cmd; - else - cmd = NULL; - - err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); + iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb); if (reclaim) { kzfree(txq->entries[cmd_index].free_buf); @@ -657,7 +916,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, * iwl_trans_send_cmd() * as we reclaim the driver command queue */ if (!rxcb._page_stolen) - iwl_pcie_hcmd_complete(trans, &rxcb, err); + iwl_pcie_hcmd_complete(trans, &rxcb); else IWL_WARN(trans, "Claim null rxb?\n"); } @@ -688,13 +947,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, */ __free_pages(rxb->page, trans_pcie->rx_page_order); rxb->page = NULL; - list_add_tail(&rxb->list, &rxq->rx_used); + iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); } else { list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; } } else - list_add_tail(&rxb->list, &rxq->rx_used); + iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); } /* @@ -704,10 +963,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - u32 r, i; - u8 fill_rx = 0; - u32 count = 8; - int total_empty; + u32 r, i, j, count = 0; + bool emergency = false; restart: spin_lock(&rxq->lock); @@ -720,36 +977,74 @@ restart: if (i == r) IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); - /* calculate total frames need to be restock after handling RX */ - total_empty = r - rxq->write_actual; - if (total_empty < 0) - total_empty += RX_QUEUE_SIZE; - - if (total_empty > (RX_QUEUE_SIZE / 2)) - fill_rx = 1; - while (i != r) { struct iwl_rx_mem_buffer *rxb; + if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2)) + emergency = true; + rxb = rxq->queue[i]; rxq->queue[i] = NULL; IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n", r, i, rxb); - iwl_pcie_rx_handle_rb(trans, rxb); + iwl_pcie_rx_handle_rb(trans, rxb, emergency); i = (i + 1) & RX_QUEUE_MASK; - /* If there are a lot of unused frames, - * restock the Rx queue so ucode wont assert.
*/ - if (fill_rx) { - count++; - if (count >= 8) { - rxq->read = i; - spin_unlock(&rxq->lock); - iwl_pcie_rx_replenish(trans, GFP_ATOMIC); - count = 0; - goto restart; + + /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - + * try to claim the pre-allocated buffers from the allocator */ + if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { + struct iwl_rb_allocator *rba = &trans_pcie->rba; + struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; + + if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && + !emergency) { + /* Add the remaining 6 empty RBDs + * for allocator use + */ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, + &rba->rbd_empty); + spin_unlock(&rba->lock); } + + /* If not ready - continue, will try to reclaim later. + * No need to reschedule work - allocator exits only on + * success */ + if (!iwl_pcie_rx_allocator_get(trans, out)) { + /* If success - then RX_CLAIM_REQ_ALLOC + * buffers were retrieved and should be added + * to free list */ + rxq->used_count -= RX_CLAIM_REQ_ALLOC; + for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) { + list_add_tail(&out[j]->list, + &rxq->rx_free); + rxq->free_count++; + } + } + } + if (emergency) { + count++; + if (count == 8) { + count = 0; + if (rxq->used_count < RX_QUEUE_SIZE / 3) + emergency = false; + spin_unlock(&rxq->lock); + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); + spin_lock(&rxq->lock); + } + } + /* handle restock for three cases, can be all of them at once: + * - we just pulled buffers from the allocator + * - we have 8+ unstolen pages accumulated + * - we are in emergency and allocated buffers + */ + if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { + rxq->read = i; + spin_unlock(&rxq->lock); + iwl_pcie_rxq_restock(trans); + goto restart; } } @@ -757,10 +1052,20 @@ restart: rxq->read = i; spin_unlock(&rxq->lock); - if (fill_rx) - iwl_pcie_rx_replenish(trans, GFP_ATOMIC); - else - iwl_pcie_rxq_restock(trans); + /* + * handle a case where in emergency there are some unallocated RBDs. + * those RBDs are in the used list, but are not tracked by the queue's + * used_count which counts allocator owned RBDs. + * unallocated emergency RBDs must be allocated on exit, otherwise + * when called again the function may not be in emergency mode and + * they will be handed to the allocator with no tracking in the RBD + * allocator counters, which will lead to them never being claimed back + * by the queue. + * by allocating them here, they are now in the queue free list, and + * will be restocked by the next call of iwl_pcie_rxq_restock. 
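 + * a non-zero 'count' here means that 1 to 7 RBDs were handled in emergency + * mode since the last batch allocation in the loop above, so their + * replacement pages have not been allocated yet.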
+ */ + if (unlikely(emergency && count)) + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); if (trans_pcie->napi.poll) napi_gro_flush(&trans_pcie->napi, false); @@ -772,6 +1077,7 @@ restart: static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ if (trans->cfg->internal_wimax_coex && @@ -795,6 +1101,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) iwl_trans_fw_error(trans); local_bh_enable(); + for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) + del_timer(&trans_pcie->txq[i].stuck_timer); + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); wake_up(&trans_pcie->wait_command_queue); } @@ -1003,7 +1312,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) isr_stats->rfkill++; + mutex_lock(&trans_pcie->mutex); iwl_trans_pcie_rf_kill(trans, hw_rfkill); + mutex_unlock(&trans_pcie->mutex); if (hw_rfkill) { set_bit(STATUS_RFKILL, &trans->status); if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, @@ -1195,8 +1506,9 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans) val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; - val |= CSR_DRAM_INT_TBL_ENABLE; - val |= CSR_DRAM_INIT_TBL_WRAP_CHECK; + val |= CSR_DRAM_INT_TBL_ENABLE | + CSR_DRAM_INIT_TBL_WRAP_CHECK | + CSR_DRAM_INIT_TBL_WRITE_POINTER; IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 6203c4ad9bba..6ba7d300b08f 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_WAKE_ME); - else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE | CSR_HW_IF_CONFIG_REG_ENABLE_PME); + mdelay(1); + iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + } mdelay(5); } @@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) if (ret >= 0) return 0; + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + msleep(1); + for (iter = 0; iter < 10; iter++) { /* If HW is not ready, prepare the conditions to check again */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, @@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) do { ret = iwl_pcie_set_hw_ready(trans); - if (ret >= 0) - return 0; + if (ret >= 0) { + ret = 0; + goto out; + } usleep_range(200, 1000); t += 200; @@ -593,6 +605,10 @@ IWL_ERR(trans, "Couldn't prepare the card\n"); +out: + iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + return ret; } @@ -764,8 +780,15 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) { last_read_idx = i; + /* + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates between + * CPU1 and CPU2. + * PAGING_SEPARATOR_SECTION delimiter - separates between + * CPU2 non-paged and CPU2 paging sections.
+ */ if (!image->sec[i].data || - image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) { + image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || + image->sec[i].offset == PAGING_SEPARATOR_SECTION) { IWL_DEBUG_FW(trans, "Break since Data not valid or Empty section, sec = %d\n", i); @@ -813,8 +836,15 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) { last_read_idx = i; + /* + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates between + * CPU1 and CPU2. + * PAGING_SEPARATOR_SECTION delimiter - separates between + * CPU2 non-paged and CPU2 paging sections. + */ if (!image->sec[i].data || - image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) { + image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || + image->sec[i].offset == PAGING_SEPARATOR_SECTION) { IWL_DEBUG_FW(trans, "Break since Data not valid or Empty section, sec = %d\n", i); @@ -881,6 +911,14 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans) case PRPH_CLEARBIT: iwl_clear_bits_prph(trans, addr, BIT(val)); break; + case PRPH_BLOCKBIT: + if (iwl_read_prph(trans, addr) & BIT(val)) { + IWL_ERR(trans, + "BIT(%u) in address 0x%x is 1, stopping FW configuration\n", + val, addr); + goto monitor; + } + break; default: IWL_ERR(trans, "FW debug - unknown OP %d\n", dest->reg_ops[i].op); @@ -888,6 +926,7 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans) } } +monitor: if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) { iwl_write_prph(trans, le32_to_cpu(dest->base_reg), trans_pcie->fw_mon_phys >> dest->base_shift); @@ -982,13 +1021,25 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill) { - int ret; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; + int ret; + + mutex_lock(&trans_pcie->mutex); + + /* Someone called stop_device, don't try to start_fw */ + if (trans_pcie->is_down) { + IWL_WARN(trans, + "Can't start_fw since the HW hasn't been started\n"); + ret = -EIO; + goto out; + } /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); - return -EIO; + ret = -EIO; + goto out; } iwl_enable_rfkill_int(trans); @@ -1000,15 +1051,17 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, else clear_bit(STATUS_RFKILL, &trans->status); iwl_trans_pcie_rf_kill(trans, hw_rfkill); - if (hw_rfkill && !run_in_rfkill) - return -ERFKILL; + if (hw_rfkill && !run_in_rfkill) { + ret = -ERFKILL; + goto out; + } iwl_write32(trans, CSR_INT, 0xFFFFFFFF); ret = iwl_pcie_nic_init(trans); if (ret) { IWL_ERR(trans, "Unable to init nic\n"); - return ret; + goto out; } /* make sure rfkill handshake bits are cleared */ @@ -1026,9 +1079,13 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, /* Load the given image to the HW */ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) - return iwl_pcie_load_given_ucode_8000(trans, fw); + ret = iwl_pcie_load_given_ucode_8000(trans, fw); else - return iwl_pcie_load_given_ucode(trans, fw); + ret = iwl_pcie_load_given_ucode(trans, fw); + +out: + mutex_unlock(&trans_pcie->mutex); + return ret; } static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) @@ -1037,11 +1094,18 @@ iwl_pcie_tx_start(trans, scd_addr); } -static void iwl_trans_pcie_stop_device(struct iwl_trans
*trans, bool low_power) +static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill, was_hw_rfkill; + lockdep_assert_held(&trans_pcie->mutex); + + if (trans_pcie->is_down) + return; + + trans_pcie->is_down = true; + was_hw_rfkill = iwl_is_rfkill_set(trans); /* tell the device to stop sending interrupts */ @@ -1131,14 +1195,36 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) iwl_pcie_prepare_card_hw(trans); } +static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + mutex_lock(&trans_pcie->mutex); + _iwl_trans_pcie_stop_device(trans, low_power); + mutex_unlock(&trans_pcie->mutex); +} + void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) { + struct iwl_trans_pcie __maybe_unused *trans_pcie = + IWL_TRANS_GET_PCIE_TRANS(trans); + + lockdep_assert_held(&trans_pcie->mutex); + if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) - iwl_trans_pcie_stop_device(trans, true); + _iwl_trans_pcie_stop_device(trans, true); } static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (trans->wowlan_d0i3) { + /* Enable persistence mode to avoid reset */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PERSIST_MODE); + } + iwl_disable_interrupts(trans); /* @@ -1150,17 +1236,21 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) iwl_pcie_disable_ict(trans); + synchronize_irq(trans_pcie->pci_dev->irq); + iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - /* - * reset TX queues -- some of their registers reset during S3 - * so if we don't reset everything here the D3 image would try - * to execute some invalid memory upon resume - */ - iwl_trans_pcie_tx_reset(trans); + if (!trans->wowlan_d0i3) { + /* + * reset TX queues -- some of their registers reset during S3 + * so if we don't reset everything here the D3 image would try + * to execute some invalid memory upon resume + */ + iwl_trans_pcie_tx_reset(trans); + } iwl_pcie_set_pwr(trans, true); } @@ -1202,12 +1292,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, iwl_pcie_set_pwr(trans, false); - iwl_trans_pcie_tx_reset(trans); + if (trans->wowlan_d0i3) { + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + } else { + iwl_trans_pcie_tx_reset(trans); - ret = iwl_pcie_rx_init(trans); - if (ret) { - IWL_ERR(trans, "Failed to resume the device (RX reset)\n"); - return ret; + ret = iwl_pcie_rx_init(trans); + if (ret) { + IWL_ERR(trans, + "Failed to resume the device (RX reset)\n"); + return ret; + } } val = iwl_read32(trans, CSR_RESET); @@ -1219,11 +1315,14 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return 0; } -static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) +static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; int err; + lockdep_assert_held(&trans_pcie->mutex); + err = iwl_pcie_prepare_card_hw(trans); if (err) { IWL_ERR(trans, "Error while preparing HW: %d\n", err); @@ -1240,20 +1339,38 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) /* From now on, the 
op_mode will be kept updated about RF kill state */ iwl_enable_rfkill_int(trans); + /* Set is_down to false here so that...*/ + trans_pcie->is_down = false; + hw_rfkill = iwl_is_rfkill_set(trans); if (hw_rfkill) set_bit(STATUS_RFKILL, &trans->status); else clear_bit(STATUS_RFKILL, &trans->status); + /* ... rfkill can call stop_device and set it false if needed */ iwl_trans_pcie_rf_kill(trans, hw_rfkill); return 0; } +static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + mutex_lock(&trans_pcie->mutex); + ret = _iwl_trans_pcie_start_hw(trans, low_power); + mutex_unlock(&trans_pcie->mutex); + + return ret; +} + static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + mutex_lock(&trans_pcie->mutex); + /* disable interrupts - don't enable HW RF kill interrupt */ spin_lock(&trans_pcie->irq_lock); iwl_disable_interrupts(trans); @@ -1266,6 +1383,10 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) spin_unlock(&trans_pcie->irq_lock); iwl_pcie_disable_ict(trans); + + mutex_unlock(&trans_pcie->mutex); + + synchronize_irq(trans_pcie->pci_dev->irq); } static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) @@ -1326,6 +1447,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, else trans_pcie->rx_page_order = get_order(4 * 1024); + trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header; trans_pcie->command_names = trans_cfg->command_names; trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; trans_pcie->scd_set_active = trans_cfg->scd_set_active; @@ -1338,11 +1460,10 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, * As this function may be called again in some corner cases don't * do anything if NAPI was already initialized. 
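 + * (The dummy netdev initialized below exists only to host the NAPI context + * used for napi_gro_flush() on the RX path; the dummy poll callback is not + * expected to be invoked.)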
*/ - if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) { + if (!trans_pcie->napi.poll) { init_dummy_netdev(&trans_pcie->napi_dev); - iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi, - &trans_pcie->napi_dev, - iwl_pcie_dummy_napi_poll, 64); + netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi, + iwl_pcie_dummy_napi_poll, 64); } } @@ -2169,6 +2290,47 @@ static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans, return prph_len; } +static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data, + int allocated_rb_nums) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int max_len = PAGE_SIZE << trans_pcie->rx_page_order; + struct iwl_rxq *rxq = &trans_pcie->rxq; + u32 i, r, j, rb_len = 0; + + spin_lock(&rxq->lock); + + r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; + + for (i = rxq->read, j = 0; + i != r && j < allocated_rb_nums; + i = (i + 1) & RX_QUEUE_MASK, j++) { + struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; + struct iwl_fw_error_dump_rb *rb; + + dma_unmap_page(trans->dev, rxb->page_dma, max_len, + DMA_FROM_DEVICE); + + rb_len += sizeof(**data) + sizeof(*rb) + max_len; + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); + (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); + rb = (void *)(*data)->data; + rb->index = cpu_to_le32(i); + memcpy(rb->data, page_address(rxb->page), max_len); + /* remap the page for the free benefit */ + rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0, + max_len, + DMA_FROM_DEVICE); + + *data = iwl_fw_error_next_data(*data); + } + + spin_unlock(&rxq->lock); + + return rb_len; +} #define IWL_CSR_TO_DUMP (0x250) static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, @@ -2238,17 +2400,97 @@ iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, return monitor_len; } -static -struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans) +static u32 +iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data, + u32 monitor_len) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 len = 0; + + if ((trans_pcie->fw_mon_page && + trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) || + trans->dbg_dest_tlv) { + struct iwl_fw_error_dump_fw_mon *fw_mon_data; + u32 base, write_ptr, wrap_cnt; + + /* If there was a dest TLV - use the values from there */ + if (trans->dbg_dest_tlv) { + write_ptr = + le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg); + wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count); + base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); + } else { + base = MON_BUFF_BASE_ADDR; + write_ptr = MON_BUFF_WRPTR; + wrap_cnt = MON_BUFF_CYCLE_CNT; + } + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); + fw_mon_data = (void *)(*data)->data; + fw_mon_data->fw_mon_wr_ptr = + cpu_to_le32(iwl_read_prph(trans, write_ptr)); + fw_mon_data->fw_mon_cycle_cnt = + cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); + fw_mon_data->fw_mon_base_ptr = + cpu_to_le32(iwl_read_prph(trans, base)); + + len += sizeof(**data) + sizeof(*fw_mon_data); + if (trans_pcie->fw_mon_page) { + /* + * The firmware is now asserted, it won't write anything + * to the buffer. CPU can take ownership to fetch the + * data. The buffer will be handed back to the device + * before the firmware will be restarted. 
+ */ + dma_sync_single_for_cpu(trans->dev, + trans_pcie->fw_mon_phys, + trans_pcie->fw_mon_size, + DMA_FROM_DEVICE); + memcpy(fw_mon_data->data, + page_address(trans_pcie->fw_mon_page), + trans_pcie->fw_mon_size); + + monitor_len = trans_pcie->fw_mon_size; + } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) { + /* + * Update pointers to reflect actual values after + * shifting + */ + base = iwl_read_prph(trans, base) << + trans->dbg_dest_tlv->base_shift; + iwl_trans_read_mem(trans, base, fw_mon_data->data, + monitor_len / sizeof(u32)); + } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) { + monitor_len = + iwl_trans_pci_dump_marbh_monitor(trans, + fw_mon_data, + monitor_len); + } else { + /* Didn't match anything - output no monitor data */ + monitor_len = 0; + } + + len += monitor_len; + (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); + } + + return len; +} + +static struct iwl_trans_dump_data +*iwl_trans_pcie_dump_data(struct iwl_trans *trans, + struct iwl_fw_dbg_trigger_tlv *trigger) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_fw_error_dump_data *data; struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_trans_dump_data *dump_data; - u32 len; + u32 len, num_rbs; u32 monitor_len; int i, ptr; + bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status); /* transport dump header */ len = sizeof(*dump_data); @@ -2257,22 +2499,6 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans) len += sizeof(*data) + cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); - /* CSR registers */ - len += sizeof(*data) + IWL_CSR_TO_DUMP; - - /* PRPH registers */ - for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { - /* The range includes both boundaries */ - int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - - iwl_prph_dump_addr[i].start + 4; - - len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) + - num_bytes_in_chunk; - } - - /* FH registers */ - len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); - /* FW monitor */ if (trans_pcie->fw_mon_page) { len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) + @@ -2300,6 +2526,45 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans) monitor_len = 0; } + if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) { + dump_data = vzalloc(len); + if (!dump_data) + return NULL; + + data = (void *)dump_data->data; + len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); + dump_data->len = len; + + return dump_data; + } + + /* CSR registers */ + len += sizeof(*data) + IWL_CSR_TO_DUMP; + + /* PRPH registers */ + for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) { + /* The range includes both boundaries */ + int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - + iwl_prph_dump_addr[i].start + 4; + + len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) + + num_bytes_in_chunk; + } + + /* FH registers */ + len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); + + if (dump_rbs) { + /* RBs */ + num_rbs = le16_to_cpu(ACCESS_ONCE( + trans_pcie->rxq.rb_stts->closed_rb_num)) + & 0x0FFF; + num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK; + len += num_rbs * (sizeof(*data) + + sizeof(struct iwl_fw_error_dump_rb) + + (PAGE_SIZE << trans_pcie->rx_page_order)); + } + dump_data = vzalloc(len); if (!dump_data) return NULL; @@ -2336,74 +2601,10 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans 
*trans) len += iwl_trans_pcie_dump_prph(trans, &data); len += iwl_trans_pcie_dump_csr(trans, &data); len += iwl_trans_pcie_fh_regs_dump(trans, &data); - /* data is already pointing to the next section */ + if (dump_rbs) + len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); - if ((trans_pcie->fw_mon_page && - trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) || - trans->dbg_dest_tlv) { - struct iwl_fw_error_dump_fw_mon *fw_mon_data; - u32 base, write_ptr, wrap_cnt; - - /* If there was a dest TLV - use the values from there */ - if (trans->dbg_dest_tlv) { - write_ptr = - le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg); - wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count); - base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); - } else { - base = MON_BUFF_BASE_ADDR; - write_ptr = MON_BUFF_WRPTR; - wrap_cnt = MON_BUFF_CYCLE_CNT; - } - - data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); - fw_mon_data = (void *)data->data; - fw_mon_data->fw_mon_wr_ptr = - cpu_to_le32(iwl_read_prph(trans, write_ptr)); - fw_mon_data->fw_mon_cycle_cnt = - cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); - fw_mon_data->fw_mon_base_ptr = - cpu_to_le32(iwl_read_prph(trans, base)); - - len += sizeof(*data) + sizeof(*fw_mon_data); - if (trans_pcie->fw_mon_page) { - /* - * The firmware is now asserted, it won't write anything - * to the buffer. CPU can take ownership to fetch the - * data. The buffer will be handed back to the device - * before the firmware will be restarted. - */ - dma_sync_single_for_cpu(trans->dev, - trans_pcie->fw_mon_phys, - trans_pcie->fw_mon_size, - DMA_FROM_DEVICE); - memcpy(fw_mon_data->data, - page_address(trans_pcie->fw_mon_page), - trans_pcie->fw_mon_size); - - monitor_len = trans_pcie->fw_mon_size; - } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) { - /* - * Update pointers to reflect actual values after - * shifting - */ - base = iwl_read_prph(trans, base) << - trans->dbg_dest_tlv->base_shift; - iwl_trans_read_mem(trans, base, fw_mon_data->data, - monitor_len / sizeof(u32)); - } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) { - monitor_len = - iwl_trans_pci_dump_marbh_monitor(trans, - fw_mon_data, - monitor_len); - } else { - /* Didn't match anything - output no monitor data */ - monitor_len = 0; - } - - len += monitor_len; - data->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); - } + len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); dump_data->len = len; @@ -2466,12 +2667,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, if (!trans) return ERR_PTR(-ENOMEM); + trans->max_skb_frags = IWL_PCIE_MAX_FRAGS; + trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); trans_pcie->trans = trans; spin_lock_init(&trans_pcie->irq_lock); spin_lock_init(&trans_pcie->reg_lock); spin_lock_init(&trans_pcie->ref_lock); + mutex_init(&trans_pcie->mutex); init_waitqueue_head(&trans_pcie->ucode_write_waitq); ret = pci_enable_device(pdev); diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 2b86c2135de3..a8c8a4a7420b 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -219,8 +219,6 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; - WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); - sta_id = tx_cmd->sta_id; sec_ctl = tx_cmd->sec_ctl; @@ -239,6 +237,9 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, if (trans_pcie->bc_table_dword) len = DIV_ROUND_UP(len, 4); + if 
(WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) + return; + bc_ent = cpu_to_le16(len | (sta_id << 12)); scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; @@ -387,11 +388,18 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, /* first TB is never freed - it's the scratchbuf data */ - for (i = 1; i < num_tbs; i++) - dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i), - iwl_pcie_tfd_tb_get_len(tfd, i), - DMA_TO_DEVICE); - + for (i = 1; i < num_tbs; i++) { + if (meta->flags & BIT(i + CMD_TB_BITMAP_POS)) + dma_unmap_page(trans->dev, + iwl_pcie_tfd_tb_get_addr(tfd, i), + iwl_pcie_tfd_tb_get_len(tfd, i), + DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, + iwl_pcie_tfd_tb_get_addr(tfd, i), + iwl_pcie_tfd_tb_get_len(tfd, i), + DMA_TO_DEVICE); + } tfd->num_tbs = 0; } @@ -467,7 +475,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len); - return 0; + return num_tbs; } static int iwl_pcie_txq_alloc(struct iwl_trans *trans, @@ -915,6 +923,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) } } + iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); if (trans->cfg->base_params->num_of_queues > 20) iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_ENABLE_31_QUEUES); @@ -1320,13 +1329,24 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, int idx; u16 copy_size, cmd_size, scratch_size; bool had_nocopy = false; + u8 group_id = iwl_cmd_groupid(cmd->id); int i, ret; u32 cmd_pos; const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; - copy_size = sizeof(out_cmd->hdr); - cmd_size = sizeof(out_cmd->hdr); + if (WARN(!trans_pcie->wide_cmd_header && + group_id > IWL_ALWAYS_LONG_GROUP, + "unsupported wide command %#x\n", cmd->id)) + return -EINVAL; + + if (group_id != 0) { + copy_size = sizeof(struct iwl_cmd_header_wide); + cmd_size = sizeof(struct iwl_cmd_header_wide); + } else { + copy_size = sizeof(struct iwl_cmd_header); + cmd_size = sizeof(struct iwl_cmd_header); + } /* need one for the header if the first is NOCOPY */ BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); @@ -1416,16 +1436,32 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, out_meta->source = cmd; /* set up the header */ + if (group_id != 0) { + out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); + out_cmd->hdr_wide.group_id = group_id; + out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); + out_cmd->hdr_wide.length = + cpu_to_le16(cmd_size - + sizeof(struct iwl_cmd_header_wide)); + out_cmd->hdr_wide.reserved = 0; + out_cmd->hdr_wide.sequence = + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | + INDEX_TO_SEQ(q->write_ptr)); - out_cmd->hdr.cmd = cmd->id; - out_cmd->hdr.flags = 0; - out_cmd->hdr.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | - INDEX_TO_SEQ(q->write_ptr)); + cmd_pos = sizeof(struct iwl_cmd_header_wide); + copy_size = sizeof(struct iwl_cmd_header_wide); + } else { + out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); + out_cmd->hdr.sequence = + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | + INDEX_TO_SEQ(q->write_ptr)); + out_cmd->hdr.group_id = 0; + + cmd_pos = sizeof(struct iwl_cmd_header); + copy_size = sizeof(struct iwl_cmd_header); + } /* and copy the data that needs to be copied */ - cmd_pos = offsetof(struct iwl_device_cmd, payload); - copy_size = sizeof(out_cmd->hdr); for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { int copy; @@ -1464,9 +1500,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, } IWL_DEBUG_HC(trans, - 
"Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", + "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", get_cmd_string(trans_pcie, out_cmd->hdr.cmd), - out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), + group_id, out_cmd->hdr.cmd, + le16_to_cpu(out_cmd->hdr.sequence), cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); /* start the TFD with the scratchbuf */ @@ -1516,12 +1553,14 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); } + BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS > + sizeof(out_meta->flags) * BITS_PER_BYTE); out_meta->flags = cmd->flags; if (WARN_ON_ONCE(txq->entries[idx].free_buf)) kzfree(txq->entries[idx].free_buf); txq->entries[idx].free_buf = dup_buf; - trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr); + trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); /* start timer if queue currently empty */ if (q->read_ptr == q->write_ptr && txq->wd_timeout) @@ -1552,15 +1591,13 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, /* * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them * @rxb: Rx buffer to reclaim - * @handler_status: return value of the handler of the command - * (put in setup_rx_handlers) * * If an Rx buffer has an async callback associated with it the callback * will be executed. The attached skb (if present) will only be freed * if the callback returns 1 */ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, - struct iwl_rx_cmd_buffer *rxb, int handler_status) + struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 sequence = le16_to_cpu(pkt->hdr.sequence); @@ -1599,7 +1636,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, meta->source->resp_pkt = pkt; meta->source->_rx_page_addr = (unsigned long)page_address(p); meta->source->_rx_page_order = trans_pcie->rx_page_order; - meta->source->handler_status = handler_status; } iwl_pcie_cmdq_reclaim(trans, txq_id, index); @@ -1762,7 +1798,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_hdr *hdr; struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; struct iwl_cmd_meta *out_meta; struct iwl_txq *txq; @@ -1771,9 +1807,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, void *tb1_addr; u16 len, tb1_len, tb2_len; bool wait_write_ptr; - __le16 fc = hdr->frame_control; - u8 hdr_len = ieee80211_hdrlen(fc); + __le16 fc; + u8 hdr_len; u16 wifi_seq; + int i; txq = &trans_pcie->txq[txq_id]; q = &txq->q; @@ -1782,6 +1819,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, "TX on unused queue %d\n", txq_id)) return -EINVAL; + if (skb_is_nonlinear(skb) && + skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS && + __skb_linearize(skb)) + return -ENOMEM; + + /* mac80211 always puts the full header into the SKB's head, + * so there's no need to check if it's readable there + */ + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + hdr_len = ieee80211_hdrlen(fc); + spin_lock(&txq->lock); /* In AGG mode, the index in the ring must correspond to the WiFi @@ -1812,6 +1861,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_meta = 
&txq->entries[q->write_ptr].meta; + out_meta->flags = 0; /* * The second TB (tb1) points to the remainder of the TX command @@ -1845,9 +1895,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* * Set up TFD's third entry to point directly to remainder - * of skb, if any (802.11 null frames have no payload). + * of skb's head, if any */ - tb2_len = skb->len - hdr_len; + tb2_len = skb_headlen(skb) - hdr_len; if (tb2_len > 0) { dma_addr_t tb2_phys = dma_map_single(trans->dev, skb->data + hdr_len, @@ -1860,6 +1910,29 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false); } + /* set up the remaining entries to point to the data */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + dma_addr_t tb_phys; + int tb_idx; + + if (!skb_frag_size(frag)) + continue; + + tb_phys = skb_frag_dma_map(trans->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { + iwl_pcie_tfd_unmap(trans, out_meta, + &txq->tfds[q->write_ptr]); + goto out_err; + } + tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, + skb_frag_size(frag), false); + + out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS); + } + /* Set up entry for this TFD in Tx byte-count array */ iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); @@ -1869,14 +1942,25 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len, skb->data + hdr_len, tb2_len); trace_iwlwifi_dev_tx_data(trans->dev, skb, - skb->data + hdr_len, tb2_len); + hdr_len, skb->len - hdr_len); wait_write_ptr = ieee80211_has_morefrags(fc); /* start timer if queue currently empty */ if (q->read_ptr == q->write_ptr) { - if (txq->wd_timeout) - mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); + if (txq->wd_timeout) { + /* + * If the TXQ is active, arm the timer now; if not, + * save the timeout in frozen_expiry_remainder so that + * the timer will be armed with the right value when + * the station wakes up.
+ */ + if (!txq->frozen) + mod_timer(&txq->stuck_timer, + jiffies + txq->wd_timeout); + else + txq->frozen_expiry_remainder = txq->wd_timeout; + } IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id); iwl_trans_pcie_ref(trans); } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 99e873dc8684..520bef80747f 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2399,6 +2399,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, TDLS_WIDER_BW); if (rctbl) ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); @@ -2676,7 +2677,7 @@ static void hwsim_mon_setup(struct net_device *dev) dev->netdev_ops = &hwsim_netdev_ops; dev->destructor = free_netdev; ether_setup(dev); - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; dev->type = ARPHRD_IEEE80211_RADIOTAP; eth_zero_addr(dev->dev_addr); dev->dev_addr[0] = 0x12; @@ -3120,8 +3121,10 @@ static int hwsim_init_netlink(void) goto failure; rc = netlink_register_notifier(&hwsim_netlink_notifier); - if (rc) + if (rc) { + genl_unregister_family(&hwsim_genl_family); goto failure; + } return 0; diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c index 7217da4f1543..57a80cfa39b1 100644 --- a/drivers/net/wireless/mediatek/mt7601u/dma.c +++ b/drivers/net/wireless/mediatek/mt7601u/dma.c @@ -112,7 +112,9 @@ static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data, if (!skb) return; - ieee80211_rx_ni(dev->hw, skb); + spin_lock(&dev->mac_lock); + ieee80211_rx(dev->hw, skb); + spin_unlock(&dev->mac_lock); } static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len) @@ -236,23 +238,42 @@ static void mt7601u_complete_tx(struct urb *urb) skb = q->e[q->start].skb; trace_mt_tx_dma_done(dev, skb); - mt7601u_tx_status(dev, skb); + __skb_queue_tail(&dev->tx_skb_done, skb); + tasklet_schedule(&dev->tx_tasklet); if (q->used == q->entries - q->entries / 8) ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb)); q->start = (q->start + 1) % q->entries; q->used--; +out: + spin_unlock_irqrestore(&dev->tx_lock, flags); +} - if (urb->status) - goto out; +static void mt7601u_tx_tasklet(unsigned long data) +{ + struct mt7601u_dev *dev = (struct mt7601u_dev *) data; + struct sk_buff_head skbs; + unsigned long flags; + + __skb_queue_head_init(&skbs); + + spin_lock_irqsave(&dev->tx_lock, flags); set_bit(MT7601U_STATE_MORE_STATS, &dev->state); if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state)) queue_delayed_work(dev->stat_wq, &dev->stat_work, msecs_to_jiffies(10)); -out: + + skb_queue_splice_init(&dev->tx_skb_done, &skbs); + spin_unlock_irqrestore(&dev->tx_lock, flags); + + while (!skb_queue_empty(&skbs)) { + struct sk_buff *skb = __skb_dequeue(&skbs); + + mt7601u_tx_status(dev, skb); + } } static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, @@ -475,6 +496,7 @@ int mt7601u_dma_init(struct mt7601u_dev *dev) { int ret = -ENOMEM; + tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev); tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev); ret = mt7601u_alloc_tx(dev); @@ -502,4 +524,6 @@ void mt7601u_dma_cleanup(struct mt7601u_dev *dev) mt7601u_free_rx(dev); mt7601u_free_tx(dev); + + tasklet_kill(&dev->tx_tasklet); } diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c index 
df3dd56199a7..26190fd33407 100644 --- a/drivers/net/wireless/mediatek/mt7601u/init.c +++ b/drivers/net/wireless/mediatek/mt7601u/init.c @@ -454,8 +454,10 @@ struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev) spin_lock_init(&dev->tx_lock); spin_lock_init(&dev->rx_lock); spin_lock_init(&dev->lock); + spin_lock_init(&dev->mac_lock); spin_lock_init(&dev->con_mon_lock); atomic_set(&dev->avg_ampdu_len, 1); + skb_queue_head_init(&dev->tx_skb_done); dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0); if (!dev->stat_wq) { diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c index 7514bce1ac91..e21c53ed09fb 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mac.c +++ b/drivers/net/wireless/mediatek/mt7601u/mac.c @@ -181,7 +181,11 @@ void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat) } mt76_mac_fill_tx_status(dev, &info, stat); + + spin_lock_bh(&dev->mac_lock); ieee80211_tx_status_noskb(dev->hw, sta, &info); + spin_unlock_bh(&dev->mac_lock); + rcu_read_unlock(); } diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h index 9102be6b95cb..428bd2f10b7b 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h +++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h @@ -141,12 +141,13 @@ enum { /** * struct mt7601u_dev - adapter structure * @lock: protects @wcid->tx_rate. + * @mac_lock: locks out mac80211's tx status and rx paths. * @tx_lock: protects @tx_q and changes of MT7601U_STATE_*_STATS - flags in @state. + * flags in @state. * @rx_lock: protects @rx_q. * @con_mon_lock: protects @ap_bssid, @bcn_*, @avg_rssi. * @mutex: ensures exclusive access from mac80211 callbacks. - * @vendor_req_mutex: ensures atomicity of vendor requests. + * @vendor_req_mutex: protects @vend_buf, ensures atomicity of split writes. * @reg_atomic_mutex: ensures atomicity of indirect register accesses * (accesses to RF and BBP). 
* @hw_atomic_mutex: ensures exclusive access to HW during critical @@ -177,6 +178,7 @@ struct mt7601u_dev { struct mt76_wcid __rcu *wcid[N_WCIDS]; spinlock_t lock; + spinlock_t mac_lock; const u16 *beacon_offsets; @@ -184,6 +186,8 @@ struct mt7601u_dev { struct mt7601u_eeprom_params *ee; struct mutex vendor_req_mutex; + void *vend_buf; + struct mutex reg_atomic_mutex; struct mutex hw_atomic_mutex; @@ -197,7 +201,9 @@ struct mt7601u_dev { /* TX */ spinlock_t tx_lock; + struct tasklet_struct tx_tasklet; struct mt7601u_tx_queue *tx_q; + struct sk_buff_head tx_skb_done; atomic_t avg_ampdu_len; diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c index 0be2080ceab3..a0a33dc8f6bc 100644 --- a/drivers/net/wireless/mediatek/mt7601u/tx.c +++ b/drivers/net/wireless/mediatek/mt7601u/tx.c @@ -116,7 +116,10 @@ void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb) ieee80211_tx_info_clear_status(info); info->status.rates[0].idx = -1; info->flags |= IEEE80211_TX_STAT_ACK; + + spin_lock(&dev->mac_lock); ieee80211_tx_status(dev->hw, skb); + spin_unlock(&dev->mac_lock); } static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb) diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c index 54dba4001865..416c6045ff31 100644 --- a/drivers/net/wireless/mediatek/mt7601u/usb.c +++ b/drivers/net/wireless/mediatek/mt7601u/usb.c @@ -92,10 +92,9 @@ void mt7601u_complete_urb(struct urb *urb) complete(cmpl); } -static int -__mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req, - const u8 direction, const u16 val, const u16 offset, - void *buf, const size_t buflen) +int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req, + const u8 direction, const u16 val, const u16 offset, + void *buf, const size_t buflen) { int i, ret; struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); @@ -110,6 +109,8 @@ __mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req, trace_mt_vend_req(dev, pipe, req, req_type, val, offset, buf, buflen, ret); + if (ret == -ENODEV) + set_bit(MT7601U_STATE_REMOVED, &dev->state); if (ret >= 0 || ret == -ENODEV) return ret; @@ -122,25 +123,6 @@ __mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req, return ret; } -int -mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req, - const u8 direction, const u16 val, const u16 offset, - void *buf, const size_t buflen) -{ - int ret; - - mutex_lock(&dev->vendor_req_mutex); - - ret = __mt7601u_vendor_request(dev, req, direction, val, offset, - buf, buflen); - if (ret == -ENODEV) - set_bit(MT7601U_STATE_REMOVED, &dev->state); - - mutex_unlock(&dev->vendor_req_mutex); - - return ret; -} - void mt7601u_vendor_reset(struct mt7601u_dev *dev) { mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT, @@ -150,19 +132,21 @@ void mt7601u_vendor_reset(struct mt7601u_dev *dev) u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset) { int ret; - __le32 reg; - u32 val; + u32 val = ~0; WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset); + mutex_lock(&dev->vendor_req_mutex); + ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN, - 0, offset, ®, sizeof(reg)); - val = le32_to_cpu(reg); - if (ret > 0 && ret != sizeof(reg)) { + 0, offset, dev->vend_buf, MT_VEND_BUF); + if (ret == MT_VEND_BUF) + val = get_unaligned_le32(dev->vend_buf); + else if (ret > 0) dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n", ret, offset); - val = ~0; - } + + mutex_unlock(&dev->vendor_req_mutex); trace_reg_read(dev, 
offset, val); return val; @@ -173,12 +157,17 @@ int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req, { int ret; + mutex_lock(&dev->vendor_req_mutex); + ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT, val & 0xffff, offset, NULL, 0); - if (ret) - return ret; - return mt7601u_vendor_request(dev, req, USB_DIR_OUT, - val >> 16, offset + 2, NULL, 0); + if (!ret) + ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT, + val >> 16, offset + 2, NULL, 0); + + mutex_unlock(&dev->vendor_req_mutex); + + return ret; } void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val) @@ -275,6 +264,12 @@ static int mt7601u_probe(struct usb_interface *usb_intf, usb_set_intfdata(usb_intf, dev); + dev->vend_buf = devm_kmalloc(dev->dev, MT_VEND_BUF, GFP_KERNEL); + if (!dev->vend_buf) { + ret = -ENOMEM; + goto err; + } + ret = mt7601u_assign_pipes(usb_intf, dev); if (ret) goto err; diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.h b/drivers/net/wireless/mediatek/mt7601u/usb.h index 49e188fa3798..bc182022b9d6 100644 --- a/drivers/net/wireless/mediatek/mt7601u/usb.h +++ b/drivers/net/wireless/mediatek/mt7601u/usb.h @@ -23,6 +23,8 @@ #define MT_VEND_DEV_MODE_RESET 1 +#define MT_VEND_BUF sizeof(__le32) + enum mt_vendor_req { MT_VEND_DEV_MODE = 1, MT_VEND_WRITE = 2, diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig index 48edf387683e..317d99189556 100644 --- a/drivers/net/wireless/mwifiex/Kconfig +++ b/drivers/net/wireless/mwifiex/Kconfig @@ -9,36 +9,36 @@ config MWIFIEX mwifiex. config MWIFIEX_SDIO - tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897" + tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8997" depends on MWIFIEX && MMC select FW_LOADER select WANT_DEV_COREDUMP ---help--- This adds support for wireless adapters based on Marvell - 8786/8787/8797/8887/8897 chipsets with SDIO interface. + 8786/8787/8797/8887/8897/8997 chipsets with SDIO interface. If you choose to build it as a module, it will be called mwifiex_sdio. config MWIFIEX_PCIE - tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897" + tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897/8997" depends on MWIFIEX && PCI select FW_LOADER select WANT_DEV_COREDUMP ---help--- This adds support for wireless adapters based on Marvell - 8766/8897 chipsets with PCIe interface. + 8766/8897/8997 chipsets with PCIe interface. If you choose to build it as a module, it will be called mwifiex_pcie. config MWIFIEX_USB - tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897" + tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897/8997" depends on MWIFIEX && USB select FW_LOADER ---help--- This adds support for wireless adapters based on Marvell - 8797/8897 chipset with USB interface. + 8797/8897/8997 chipset with USB interface. If you choose to build it as a module, it will be called mwifiex_usb. 
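
The per-chipset additions in the bus front-ends that follow (SDIO, PCIe and USB alike) all hang off the bus ID tables: each new device ID entry carries a .driver_data pointer to a static per-chipset descriptor, and the probe callback recovers that descriptor to pick the firmware image, register map and feature flags. Below is a minimal sketch of that pattern for an SDIO driver; the mydrv_* names and ID values are hypothetical stand-ins, not mwifiex API:

#include <linux/device.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/module.h>

/* hypothetical IDs; a real driver takes these from the hardware databook */
#define MYDRV_VENDOR_ID		0x1234
#define MYDRV_CHIP_A_ID		0x5678

/* everything that differs between chipsets lives in one descriptor */
struct mydrv_device {
	const char *firmware;
	bool can_dump_fw;
};

static const struct mydrv_device mydrv_chip_a = {
	.firmware = "mydrv/chip_a_fw.bin",
	.can_dump_fw = true,
};

static const struct sdio_device_id mydrv_ids[] = {
	{SDIO_DEVICE(MYDRV_VENDOR_ID, MYDRV_CHIP_A_ID),
	 .driver_data = (unsigned long)&mydrv_chip_a},
	{},
};
MODULE_DEVICE_TABLE(sdio, mydrv_ids);

static int mydrv_probe(struct sdio_func *func,
		       const struct sdio_device_id *id)
{
	/* recover the per-chipset descriptor stashed in the ID table */
	const struct mydrv_device *data =
		(const struct mydrv_device *)id->driver_data;

	dev_info(&func->dev, "chip found, firmware %s\n", data->firmware);
	return 0;
}

static struct sdio_driver mydrv_driver = {
	.name = "mydrv",
	.id_table = mydrv_ids,
	.probe = mydrv_probe,
};
module_sdio_driver(mydrv_driver);
MODULE_LICENSE("GPL");

Keeping the descriptors const and reachable only through the ID table means supporting a new chipset is one table entry plus one descriptor, with no new branches in the probe path; that is exactly the shape of the 8997 hunks below.
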
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 207da40500f4..45ae38e32621 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -167,8 +167,6 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, mwifiex_dbg(adapter, ERROR, "DNLD_CMD: FW in reset state, ignore cmd %#x\n", cmd_code); - if (cmd_node->wait_q_enabled) - mwifiex_complete_cmd(adapter, cmd_node); mwifiex_recycle_cmd_node(adapter, cmd_node); queue_work(adapter->workqueue, &adapter->main_work); return -1; @@ -809,17 +807,6 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter) adapter->is_cmd_timedout = 0; resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data; - if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) { - mwifiex_dbg(adapter, ERROR, - "CMD_RESP: %#x been canceled\n", - le16_to_cpu(resp->command)); - mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd); - spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); - adapter->curr_cmd = NULL; - spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); - return -1; - } - if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) { /* Copy original response back to response buffer */ struct mwifiex_ds_misc_cmd *hostcmd; @@ -989,12 +976,13 @@ mwifiex_cmd_timeout_func(unsigned long function_context) if (cmd_node->wait_q_enabled) { adapter->cmd_wait_q.status = -ETIMEDOUT; - wake_up_interruptible(&adapter->cmd_wait_q.wait); mwifiex_cancel_pending_ioctl(adapter); } } - if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) + if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) { mwifiex_init_fw_complete(adapter); + return; + } if (adapter->if_ops.device_dump) adapter->if_ops.device_dump(adapter); @@ -1024,6 +1012,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter) adapter->curr_cmd->wait_q_enabled = false; adapter->cmd_wait_q.status = -1; mwifiex_complete_cmd(adapter, adapter->curr_cmd); + /* no recycle here: we are probably still waiting for the response */ } /* Cancel all pending command */ spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags); @@ -1032,11 +1021,8 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter) list_del(&cmd_node->list); spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags); - if (cmd_node->wait_q_enabled) { + if (cmd_node->wait_q_enabled) adapter->cmd_wait_q.status = -1; - mwifiex_complete_cmd(adapter, cmd_node); - cmd_node->wait_q_enabled = false; - } mwifiex_recycle_cmd_node(adapter, cmd_node); spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags); } @@ -1094,12 +1080,18 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) (adapter->curr_cmd->wait_q_enabled)) { spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags); cmd_node = adapter->curr_cmd; - cmd_node->wait_q_enabled = false; - cmd_node->cmd_flag |= CMD_F_CANCELED; - mwifiex_recycle_cmd_node(adapter, cmd_node); - mwifiex_complete_cmd(adapter, adapter->curr_cmd); + /* Setting curr_cmd to NULL is quite dangerous, because + * mwifiex_process_cmdresp checks curr_cmd to be != NULL + * at the beginning, then relies on it and dereferences + * it at will. + * This probably works since mwifiex_cmd_timeout_func + * is the only caller of this function and no responses + * arrive at that point. + */ adapter->curr_cmd = NULL; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); + + mwifiex_recycle_cmd_node(adapter, cmd_node); } /* Cancel all pending scan command */ @@ -1129,7 +1121,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) } } } - 
adapter->cmd_wait_q.status = -1; } /* diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index cff38ad129aa..3ec2ac82e394 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -438,7 +438,6 @@ enum P2P_MODES { #define CMD_F_HOSTCMD (1 << 0) -#define CMD_F_CANCELED (1 << 1) #define HostCmd_CMD_ID_MASK 0x0fff @@ -686,6 +685,7 @@ struct mwifiex_fw_chan_stats { enum mwifiex_chan_scan_mode_bitmasks { MWIFIEX_PASSIVE_SCAN = BIT(0), MWIFIEX_DISABLE_CHAN_FILT = BIT(1), + MWIFIEX_HIDDEN_SSID_REPORT = BIT(4), }; struct mwifiex_chan_scan_param_set { diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index 8fa363add970..5d3ae63baea4 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -301,7 +301,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) adapter->iface_limit.sta_intf = MWIFIEX_MAX_STA_NUM; adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM; adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM; - + adapter->active_scan_triggered = false; setup_timer(&adapter->wakeup_timer, wakeup_timer_fn, (unsigned long)adapter); } @@ -551,11 +551,6 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter) } } - if (adapter->if_ops.init_fw_port) { - if (adapter->if_ops.init_fw_port(adapter)) - return -1; - } - for (i = 0; i < adapter->priv_num; i++) { if (adapter->priv[i]) { ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta, diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index face7478937f..6b9512140e7a 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -666,6 +666,7 @@ struct mwifiex_private { struct mwifiex_11h_intf_state state_11h; struct mwifiex_ds_mem_rw mem_rw; struct sk_buff_head bypass_txq; + struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX]; }; @@ -986,6 +987,7 @@ struct mwifiex_adapter { u8 coex_tx_win_size; u8 coex_rx_win_size; bool drcs_enabled; + u8 active_scan_triggered; }; void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter); diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 77b9055a2d14..408b68460716 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -266,12 +266,17 @@ static const struct pci_device_id mwifiex_ids[] = { { PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - .driver_data = (unsigned long) &mwifiex_pcie8766, + .driver_data = (unsigned long)&mwifiex_pcie8766, }, { PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8897, PCI_ANY_ID, PCI_ANY_ID, 0, 0, - .driver_data = (unsigned long) &mwifiex_pcie8897, + .driver_data = (unsigned long)&mwifiex_pcie8897, + }, + { + PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + .driver_data = (unsigned long)&mwifiex_pcie8997, }, {}, }; @@ -1082,6 +1087,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter) card->txbd_rdptr++; break; case PCIE_DEVICE_ID_MARVELL_88W8897: + case PCIE_DEVICE_ID_MARVELL_88W8997: card->txbd_rdptr += reg->ring_tx_start_ptr; break; } @@ -1179,6 +1185,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb, card->txbd_wrptr++; break; case PCIE_DEVICE_ID_MARVELL_88W8897: + case PCIE_DEVICE_ID_MARVELL_88W8997: card->txbd_wrptr += reg->ring_tx_start_ptr; break; } @@ -1807,6 +1814,8 @@ static int mwifiex_pcie_event_complete(struct 
mwifiex_adapter *adapter, if (!card->evt_buf_list[rdptr]) { skb_push(skb, INTF_HEADER_LEN); + skb_put(skb, MAX_EVENT_SIZE - skb->len); + memset(skb->data, 0, MAX_EVENT_SIZE); if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE, PCI_DMA_FROMDEVICE)) @@ -2731,3 +2740,4 @@ MODULE_VERSION(PCIE_VERSION); MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE(PCIE8766_DEFAULT_FW_NAME); MODULE_FIRMWARE(PCIE8897_DEFAULT_FW_NAME); +MODULE_FIRMWARE(PCIE8997_DEFAULT_FW_NAME); diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h index 0e7ee8b72358..48e549c3b285 100644 --- a/drivers/net/wireless/mwifiex/pcie.h +++ b/drivers/net/wireless/mwifiex/pcie.h @@ -30,10 +30,12 @@ #define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin" #define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin" +#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcie8997_uapsta.bin" #define PCIE_VENDOR_ID_MARVELL (0x11ab) #define PCIE_DEVICE_ID_MARVELL_88W8766P (0x2b30) #define PCIE_DEVICE_ID_MARVELL_88W8897 (0x2b38) +#define PCIE_DEVICE_ID_MARVELL_88W8997 (0x2b42) /* Constants for Buffer Descriptor (BD) rings */ #define MWIFIEX_MAX_TXRX_BD 0x20 @@ -197,7 +199,38 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = { .sleep_cookie = 0, .fw_dump_ctrl = 0xcf4, .fw_dump_start = 0xcf8, - .fw_dump_end = 0xcff + .fw_dump_end = 0xcff, +}; + +static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = { + .cmd_addr_lo = PCIE_SCRATCH_0_REG, + .cmd_addr_hi = PCIE_SCRATCH_1_REG, + .cmd_size = PCIE_SCRATCH_2_REG, + .fw_status = PCIE_SCRATCH_3_REG, + .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG, + .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG, + .tx_rdptr = 0xC1A4, + .tx_wrptr = 0xC1A8, + .rx_rdptr = 0xC1A8, + .rx_wrptr = 0xC1A4, + .evt_rdptr = PCIE_SCRATCH_10_REG, + .evt_wrptr = PCIE_SCRATCH_11_REG, + .drv_rdy = PCIE_SCRATCH_12_REG, + .tx_start_ptr = 16, + .tx_mask = 0x0FFF0000, + .tx_wrap_mask = 0x01FF0000, + .rx_mask = 0x00000FFF, + .rx_wrap_mask = 0x000001FF, + .tx_rollover_ind = BIT(28), + .rx_rollover_ind = BIT(12), + .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND, + .ring_flag_sop = MWIFIEX_BD_FLAG_SOP, + .ring_flag_eop = MWIFIEX_BD_FLAG_EOP, + .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP, + .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP, + .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR, + .pfu_enabled = 1, + .sleep_cookie = 0, }; struct mwifiex_pcie_device { @@ -227,6 +260,15 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = { .can_ext_scan = true, }; +static const struct mwifiex_pcie_device mwifiex_pcie8997 = { + .firmware = PCIE8997_DEFAULT_FW_NAME, + .reg = &mwifiex_reg_8997, + .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, + .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, + .can_dump_fw = false, + .can_ext_scan = true, +}; + struct mwifiex_evt_buf_desc { u64 paddr; u16 len; @@ -325,6 +367,7 @@ mwifiex_pcie_txbd_not_full(struct pcie_service_card *card) return 1; break; case PCIE_DEVICE_ID_MARVELL_88W8897: + case PCIE_DEVICE_ID_MARVELL_88W8997: if (((card->txbd_wrptr & reg->tx_mask) != (card->txbd_rdptr & reg->tx_mask)) || ((card->txbd_wrptr & reg->tx_rollover_ind) == diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index ef8da8ebcbab..5847863a2d6b 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -527,7 +527,8 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv, if (ch->flags & IEEE80211_CHAN_NO_IR) scan_chan_list[chan_idx].chan_scan_mode_bitmap - |= MWIFIEX_PASSIVE_SCAN; + |= (MWIFIEX_PASSIVE_SCAN | + 
MWIFIEX_HIDDEN_SSID_REPORT); else scan_chan_list[chan_idx].chan_scan_mode_bitmap &= ~MWIFIEX_PASSIVE_SCAN; @@ -1049,7 +1050,8 @@ mwifiex_config_scan(struct mwifiex_private *priv, if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE) (scan_chan_list + chan_idx)->chan_scan_mode_bitmap - |= MWIFIEX_PASSIVE_SCAN; + |= (MWIFIEX_PASSIVE_SCAN | + MWIFIEX_HIDDEN_SSID_REPORT); else (scan_chan_list + chan_idx)->chan_scan_mode_bitmap @@ -1600,6 +1602,62 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv, return ret; } +/* This function checks if an SSID string is all zeroes or zero length */ +static bool mwifiex_is_hidden_ssid(struct cfg80211_ssid *ssid) +{ + int idx; + + for (idx = 0; idx < ssid->ssid_len; idx++) { + if (ssid->ssid[idx]) + return false; + } + + return true; +} + +/* This function checks if any hidden SSID was found on passive scan channels + * and saves those channels for a specific-SSID active scan + */ +static int mwifiex_save_hidden_ssid_channels(struct mwifiex_private *priv, + struct cfg80211_bss *bss) +{ + struct mwifiex_bssdescriptor *bss_desc; + int ret; + int chid; + + /* Allocate and fill new bss descriptor */ + bss_desc = kzalloc(sizeof(*bss_desc), GFP_KERNEL); + if (!bss_desc) + return -ENOMEM; + + ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc); + if (ret) + goto done; + + if (mwifiex_is_hidden_ssid(&bss_desc->ssid)) { + mwifiex_dbg(priv->adapter, INFO, "found hidden SSID\n"); + for (chid = 0; chid < MWIFIEX_USER_SCAN_CHAN_MAX; chid++) { + if (priv->hidden_chan[chid].chan_number == + bss->channel->hw_value) + break; + + if (!priv->hidden_chan[chid].chan_number) { + priv->hidden_chan[chid].chan_number = + bss->channel->hw_value; + priv->hidden_chan[chid].radio_type = + bss->channel->band; + priv->hidden_chan[chid].scan_type = + MWIFIEX_SCAN_TYPE_ACTIVE; + break; + } + } + } + +done: + kfree(bss_desc); + return 0; +} + static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv, struct cfg80211_bss *bss) { @@ -1789,6 +1847,14 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, .mac_address, ETH_ALEN)) mwifiex_update_curr_bss_params(priv, bss); cfg80211_put_bss(priv->wdev.wiphy, bss); + + if ((chan->flags & IEEE80211_CHAN_RADAR) || + (chan->flags & IEEE80211_CHAN_NO_IR)) { + mwifiex_dbg(adapter, INFO, + "radar or passive channel %d\n", + channel); + mwifiex_save_hidden_ssid_channels(priv, bss); + } } } else { mwifiex_dbg(adapter, WARN, "missing BSS channel IE\n"); @@ -1812,6 +1878,57 @@ static void mwifiex_complete_scan(struct mwifiex_private *priv) } } +/* This function checks if any hidden SSID was found on passive scan channels + * and issues a specific-SSID active scan on those channels + */ +static int +mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv) +{ + int ret; + struct mwifiex_adapter *adapter = priv->adapter; + u8 id = 0; + struct mwifiex_user_scan_cfg *user_scan_cfg; + + if (adapter->active_scan_triggered) { + adapter->active_scan_triggered = false; + return 0; + } + + if (!priv->hidden_chan[0].chan_number) { + mwifiex_dbg(adapter, INFO, "No BSS with hidden SSID found on DFS channels\n"); + return 0; + } + user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL); + + if (!user_scan_cfg) + return -ENOMEM; + + memset(user_scan_cfg, 0, sizeof(*user_scan_cfg)); + + for (id = 0; id < MWIFIEX_USER_SCAN_CHAN_MAX; id++) { + if (!priv->hidden_chan[id].chan_number) + break; + memcpy(&user_scan_cfg->chan_list[id], + &priv->hidden_chan[id], + sizeof(struct mwifiex_user_scan_chan)); + } + + 
adapter->active_scan_triggered = true; + user_scan_cfg->num_ssids = priv->scan_request->n_ssids; + user_scan_cfg->ssid_list = priv->scan_request->ssids; + + ret = mwifiex_scan_networks(priv, user_scan_cfg); + kfree(user_scan_cfg); + + memset(&priv->hidden_chan, 0, sizeof(priv->hidden_chan)); + + if (ret) { + dev_err(priv->adapter->dev, "scan failed: %d\n", ret); + return ret; + } + + return 0; +} static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) { struct mwifiex_adapter *adapter = priv->adapter; @@ -1825,6 +1942,8 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) adapter->scan_processing = false; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); + mwifiex_active_scan_req_for_passive_chan(priv); + if (!adapter->ext_scan) mwifiex_complete_scan(priv); @@ -1851,15 +1970,17 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) adapter->scan_processing = false; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); - if (priv->scan_request) { - mwifiex_dbg(adapter, INFO, - "info: aborting scan\n"); - cfg80211_scan_done(priv->scan_request, 1); - priv->scan_request = NULL; - } else { - priv->scan_aborting = false; - mwifiex_dbg(adapter, INFO, - "info: scan already aborted\n"); + if (!adapter->active_scan_triggered) { + if (priv->scan_request) { + mwifiex_dbg(adapter, INFO, + "info: aborting scan\n"); + cfg80211_scan_done(priv->scan_request, 1); + priv->scan_request = NULL; + } else { + priv->scan_aborting = false; + mwifiex_dbg(adapter, INFO, + "info: scan already aborted\n"); + } } } else { /* Get scan command from scan_pending_q and put to diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index a0b121f3460c..5d05c6fe6429 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -51,6 +51,10 @@ static unsigned long iface_work_flags; static struct semaphore add_remove_card_sem; +static struct memory_type_mapping generic_mem_type_map[] = { + {"DUMP", NULL, 0, 0xDD}, +}; + static struct memory_type_mapping mem_type_mapping_tbl[] = { {"ITCM", NULL, 0, 0xF0}, {"DTCM", NULL, 0, 0xF1}, @@ -91,6 +95,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) return -ENOMEM; card->func = func; + card->device_id = id; func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE; @@ -107,6 +112,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size; card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size; card->can_dump_fw = data->can_dump_fw; + card->fw_dump_enh = data->fw_dump_enh; card->can_auto_tdls = data->can_auto_tdls; card->can_ext_scan = data->can_ext_scan; } @@ -287,6 +293,8 @@ static int mwifiex_sdio_suspend(struct device *dev) #define SDIO_DEVICE_ID_MARVELL_8887 (0x9135) /* Device ID for SD8801 */ #define SDIO_DEVICE_ID_MARVELL_8801 (0x9139) +/* Device ID for SD8997 */ +#define SDIO_DEVICE_ID_MARVELL_8997 (0x9141) /* WLAN IDs */ @@ -303,6 +311,8 @@ static const struct sdio_device_id mwifiex_ids[] = { .driver_data = (unsigned long)&mwifiex_sdio_sd8887}, {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8801), .driver_data = (unsigned long)&mwifiex_sdio_sd8801}, + {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997), + .driver_data = (unsigned long)&mwifiex_sdio_sd8997}, {}, }; @@ -910,6 +920,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, if (!fwbuf) return -ENOMEM; + sdio_claim_host(card->func); + /* 
Perform firmware data transfer */ do { /* The host polls for the DN_LD_CARD_RDY and CARD_IO_READY @@ -1014,6 +1026,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, offset += txlen; } while (true); + sdio_release_host(card->func); + mwifiex_dbg(adapter, MSG, "info: FW download over, size %d bytes\n", offset); @@ -1964,8 +1978,13 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) adapter->dev = &func->dev; strcpy(adapter->fw_name, card->firmware); - adapter->mem_type_mapping_tbl = mem_type_mapping_tbl; - adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl); + if (card->fw_dump_enh) { + adapter->mem_type_mapping_tbl = generic_mem_type_map; + adapter->num_mem_types = 1; + } else { + adapter->mem_type_mapping_tbl = mem_type_mapping_tbl; + adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl); + } return 0; } @@ -2107,26 +2126,46 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port) port, card->mp_data_port_mask); } +static void mwifiex_recreate_adapter(struct sdio_mmc_card *card) +{ + struct sdio_func *func = card->func; + const struct sdio_device_id *device_id = card->device_id; + + /* TODO mmc_hw_reset does not require destroying and re-probing the + * whole adapter. Hence there was no need for this Rube Goldberg + * design to reload the fw from an external workqueue. If we don't + * destroy the adapter we could reload the fw from + * mwifiex_main_work_queue directly. + * The real difficulty with fw reset is to restore all the user + * settings applied through ioctl. By destroying and recreating the + * adapter, we take the easy way out, since we rely on user space to + * restore them. We assume that user space will treat the new + * incarnation of the adapter (interfaces) as if they had just been + * discovered and initialize them from scratch. + */ + + mwifiex_sdio_remove(func); + + /* power cycle the adapter */ + sdio_claim_host(func); + mmc_hw_reset(func->card->host); + sdio_release_host(func); + + mwifiex_sdio_probe(func, device_id); +} + static struct mwifiex_adapter *save_adapter; static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter) { struct sdio_mmc_card *card = adapter->card; - struct mmc_host *target = card->func->card->host; - /* The actual reset operation must be run outside of driver thread. - * This is because mmc_remove_host() will cause the device to be - * instantly destroyed, and the driver then needs to end its thread, - * leading to a deadlock. - * - * We run it in a totally independent workqueue. + /* TODO the card pointer is unprotected. If the adapter is removed + * physically, the sdio core might trigger mwifiex_sdio_remove before + * this workqueue is run, which will destroy the adapter struct. When this + * workqueue eventually executes, it will dereference an invalid adapter + * pointer */ - - mwifiex_dbg(adapter, WARN, "Resetting card...\n"); - mmc_remove_host(target); - /* 200ms delay is based on experiment with sdhci controller */ - mdelay(200); - target->rescan_entered = 0; /* rescan non-removable cards */ - mmc_add_host(target); + mwifiex_recreate_adapter(card); } /* This function reads/writes firmware */ @@ -2138,8 +2177,8 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter, int ret, tries; u8 ctrl_data = 0; - sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl, - &ret); + sdio_writeb(card->func, card->reg->fw_dump_host_ready, + card->reg->fw_dump_ctrl, &ret); if (ret) { mwifiex_dbg(adapter, ERROR, "SDIO Write ERR\n"); return RDWR_STATUS_FAILURE; @@ -2155,10 +2194,10 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter, break; if (doneflag && ctrl_data == doneflag) return RDWR_STATUS_DONE; - if (ctrl_data != FW_DUMP_HOST_READY) { + if (ctrl_data != card->reg->fw_dump_host_ready) { mwifiex_dbg(adapter, WARN, - "The ctrl reg was changed, re-try again!\n"); - sdio_writeb(card->func, FW_DUMP_HOST_READY, + "The ctrl reg was changed, re-try again\n"); + sdio_writeb(card->func, card->reg->fw_dump_host_ready, card->reg->fw_dump_ctrl, &ret); if (ret) { mwifiex_dbg(adapter, ERROR, "SDIO write err\n"); @@ -2167,7 +2206,7 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter, } usleep_range(100, 200); } - if (ctrl_data == FW_DUMP_HOST_READY) { + if (ctrl_data == card->reg->fw_dump_host_ready) { mwifiex_dbg(adapter, ERROR, "Fail to pull ctrl_data\n"); return RDWR_STATUS_FAILURE; @@ -2300,10 +2339,129 @@ done: sdio_release_host(card->func); } +static void mwifiex_sdio_generic_fw_dump(struct mwifiex_adapter *adapter) +{ + struct sdio_mmc_card *card = adapter->card; + struct memory_type_mapping *entry = &generic_mem_type_map[0]; + unsigned int reg, reg_start, reg_end; + u8 start_flag = 0, done_flag = 0; + u8 *dbg_ptr, *end_ptr; + enum rdwr_status stat; + int ret = -1, tries; + + if (!card->fw_dump_enh) + return; + + if (entry->mem_ptr) { + vfree(entry->mem_ptr); + entry->mem_ptr = NULL; + } + entry->mem_size = 0; + + mwifiex_pm_wakeup_card(adapter); + sdio_claim_host(card->func); + + mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n"); + + stat = mwifiex_sdio_rdwr_firmware(adapter, done_flag); + if (stat == RDWR_STATUS_FAILURE) + goto done; + + reg_start = card->reg->fw_dump_start; + reg_end = card->reg->fw_dump_end; + for (reg = reg_start; reg <= reg_end; reg++) { + for (tries = 0; tries < MAX_POLL_TRIES; tries++) { + start_flag = sdio_readb(card->func, reg, &ret); + if (ret) { + mwifiex_dbg(adapter, ERROR, + "SDIO read err\n"); + goto done; + } + if (start_flag == 0) + break; + if (tries == MAX_POLL_TRIES) { + mwifiex_dbg(adapter, ERROR, + "FW not ready to dump\n"); + ret = -1; + goto done; + } + } + usleep_range(100, 200); + } + + entry->mem_ptr = vmalloc(0xf0000 + 1); + if (!entry->mem_ptr) { + ret = -1; + goto done; + } + dbg_ptr = entry->mem_ptr; + entry->mem_size = 0xf0000; + end_ptr = dbg_ptr + entry->mem_size; + + done_flag = entry->done_flag; + mwifiex_dbg(adapter, DUMP, + "Start %s output, please wait...\n", entry->mem_name); + + while (true) { + stat = mwifiex_sdio_rdwr_firmware(adapter, done_flag); + if (stat == RDWR_STATUS_FAILURE) + goto done; + for (reg = reg_start; reg <= reg_end; reg++) { + *dbg_ptr = sdio_readb(card->func, reg, &ret); + if (ret) { + mwifiex_dbg(adapter, ERROR, 
"SDIO read err\n"); + goto done; + } + dbg_ptr++; + if (dbg_ptr >= end_ptr) { + u8 *tmp_ptr; + + tmp_ptr = vmalloc(entry->mem_size + 0x4000 + 1); + if (!tmp_ptr) + goto done; + + memcpy(tmp_ptr, entry->mem_ptr, + entry->mem_size); + vfree(entry->mem_ptr); + entry->mem_ptr = tmp_ptr; + tmp_ptr = NULL; + dbg_ptr = entry->mem_ptr + entry->mem_size; + entry->mem_size += 0x4000; + end_ptr = entry->mem_ptr + entry->mem_size; + } + } + if (stat == RDWR_STATUS_DONE) { + entry->mem_size = dbg_ptr - entry->mem_ptr; + mwifiex_dbg(adapter, DUMP, "dump %s done size=0x%x\n", + entry->mem_name, entry->mem_size); + ret = 0; + break; + } + } + mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n"); + +done: + if (ret) { + mwifiex_dbg(adapter, ERROR, "firmware dump failed\n"); + if (entry->mem_ptr) { + vfree(entry->mem_ptr); + entry->mem_ptr = NULL; + } + entry->mem_size = 0; + } + sdio_release_host(card->func); +} + static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter) { + struct sdio_mmc_card *card = adapter->card; + mwifiex_drv_info_dump(adapter); - mwifiex_sdio_fw_dump(adapter); + if (card->fw_dump_enh) + mwifiex_sdio_generic_fw_dump(adapter); + else + mwifiex_sdio_fw_dump(adapter); mwifiex_upload_device_dump(adapter); } @@ -2510,3 +2668,4 @@ MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME); +MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME); diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h index 6f645cf47369..b9fbc5cf6262 100644 --- a/drivers/net/wireless/mwifiex/sdio.h +++ b/drivers/net/wireless/mwifiex/sdio.h @@ -35,6 +35,7 @@ #define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin" #define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin" #define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin" +#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin" #define BLOCK_MODE 1 #define BYTE_MODE 0 @@ -222,6 +223,7 @@ struct mwifiex_sdio_card_reg { u8 cmd_cfg_1; u8 cmd_cfg_2; u8 cmd_cfg_3; + u8 fw_dump_host_ready; u8 fw_dump_ctrl; u8 fw_dump_start; u8 fw_dump_end; @@ -257,11 +259,15 @@ struct sdio_mmc_card { bool supports_sdio_new_mode; bool has_control_mask; bool can_dump_fw; + bool fw_dump_enh; bool can_auto_tdls; bool can_ext_scan; struct mwifiex_sdio_mpa_tx mpa_tx; struct mwifiex_sdio_mpa_rx mpa_rx; + + /* needed for card reset */ + const struct sdio_device_id *device_id; }; struct mwifiex_sdio_device { @@ -275,6 +281,7 @@ struct mwifiex_sdio_device { bool supports_sdio_new_mode; bool has_control_mask; bool can_dump_fw; + bool fw_dump_enh; bool can_auto_tdls; bool can_ext_scan; }; @@ -350,6 +357,7 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = { .cmd_cfg_1 = 0xb9, .cmd_cfg_2 = 0xba, .cmd_cfg_3 = 0xbb, + .fw_dump_host_ready = 0xee, .fw_dump_ctrl = 0xe2, .fw_dump_start = 0xe3, .fw_dump_end = 0xea, @@ -361,6 +369,59 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = { 0x59, 0x5c, 0x5d}, }; +static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = { + .start_rd_port = 0, + .start_wr_port = 0, + .base_0_reg = 0xF8, + .base_1_reg = 0xF9, + .poll_reg = 0x5C, + .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK | + CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK, + .host_int_rsr_reg = 0x4, + .host_int_status_reg = 0x0C, + .host_int_mask_reg = 0x08, + .status_reg_0 = 0xE8, + .status_reg_1 = 0xE9, + .sdio_int_mask = 0xff, + .data_port_mask = 0xffffffff, + .io_port_0_reg = 0xE4, + .io_port_1_reg = 
0xE5, + .io_port_2_reg = 0xE6, + .max_mp_regs = 196, + .rd_bitmap_l = 0x10, + .rd_bitmap_u = 0x11, + .rd_bitmap_1l = 0x12, + .rd_bitmap_1u = 0x13, + .wr_bitmap_l = 0x14, + .wr_bitmap_u = 0x15, + .wr_bitmap_1l = 0x16, + .wr_bitmap_1u = 0x17, + .rd_len_p0_l = 0x18, + .rd_len_p0_u = 0x19, + .card_misc_cfg_reg = 0xd8, + .card_cfg_2_1_reg = 0xd9, + .cmd_rd_len_0 = 0xc0, + .cmd_rd_len_1 = 0xc1, + .cmd_rd_len_2 = 0xc2, + .cmd_rd_len_3 = 0xc3, + .cmd_cfg_0 = 0xc4, + .cmd_cfg_1 = 0xc5, + .cmd_cfg_2 = 0xc6, + .cmd_cfg_3 = 0xc7, + .fw_dump_host_ready = 0xcc, + .fw_dump_ctrl = 0xf0, + .fw_dump_start = 0xf1, + .fw_dump_end = 0xf8, + .func1_dump_reg_start = 0x10, + .func1_dump_reg_end = 0x17, + .func1_scratch_reg = 0xe8, + .func1_spec_reg_num = 13, + .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D, + 0x60, 0x61, 0x62, 0x64, + 0x65, 0x66, 0x68, 0x69, + 0x6a}, +}; + static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = { .start_rd_port = 0, .start_wr_port = 0, @@ -469,6 +530,22 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = { .can_ext_scan = true, }; +static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = { + .firmware = SD8997_DEFAULT_FW_NAME, + .reg = &mwifiex_reg_sd8997, + .max_ports = 32, + .mp_agg_pkt_limit = 16, + .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, + .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX, + .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX, + .supports_sdio_new_mode = true, + .has_control_mask = false, + .can_dump_fw = true, + .fw_dump_enh = true, + .can_auto_tdls = false, + .can_ext_scan = true, +}; + static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = { .firmware = SD8887_DEFAULT_FW_NAME, .reg = &mwifiex_reg_sd8887, diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index 89e8dafb4738..87b69d8ad120 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -895,7 +895,7 @@ static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv, case ACT_TDLS_DELETE: if (reason) { if (!node || reason == TDLS_ERR_LINK_NONEXISTENT) - mwifiex_dbg(priv->adapter, ERROR, + mwifiex_dbg(priv->adapter, MSG, "TDLS link delete for %pM failed: reason %d\n", cmd_tdls_oper->peer_mac, reason); else diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index d8b7d9c20450..a6c8a4f7bfe9 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -66,8 +66,8 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter, if (status <= 0) { if (status == 0) status = -ETIMEDOUT; - mwifiex_dbg(adapter, ERROR, - "cmd_wait_q terminated: %d\n", status); + mwifiex_dbg(adapter, ERROR, "cmd_wait_q terminated: %d\n", + status); mwifiex_cancel_all_pending_cmd(adapter); return status; } diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c index aa3d3c5ed07b..b3e163de9899 100644 --- a/drivers/net/wireless/mwifiex/tdls.c +++ b/drivers/net/wireless/mwifiex/tdls.c @@ -164,7 +164,7 @@ static void mwifiex_tdls_add_aid(struct mwifiex_private *priv, pos = (void *)skb_put(skb, 4); *pos++ = WLAN_EID_AID; *pos++ = 2; - *pos++ = le16_to_cpu(assoc_rsp->a_id); + memcpy(pos, &assoc_rsp->a_id, sizeof(assoc_rsp->a_id)); return; } diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c index 492a8b3c636e..46c972a650a4 100644 --- a/drivers/net/wireless/mwifiex/uap_event.c +++ b/drivers/net/wireless/mwifiex/uap_event.c @@ -41,6 
+41,8 @@ static int mwifiex_check_uap_capabilties(struct mwifiex_private *priv, mwifiex_dbg_dump(priv->adapter, EVT_D, "uap capabilties:", event->data, event->len); + skb_push(event, MWIFIEX_BSS_START_EVT_FIX_SIZE); + while ((evt_len >= sizeof(tlv_hdr->header))) { tlv_hdr = (struct mwifiex_ie_types_data *)curr; tlv_len = le16_to_cpu(tlv_hdr->header.len); diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index fbad99c50307..5e789b2e06ea 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c @@ -47,6 +47,11 @@ static struct usb_device_id mwifiex_usb_table[] = { {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2, USB_CLASS_VENDOR_SPEC, USB_SUBCLASS_VENDOR_SPEC, 0xff)}, + /* 8997 */ + {USB_DEVICE(USB8XXX_VID, USB8997_PID_1)}, + {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8997_PID_2, + USB_CLASS_VENDOR_SPEC, + USB_SUBCLASS_VENDOR_SPEC, 0xff)}, { } /* Terminating entry */ }; @@ -382,12 +387,14 @@ static int mwifiex_usb_probe(struct usb_interface *intf, case USB8797_PID_1: case USB8801_PID_1: case USB8897_PID_1: + case USB8997_PID_1: card->usb_boot_state = USB8XXX_FW_DNLD; break; case USB8766_PID_2: case USB8797_PID_2: case USB8801_PID_2: case USB8897_PID_2: + case USB8997_PID_2: card->usb_boot_state = USB8XXX_FW_READY; break; default: @@ -814,6 +821,12 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) adapter->dev = &card->udev->dev; switch (le16_to_cpu(card->udev->descriptor.idProduct)) { + case USB8997_PID_1: + case USB8997_PID_2: + adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K; + strcpy(adapter->fw_name, USB8997_DEFAULT_FW_NAME); + adapter->ext_scan = true; + break; case USB8897_PID_1: case USB8897_PID_2: adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K; @@ -870,8 +883,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter, /* Allocate memory for transmit */ fwdata = kzalloc(FW_DNLD_TX_BUF_SIZE, GFP_KERNEL); - if (!fwdata) + if (!fwdata) { + ret = -ENOMEM; goto fw_exit; + } /* Allocate memory for receive */ recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL); @@ -1121,3 +1136,4 @@ MODULE_FIRMWARE(USB8766_DEFAULT_FW_NAME); MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME); MODULE_FIRMWARE(USB8801_DEFAULT_FW_NAME); MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME); +MODULE_FIRMWARE(USB8997_DEFAULT_FW_NAME); diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h index 57e1a5736318..f0051f8c8981 100644 --- a/drivers/net/wireless/mwifiex/usb.h +++ b/drivers/net/wireless/mwifiex/usb.h @@ -32,6 +32,8 @@ #define USB8897_PID_2 0x2046 #define USB8801_PID_1 0x2049 #define USB8801_PID_2 0x204a +#define USB8997_PID_1 0x204d +#define USB8997_PID_2 0x204e #define USB8XXX_FW_DNLD 1 @@ -46,6 +48,7 @@ #define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin" #define USB8801_DEFAULT_FW_NAME "mrvl/usb8801_uapsta.bin" #define USB8897_DEFAULT_FW_NAME "mrvl/usb8897_uapsta.bin" +#define USB8997_DEFAULT_FW_NAME "mrvl/usb8997_uapsta.bin" #define FW_DNLD_TX_BUF_SIZE 620 #define FW_DNLD_RX_BUF_SIZE 2048 diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c index 2504e422364a..0cec8a64473e 100644 --- a/drivers/net/wireless/mwifiex/util.c +++ b/drivers/net/wireless/mwifiex/util.c @@ -126,6 +126,10 @@ static int num_of_items = ARRAY_SIZE(items); int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter) { + if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) + if (adapter->if_ops.init_fw_port) + adapter->if_ops.init_fw_port(adapter); + 
adapter->init_wait_q_woken = true; wake_up_interruptible(&adapter->init_wait_q); return 0; @@ -496,16 +500,12 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb) int mwifiex_complete_cmd(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node) { - mwifiex_dbg(adapter, CMD, - "cmd completed: status=%d\n", + WARN_ON(!cmd_node->wait_q_enabled); + mwifiex_dbg(adapter, CMD, "cmd completed: status=%d\n", adapter->cmd_wait_q.status); - *(cmd_node->condition) = true; - - if (adapter->cmd_wait_q.status == -ETIMEDOUT) - mwifiex_dbg(adapter, ERROR, "cmd timeout\n"); - else - wake_up_interruptible(&adapter->cmd_wait_q.wait); + *cmd_node->condition = true; + wake_up_interruptible(&adapter->cmd_wait_q.wait); return 0; } diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 77361af68b18..9420fc61c2e6 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -5019,35 +5019,36 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16); rcu_read_unlock(); - } - if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc && - !priv->ap_fw) { - rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates); - if (rc) - goto out; + if (changed & BSS_CHANGED_ASSOC) { + if (!priv->ap_fw) { + rc = mwl8k_cmd_set_rate(hw, vif, + ap_legacy_rates, + ap_mcs_rates); + if (rc) + goto out; - rc = mwl8k_cmd_use_fixed_rate_sta(hw); - if (rc) - goto out; - } else { - if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc && - priv->ap_fw) { - int idx; - int rate; + rc = mwl8k_cmd_use_fixed_rate_sta(hw); + if (rc) + goto out; + } else { + int idx; + int rate; - /* Use AP firmware specific rate command. - */ - idx = ffs(vif->bss_conf.basic_rates); - if (idx) - idx--; + /* Use AP firmware specific rate command. + */ + idx = ffs(vif->bss_conf.basic_rates); + if (idx) + idx--; - if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) - rate = mwl8k_rates_24[idx].hw_value; - else - rate = mwl8k_rates_50[idx].hw_value; + if (hw->conf.chandef.chan->band == + IEEE80211_BAND_2GHZ) + rate = mwl8k_rates_24[idx].hw_value; + else + rate = mwl8k_rates_50[idx].hw_value; - mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate); + mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate); + } } } diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c index c410180479e6..7b5c554323c7 100644 --- a/drivers/net/wireless/orinoco/main.c +++ b/drivers/net/wireless/orinoco/main.c @@ -2321,8 +2321,6 @@ void free_orinocodev(struct orinoco_private *priv) struct orinoco_rx_data *rx_data, *temp; struct orinoco_scan_data *sd, *sdtemp; - wiphy_unregister(wiphy); - /* If the tasklet is scheduled when we call tasklet_kill it * will run one final time. However the tasklet will only * drain priv->rx_list if the hw is still available. 
*/ diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c index c0a27377d9e2..a956f965a1e5 100644 --- a/drivers/net/wireless/orinoco/orinoco_cs.c +++ b/drivers/net/wireless/orinoco/orinoco_cs.c @@ -118,6 +118,7 @@ static void orinoco_cs_detach(struct pcmcia_device *link) orinoco_cs_release(link); + wiphy_unregister(priv_to_wiphy(priv)); free_orinocodev(priv); } /* orinoco_cs_detach */ diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c index 1b543e30eff7..048693b6c6c2 100644 --- a/drivers/net/wireless/orinoco/orinoco_nortel.c +++ b/drivers/net/wireless/orinoco/orinoco_nortel.c @@ -223,13 +223,15 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev, err = orinoco_if_add(priv, 0, 0, NULL); if (err) { printk(KERN_ERR PFX "orinoco_if_add() failed\n"); - goto fail; + goto fail_wiphy; } pci_set_drvdata(pdev, priv); return 0; + fail_wiphy: + wiphy_unregister(priv_to_wiphy(priv)); fail: free_irq(pdev->irq, priv); @@ -263,6 +265,7 @@ static void orinoco_nortel_remove_one(struct pci_dev *pdev) iowrite16(0, card->bridge_io + 10); orinoco_if_del(priv); + wiphy_unregister(priv_to_wiphy(priv)); free_irq(pdev->irq, priv); free_orinocodev(priv); pci_iounmap(pdev, priv->hw.iobase); diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c index 74219d59d7e1..4938a2208a37 100644 --- a/drivers/net/wireless/orinoco/orinoco_pci.c +++ b/drivers/net/wireless/orinoco/orinoco_pci.c @@ -173,13 +173,15 @@ static int orinoco_pci_init_one(struct pci_dev *pdev, err = orinoco_if_add(priv, 0, 0, NULL); if (err) { printk(KERN_ERR PFX "orinoco_if_add() failed\n"); - goto fail; + goto fail_wiphy; } pci_set_drvdata(pdev, priv); return 0; + fail_wiphy: + wiphy_unregister(priv_to_wiphy(priv)); fail: free_irq(pdev->irq, priv); @@ -203,6 +205,7 @@ static void orinoco_pci_remove_one(struct pci_dev *pdev) struct orinoco_private *priv = pci_get_drvdata(pdev); orinoco_if_del(priv); + wiphy_unregister(priv_to_wiphy(priv)); free_irq(pdev->irq, priv); free_orinocodev(priv); pci_iounmap(pdev, priv->hw.iobase); diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c index 8b045236b6e0..221352027779 100644 --- a/drivers/net/wireless/orinoco/orinoco_plx.c +++ b/drivers/net/wireless/orinoco/orinoco_plx.c @@ -262,13 +262,15 @@ static int orinoco_plx_init_one(struct pci_dev *pdev, err = orinoco_if_add(priv, 0, 0, NULL); if (err) { printk(KERN_ERR PFX "orinoco_if_add() failed\n"); - goto fail; + goto fail_wiphy; } pci_set_drvdata(pdev, priv); return 0; + fail_wiphy: + wiphy_unregister(priv_to_wiphy(priv)); fail: free_irq(pdev->irq, priv); @@ -299,6 +301,7 @@ static void orinoco_plx_remove_one(struct pci_dev *pdev) struct orinoco_pci_card *card = priv->card; orinoco_if_del(priv); + wiphy_unregister(priv_to_wiphy(priv)); free_irq(pdev->irq, priv); free_orinocodev(priv); pci_iounmap(pdev, priv->hw.iobase); diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c index 91f05442de28..26a57d773d30 100644 --- a/drivers/net/wireless/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/orinoco/orinoco_usb.c @@ -1502,6 +1502,7 @@ static inline void ezusb_delete(struct ezusb_priv *upriv) if (upriv->dev) { struct orinoco_private *priv = ndev_priv(upriv->dev); orinoco_if_del(priv); + wiphy_unregister(priv_to_wiphy(priv)); free_orinocodev(priv); } } @@ -1695,6 +1696,7 @@ static int ezusb_probe(struct usb_interface *interface, 
if (orinoco_if_add(priv, 0, 0, &ezusb_netdev_ops) != 0) { upriv->dev = NULL; err("%s: orinoco_if_add() failed", __func__); + wiphy_unregister(priv_to_wiphy(priv)); goto error; } upriv->dev = priv->ndev; diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index b6cc9ff47fc2..1c6788aecc62 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common) (struct rsi_91x_sdiodev *)adapter->rsi_dev; u32 len; u32 num_blocks; + const u8 *fw; const struct firmware *fw_entry = NULL; u32 block_size = dev->tx_blk_size; int status = 0; @@ -200,6 +201,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common) return status; } + /* Copy firmware into DMA-accessible memory */ + fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL); + if (!fw) + return -ENOMEM; len = fw_entry->size; if (len % 4) @@ -210,7 +215,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common) rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len); rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks); - status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks); + status = rsi_copy_to_card(common, fw, len, num_blocks); + kfree(fw); release_firmware(fw_entry); return status; } diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c index 1106ce76707e..30c2cf7fa93b 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c @@ -146,7 +146,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common) return status; } + /* Copy firmware into DMA-accessible memory */ fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL); + if (!fw) + return -ENOMEM; len = fw_entry->size; if (len % 4) @@ -158,6 +161,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common) rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks); status = rsi_copy_to_card(common, fw, len, num_blocks); + kfree(fw); release_firmware(fw_entry); return status; } diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index 2b4ef256c6b9..de62f5dcb62f 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig @@ -240,7 +240,6 @@ config RT2X00_LIB_USB config RT2X00_LIB tristate - select AVERAGE config RT2X00_LIB_FIRMWARE bool diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h index afba0739c3b8..78cc035b2d17 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.h +++ b/drivers/net/wireless/rt2x00/rt2500usb.h @@ -54,7 +54,7 @@ #define CSR_REG_BASE 0x0400 #define CSR_REG_SIZE 0x0100 #define EEPROM_BASE 0x0000 -#define EEPROM_SIZE 0x006a +#define EEPROM_SIZE 0x006e #define BBP_BASE 0x0000 #define BBP_SIZE 0x0060 #define RF_BASE 0x0004 diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index 9bb398bed9bb..3282ddb766f4 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -254,6 +254,8 @@ struct link_qual { int tx_failed; }; +DECLARE_EWMA(rssi, 1024, 8) + /* * Antenna settings about the currently active link. */ @@ -285,7 +287,7 @@ struct link_ant { * Similar to the avg_rssi in the link_qual structure * this value is updated by using the walking average. 
*/ - struct ewma rssi_ant; + struct ewma_rssi rssi_ant; }; /* @@ -314,7 +316,7 @@ struct link { /* * Currently active average RSSI value */ - struct ewma avg_rssi; + struct ewma_rssi avg_rssi; /* * Work structure for scheduling periodic link tuning. diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c index 9b941c0c1264..017188e5a736 100644 --- a/drivers/net/wireless/rt2x00/rt2x00link.c +++ b/drivers/net/wireless/rt2x00/rt2x00link.c @@ -33,15 +33,11 @@ */ #define DEFAULT_RSSI -128 -/* Constants for EWMA calculations. */ -#define RT2X00_EWMA_FACTOR 1024 -#define RT2X00_EWMA_WEIGHT 8 - -static inline int rt2x00link_get_avg_rssi(struct ewma *ewma) +static inline int rt2x00link_get_avg_rssi(struct ewma_rssi *ewma) { unsigned long avg; - avg = ewma_read(ewma); + avg = ewma_rssi_read(ewma); if (avg) return -avg; @@ -76,8 +72,7 @@ static void rt2x00link_antenna_update_rssi_history(struct rt2x00_dev *rt2x00dev, static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev) { - ewma_init(&rt2x00dev->link.ant.rssi_ant, RT2X00_EWMA_FACTOR, - RT2X00_EWMA_WEIGHT); + ewma_rssi_init(&rt2x00dev->link.ant.rssi_ant); } static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev) @@ -225,12 +220,12 @@ void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev, /* * Update global RSSI */ - ewma_add(&link->avg_rssi, -rxdesc->rssi); + ewma_rssi_add(&link->avg_rssi, -rxdesc->rssi); /* * Update antenna RSSI */ - ewma_add(&ant->rssi_ant, -rxdesc->rssi); + ewma_rssi_add(&ant->rssi_ant, -rxdesc->rssi); } void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev) @@ -285,8 +280,7 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna) */ rt2x00dev->link.count = 0; memset(qual, 0, sizeof(*qual)); - ewma_init(&rt2x00dev->link.avg_rssi, RT2X00_EWMA_FACTOR, - RT2X00_EWMA_WEIGHT); + ewma_rssi_init(&rt2x00dev->link.avg_rssi); /* * Restore the VGC level as stored in the registers, diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index 3b3a88b53b11..585d0883c7e5 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c @@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); struct sk_buff *skb = ieee80211_beacon_get(hw, vif); + struct rtl_tcb_desc tcb_desc; - if (skb) - rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL); + if (skb) { + memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); + rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc); + } } static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c index c8058aa73ecf..629125658b87 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c @@ -200,7 +200,7 @@ int rtl88e_download_fw(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl92c_firmware_header *pfwheader; + struct rtlwifi_firmware_header *pfwheader; u8 *pfwdata; u32 fwsize; int err; @@ -209,7 +209,7 @@ int rtl88e_download_fw(struct ieee80211_hw *hw, if (!rtlhal->pfirmware) return 1; - pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware; + pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware; pfwdata = rtlhal->pfirmware; fwsize = rtlhal->fwsize; RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, @@ -219,10 +219,10 @@ int rtl88e_download_fw(struct ieee80211_hw 
*hw, RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Firmware Version(%d), Signature(%#x), Size(%d)\n", pfwheader->version, pfwheader->signature, - (int)sizeof(struct rtl92c_firmware_header)); + (int)sizeof(struct rtlwifi_firmware_header)); - pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header); - fwsize = fwsize - sizeof(struct rtl92c_firmware_header); + pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header); + fwsize = fwsize - sizeof(struct rtlwifi_firmware_header); } if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) { diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h index 05e944e451f4..21bd4a5337ab 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h @@ -37,7 +37,7 @@ #define FW_8192C_POLLING_TIMEOUT_COUNT 3000 #define IS_FW_HEADER_EXIST(_pfwhdr) \ - ((_pfwhdr->signature&0xFFFF) == 0x88E1) + ((le16_to_cpu(_pfwhdr->signature) & 0xFFFF) == 0x88E1) #define USE_OLD_WOWLAN_DEBUG_FW 0 #define H2C_88E_RSVDPAGE_LOC_LEN 5 @@ -131,25 +131,6 @@ #define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) #define FW_PWR_STATE_RF_OFF 0 -struct rtl92c_firmware_header { - u16 signature; - u8 category; - u8 function; - u16 version; - u8 subversion; - u8 rsvd1; - u8 month; - u8 date; - u8 hour; - u8 minute; - u16 ramcodesize; - u16 rsvd2; - u32 svnindex; - u32 rsvd3; - u32 rsvd4; - u32 rsvd5; -}; - enum rtl8188e_h2c_cmd { H2C_88E_RSVDPAGE = 0, H2C_88E_JOINBSSRPT = 1, diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c index 0aca6f47487c..03cbe4cf110b 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c @@ -39,6 +39,7 @@ #define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1) #define BT_RSSI_STATE_BG_EDCA_LOW BIT_OFFSET_LEN_MASK_32(3, 1) #define BT_RSSI_STATE_TXPOWER_LOW BIT_OFFSET_LEN_MASK_32(4, 1) +#define BT_MASK 0x00ffffff #define RTLPRIV (struct rtl_priv *) #define GET_UNDECORATED_AVERAGE_RSSI(_priv) \ @@ -312,7 +313,7 @@ static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw) struct dig_t *digtable = &rtlpriv->dm_digtable; u32 isbt; - /* modify DIG lower bound, deal with abnorally large false alarm */ + /* modify DIG lower bound, deal with abnormally large false alarm */ if (rtlpriv->falsealm_cnt.cnt_all > 10000) { digtable->large_fa_hit++; if (digtable->forbidden_igi < digtable->cur_igvalue) { @@ -1536,13 +1537,11 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw) return false; bt_state = rtl_read_byte(rtlpriv, 0x4fd); - bt_tx = rtl_read_dword(rtlpriv, 0x488); - bt_tx = bt_tx & 0x00ffffff; - bt_pri = rtl_read_dword(rtlpriv, 0x48c); - bt_pri = bt_pri & 0x00ffffff; + bt_tx = rtl_read_dword(rtlpriv, 0x488) & BT_MASK; + bt_pri = rtl_read_dword(rtlpriv, 0x48c) & BT_MASK; polling = rtl_read_dword(rtlpriv, 0x490); - if (bt_tx == 0xffffffff && bt_pri == 0xffffffff && + if (bt_tx == BT_MASK && bt_pri == BT_MASK && polling == 0xffffffff && bt_state == 0xff) return false; diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c index 14b819ea8b71..43fcb25c885f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c @@ -221,7 +221,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl92c_firmware_header *pfwheader; + 
struct rtlwifi_firmware_header *pfwheader; u8 *pfwdata; u32 fwsize; int err; @@ -230,19 +230,19 @@ int rtl92c_download_fw(struct ieee80211_hw *hw) if (!rtlhal->pfirmware) return 1; - pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware; + pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware; pfwdata = (u8 *)rtlhal->pfirmware; fwsize = rtlhal->fwsize; if (IS_FW_HEADER_EXIST(pfwheader)) { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Firmware Version(%d), Signature(%#x),Size(%d)\n", pfwheader->version, pfwheader->signature, - (int)sizeof(struct rtl92c_firmware_header)); + (int)sizeof(struct rtlwifi_firmware_header)); - rtlhal->fw_version = pfwheader->version; + rtlhal->fw_version = le16_to_cpu(pfwheader->version); rtlhal->fw_subversion = pfwheader->subversion; - pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header); - fwsize = fwsize - sizeof(struct rtl92c_firmware_header); + pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header); + fwsize = fwsize - sizeof(struct rtlwifi_firmware_header); } _rtl92c_enable_fw_download(hw, true); diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h index e9f4281f5067..864806c19ca7 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h +++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h @@ -69,25 +69,6 @@ ((GET_CVID_CUT_VERSION(version) == \ CHIP_VENDOR_UMC_B_CUT) ? true : false) : false) -struct rtl92c_firmware_header { - __le16 signature; - u8 category; - u8 function; - __le16 version; - u8 subversion; - u8 rsvd1; - u8 month; - u8 date; - u8 hour; - u8 minute; - __le16 ramcodeSize; - __le16 rsvd2; - __le32 svnindex; - __le32 rsvd3; - __le32 rsvd4; - __le32 rsvd5; -}; - #define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 
1 : 0)) #define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \ diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index 7cf36619f250..25db369b5d18 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -818,26 +818,29 @@ static void _rtl92cu_init_usb_aggregation(struct ieee80211_hw *hw) static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw) { - u16 value16; - + u16 value16; + u32 value32; struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - mac->rx_conf = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APPFCS | - RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL | - RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32); - rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf); + value32 = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APPFCS | + RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL | + RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&value32)); /* Accept all multicast address */ rtl_write_dword(rtlpriv, REG_MAR, 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_MAR + 4, 0xFFFFFFFF); /* Accept all management frames */ value16 = 0xFFFF; - rtl92c_set_mgt_filter(hw, value16); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_MGT_FILTER, + (u8 *)(&value16)); /* Reject all control frame - default value is 0 */ - rtl92c_set_ctrl_filter(hw, 0x0); + value16 = 0x0; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CTRL_FILTER, + (u8 *)(&value16)); /* Accept all data frames */ value16 = 0xFFFF; - rtl92c_set_data_filter(hw, value16); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DATA_FILTER, + (u8 *)(&value16)); } static void _rtl92cu_init_beacon_parameters(struct ieee80211_hw *hw) @@ -988,17 +991,6 @@ static void _InitPABias(struct ieee80211_hw *hw) } } -static void _update_mac_setting(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - - mac->rx_conf = rtl_read_dword(rtlpriv, REG_RCR); - mac->rx_mgt_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP0); - mac->rx_ctrl_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP1); - mac->rx_data_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP2); -} - int rtl92cu_hw_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -1068,7 +1060,6 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw) } _rtl92cu_hw_configure(hw); _InitPABias(hw); - _update_mac_setting(hw); rtl92c_dm_init(hw); exit: local_irq_restore(flags); @@ -1620,7 +1611,6 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); enum wireless_mode wirelessmode = mac->mode; u8 idx = 0; @@ -1829,63 +1819,10 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) u4b_ac_param); break; default: - RT_ASSERT(false, - "SetHwReg8185(): invalid aci: %d !\n", + RT_ASSERT(false, "invalid aci: %d !\n", e_aci); break; } - if (rtlusb->acm_method != EACMWAY2_SW) - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_ACM_CTRL, &e_aci); - break; - } - case HW_VAR_ACM_CTRL:{ - u8 e_aci = *val; - union aci_aifsn *p_aci_aifsn = (union aci_aifsn *) - (&(mac->ac[0].aifs)); - u8 acm = p_aci_aifsn->f.acm; - u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL); - - acm_ctrl = - acm_ctrl | ((rtlusb->acm_method == 2) ? 
0x0 : 0x1); - if (acm) { - switch (e_aci) { - case AC0_BE: - acm_ctrl |= AcmHw_BeqEn; - break; - case AC2_VI: - acm_ctrl |= AcmHw_ViqEn; - break; - case AC3_VO: - acm_ctrl |= AcmHw_VoqEn; - break; - default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n", - acm); - break; - } - } else { - switch (e_aci) { - case AC0_BE: - acm_ctrl &= (~AcmHw_BeqEn); - break; - case AC2_VI: - acm_ctrl &= (~AcmHw_ViqEn); - break; - case AC3_VO: - acm_ctrl &= (~AcmHw_VoqEn); - break; - default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "switch case not processed\n"); - break; - } - } - RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE, - "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", - acm_ctrl); - rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl); break; } case HW_VAR_RCR:{ @@ -1999,12 +1936,15 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } case HW_VAR_MGT_FILTER: rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *)val); + mac->rx_mgt_filter = *(u16 *)val; break; case HW_VAR_CTRL_FILTER: rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *)val); + mac->rx_ctrl_filter = *(u16 *)val; break; case HW_VAR_DATA_FILTER: rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val); + mac->rx_data_filter = *(u16 *)val; break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c index 1c55a002d4bd..035713311a4a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c @@ -393,59 +393,9 @@ void rtl92c_disable_interrupt(struct ieee80211_hw *hw) void rtl92c_set_qos(struct ieee80211_hw *hw, int aci) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - u32 u4b_ac_param; rtl92c_dm_init_edca_turbo(hw); - u4b_ac_param = (u32) mac->ac[aci].aifs; - u4b_ac_param |= - ((u32) le16_to_cpu(mac->ac[aci].cw_min) & 0xF) << - AC_PARAM_ECW_MIN_OFFSET; - u4b_ac_param |= - ((u32) le16_to_cpu(mac->ac[aci].cw_max) & 0xF) << - AC_PARAM_ECW_MAX_OFFSET; - u4b_ac_param |= (u32) le16_to_cpu(mac->ac[aci].tx_op) << - AC_PARAM_TXOP_OFFSET; - RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD, "queue:%x, ac_param:%x\n", - aci, u4b_ac_param); - switch (aci) { - case AC1_BK: - rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param); - break; - case AC0_BE: - rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param); - break; - case AC2_VI: - rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param); - break; - case AC3_VO: - rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param); - break; - default: - RT_ASSERT(false, "invalid aci: %d !\n", aci); - break; - } -} - -/*------------------------------------------------------------------------- - * HW MAC Address - *-------------------------------------------------------------------------*/ -void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr) -{ - u32 i; - struct rtl_priv *rtlpriv = rtl_priv(hw); - - for (i = 0 ; i < ETH_ALEN ; i++) - rtl_write_byte(rtlpriv, (REG_MACID + i), *(addr+i)); - - RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, - "MAC Address: %02X-%02X-%02X-%02X-%02X-%02X\n", - rtl_read_byte(rtlpriv, REG_MACID), - rtl_read_byte(rtlpriv, REG_MACID+1), - rtl_read_byte(rtlpriv, REG_MACID+2), - rtl_read_byte(rtlpriv, REG_MACID+3), - rtl_read_byte(rtlpriv, REG_MACID+4), - rtl_read_byte(rtlpriv, REG_MACID+5)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, (u8 *)&aci); } void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size) @@ -644,47 +594,6 @@ void 
rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T) rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, value); } -u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - return rtl_read_word(rtlpriv, REG_RXFLTMAP0); -} - -void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - rtl_write_word(rtlpriv, REG_RXFLTMAP0, filter); -} - -u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - return rtl_read_word(rtlpriv, REG_RXFLTMAP1); -} - -void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - rtl_write_word(rtlpriv, REG_RXFLTMAP1, filter); -} - -u16 rtl92c_get_data_filter(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - return rtl_read_word(rtlpriv, REG_RXFLTMAP2); -} - -void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - rtl_write_word(rtlpriv, REG_RXFLTMAP2, filter); -} /*==============================================================*/ static u8 _rtl92c_query_rxpwrpercentage(char antpower) diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h index e34f0f14ccd7..553a4bfac668 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h @@ -48,7 +48,6 @@ void rtl92c_set_qos(struct ieee80211_hw *hw, int aci); /*--------------------------------------------------------------- * Hardware init functions *---------------------------------------------------------------*/ -void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr); void rtl92c_init_interrupt(struct ieee80211_hw *hw); void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size); @@ -73,15 +72,6 @@ void rtl92c_init_retry_function(struct ieee80211_hw *hw); void rtl92c_disable_fast_edca(struct ieee80211_hw *hw); void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T); -/* For filter */ -u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw); -void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter); -u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw); -void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter); -u16 rtl92c_get_data_filter(struct ieee80211_hw *hw); -void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter); - - u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw); struct rx_fwinfo_92c { diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c index 587b8c505a76..7c1db7e7572d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c +++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c @@ -420,7 +420,7 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw) "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n", de_digtable->recover_cnt, de_digtable->rx_gain_min); - /* deal with abnorally large false alarm */ + /* deal with abnormally large false alarm */ if (falsealm_cnt->cnt_all > 10000) { RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "dm_DIG(): Abnormally false alarm case\n"); diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h index 1646e7c3d0f8..8a38daa316cb 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h +++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h @@ -110,28 +110,6 @@ #define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \ SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val) -struct 
rtl92d_firmware_header { - u16 signature; - u8 category; - u8 function; - u16 version; - u8 subversion; - u8 rsvd1; - - u8 month; - u8 date; - u8 hour; - u8 minute; - u16 ramcodeSize; - u16 rsvd2; - - u32 svnindex; - u32 rsvd3; - - u32 rsvd4; - u32 rsvd5; -}; - int rtl92d_download_fw(struct ieee80211_hw *hw); void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c index 232865cc3ffd..0708eedd9671 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c @@ -198,7 +198,7 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl92c_firmware_header *pfwheader; + struct rtlwifi_firmware_header *pfwheader; u8 *pfwdata; u32 fwsize; int err; @@ -207,8 +207,8 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) if (!rtlhal->pfirmware) return 1; - pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware; - rtlhal->fw_version = pfwheader->version; + pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware; + rtlhal->fw_version = le16_to_cpu(pfwheader->version); rtlhal->fw_subversion = pfwheader->subversion; pfwdata = (u8 *)rtlhal->pfirmware; fwsize = rtlhal->fwsize; @@ -219,10 +219,10 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Firmware Version(%d), Signature(%#x),Size(%d)\n", pfwheader->version, pfwheader->signature, - (int)sizeof(struct rtl92c_firmware_header)); + (int)sizeof(struct rtlwifi_firmware_header)); - pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header); - fwsize = fwsize - sizeof(struct rtl92c_firmware_header); + pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header); + fwsize = fwsize - sizeof(struct rtlwifi_firmware_header); } else { RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "Firmware no Header, Signature(%#x)\n", diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h index 3e2a48e5fb4d..069da1e7e80a 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h @@ -33,7 +33,7 @@ #define FW_8192C_POLLING_TIMEOUT_COUNT 3000 #define IS_FW_HEADER_EXIST(_pfwhdr) \ - ((_pfwhdr->signature&0xFFF0) == 0x92E0) + ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x92E0) #define USE_OLD_WOWLAN_DEBUG_FW 0 #define H2C_92E_RSVDPAGE_LOC_LEN 5 @@ -89,25 +89,6 @@ #define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) #define FW_PWR_STATE_RF_OFF 0 -struct rtl92c_firmware_header { - u16 signature; - u8 category; - u8 function; - u16 version; - u8 subversion; - u8 rsvd1; - u8 month; - u8 date; - u8 hour; - u8 minute; - u16 ramcodesize; - u16 rsvd2; - u32 svnindex; - u32 rsvd3; - u32 rsvd4; - u32 rsvd5; -}; - enum rtl8192e_h2c_cmd { H2C_92E_RSVDPAGE = 0, H2C_92E_MSRRPT = 1, diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c index 8280bab43df4..3859b3e3d158 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c @@ -205,9 +205,9 @@ bool rtl8723e_get_btc_status(void) return true; } -static bool is_fw_header(struct rtl8723e_firmware_header *hdr) +static bool is_fw_header(struct rtlwifi_firmware_header *hdr) { - return (hdr->signature & 0xfff0) == 0x2300; + return 
(le16_to_cpu(hdr->signature) & 0xfff0) == 0x2300; } static struct rtl_hal_ops rtl8723e_hal_ops = { diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c index 1017f02d7bf7..d091f1d5f91e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c @@ -209,9 +209,9 @@ bool rtl8723be_get_btc_status(void) return true; } -static bool is_fw_header(struct rtl8723e_firmware_header *hdr) +static bool is_fw_header(struct rtlwifi_firmware_header *hdr) { - return (hdr->signature & 0xfff0) == 0x5300; + return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x5300; } static struct rtl_hal_ops rtl8723be_hal_ops = { @@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444); module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444); module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444); module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444); +module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444); module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog, bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c index dd698e7e9ace..a2f5e89bedfe 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c @@ -253,7 +253,7 @@ int rtl8723_download_fw(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl8723e_firmware_header *pfwheader; + struct rtlwifi_firmware_header *pfwheader; u8 *pfwdata; u32 fwsize; int err; @@ -263,7 +263,7 @@ int rtl8723_download_fw(struct ieee80211_hw *hw, if (!rtlhal->pfirmware) return 1; - pfwheader = (struct rtl8723e_firmware_header *)rtlhal->pfirmware; + pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware; pfwdata = rtlhal->pfirmware; fwsize = rtlhal->fwsize; @@ -275,10 +275,10 @@ int rtl8723_download_fw(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "Firmware Version(%d), Signature(%#x), Size(%d)\n", pfwheader->version, pfwheader->signature, - (int)sizeof(struct rtl8723e_firmware_header)); + (int)sizeof(struct rtlwifi_firmware_header)); - pfwdata = pfwdata + sizeof(struct rtl8723e_firmware_header); - fwsize = fwsize - sizeof(struct rtl8723e_firmware_header); + pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header); + fwsize = fwsize - sizeof(struct rtlwifi_firmware_header); } if (rtl_read_byte(rtlpriv, REG_MCUFWDL)&BIT(7)) { diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h index 3ebafc80972f..8ea372d1626e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h +++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h @@ -50,25 +50,6 @@ enum version_8723e { VERSION_UNKNOWN = 0xFF, }; -struct rtl8723e_firmware_header { - u16 signature; - u8 category; - u8 function; - u16 version; - u8 subversion; - u8 rsvd1; - u8 month; - u8 date; - u8 hour; - u8 minute; - u16 ramcodesize; - u16 rsvd2; - u32 svnindex; - u32 rsvd3; - u32 rsvd4; - u32 rsvd5; -}; - enum rtl8723be_cmd { H2C_8723BE_RSVDPAGE = 0, H2C_8723BE_JOINBSSRPT = 1, diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c index 95e95626b632..525eb234627c 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c 
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c @@ -210,7 +210,7 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl8821a_firmware_header *pfwheader; + struct rtlwifi_firmware_header *pfwheader; u8 *pfwdata; u32 fwsize; int err; @@ -228,8 +228,8 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) return 1; pfwheader = - (struct rtl8821a_firmware_header *)rtlhal->wowlan_firmware; - rtlhal->fw_version = pfwheader->version; + (struct rtlwifi_firmware_header *)rtlhal->wowlan_firmware; + rtlhal->fw_version = le16_to_cpu(pfwheader->version); rtlhal->fw_subversion = pfwheader->subversion; pfwdata = (u8 *)rtlhal->wowlan_firmware; fwsize = rtlhal->wowlan_fwsize; @@ -238,8 +238,8 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) return 1; pfwheader = - (struct rtl8821a_firmware_header *)rtlhal->pfirmware; - rtlhal->fw_version = pfwheader->version; + (struct rtlwifi_firmware_header *)rtlhal->pfirmware; + rtlhal->fw_version = le16_to_cpu(pfwheader->version); rtlhal->fw_subversion = pfwheader->subversion; pfwdata = (u8 *)rtlhal->pfirmware; fwsize = rtlhal->fwsize; @@ -255,8 +255,8 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) "Firmware Version(%d), Signature(%#x)\n", pfwheader->version, pfwheader->signature); - pfwdata = pfwdata + sizeof(struct rtl8821a_firmware_header); - fwsize = fwsize - sizeof(struct rtl8821a_firmware_header); + pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header); + fwsize = fwsize - sizeof(struct rtlwifi_firmware_header); } if (rtlhal->mac_func_enable) { diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h index 591c14c0b9b5..8f5b4aade3c9 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h @@ -34,10 +34,10 @@ #define FW_8821AE_POLLING_TIMEOUT_COUNT 6000 #define IS_FW_HEADER_EXIST_8812(_pfwhdr) \ - ((_pfwhdr->signature&0xFFF0) == 0x9500) + ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x9500) #define IS_FW_HEADER_EXIST_8821(_pfwhdr) \ - ((_pfwhdr->signature&0xFFF0) == 0x2100) + ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x2100) #define USE_OLD_WOWLAN_DEBUG_FW 0 @@ -137,25 +137,6 @@ #define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) #define FW_PWR_STATE_RF_OFF 0 -struct rtl8821a_firmware_header { - u16 signature; - u8 category; - u8 function; - u16 version; - u8 subversion; - u8 rsvd1; - u8 month; - u8 date; - u8 hour; - u8 minute; - u16 ramcodeSize; - u16 rsvd2; - u32 svnindex; - u32 rsvd3; - u32 rsvd4; - u32 rsvd5; -}; - enum rtl8812_c2h_evt { C2H_8812_DBG = 0, C2H_8812_LB = 1, diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index 2b770b5e2620..b90ca618b123 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h @@ -222,6 +222,25 @@ enum rf_tx_num { #define WOL_REASON_REALWOW_V2_WAKEUPPKT BIT(9) #define WOL_REASON_REALWOW_V2_ACKLOST BIT(10) +struct rtlwifi_firmware_header { + __le16 signature; + u8 category; + u8 function; + __le16 version; + u8 subversion; + u8 rsvd1; + u8 month; + u8 date; + u8 hour; + u8 minute; + __le16 ramcodeSize; + __le16 rsvd2; + __le32 svnindex; + __le32 rsvd3; + __le32 rsvd4; + __le32 rsvd5; +}; + struct txpower_info_2g { u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G]; u8 
index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G]; @@ -2064,16 +2083,12 @@ struct rtl_tcb_desc { bool tx_enable_sw_calc_duration; }; -struct rtl92c_firmware_header; - struct rtl_wow_pattern { u8 type; u16 crc; u32 mask[4]; }; -struct rtl8723e_firmware_header; - struct rtl_hal_ops { int (*init_sw_vars) (struct ieee80211_hw *hw); void (*deinit_sw_vars) (struct ieee80211_hw *hw); @@ -2177,7 +2192,7 @@ struct rtl_hal_ops { void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); bool (*get_btc_status) (void); - bool (*is_fw_header)(struct rtl8723e_firmware_header *hdr); + bool (*is_fw_header)(struct rtlwifi_firmware_header *hdr); u32 (*rx_command_packet)(struct ieee80211_hw *hw, struct rtl_stats status, struct sk_buff *skb); void (*add_wowlan_pattern)(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c index 0c0d5cd98514..7c355fff2c5e 100644 --- a/drivers/net/wireless/ti/wl12xx/scan.c +++ b/drivers/net/wireless/ti/wl12xx/scan.c @@ -118,7 +118,11 @@ static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif, if (passive) scan_options |= WL1271_SCAN_OPT_PASSIVE; - cmd->params.role_id = wlvif->role_id; + /* scan on the dev role if the regular one is not started */ + if (wlcore_is_p2p_mgmt(wlvif)) + cmd->params.role_id = wlvif->dev_role_id; + else + cmd->params.role_id = wlvif->role_id; if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) { ret = -EINVAL; diff --git a/drivers/net/wireless/ti/wl18xx/acx.c b/drivers/net/wireless/ti/wl18xx/acx.c index 67f2a0eec854..4be0409308cb 100644 --- a/drivers/net/wireless/ti/wl18xx/acx.c +++ b/drivers/net/wireless/ti/wl18xx/acx.c @@ -282,3 +282,30 @@ out: kfree(acx); return ret; } + +int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl) +{ + struct acx_dynamic_fw_traces_cfg *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx dynamic fw traces config %d", + wl->dynamic_fw_traces); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->dynamic_fw_traces = cpu_to_le32(wl->dynamic_fw_traces); + + ret = wl1271_cmd_configure(wl, ACX_DYNAMIC_TRACES_CFG, + acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx config dynamic fw traces failed: %d", ret); + goto out; + } +out: + kfree(acx); + return ret; +} diff --git a/drivers/net/wireless/ti/wl18xx/acx.h b/drivers/net/wireless/ti/wl18xx/acx.h index 4afccd4b9467..342a2993ef98 100644 --- a/drivers/net/wireless/ti/wl18xx/acx.h +++ b/drivers/net/wireless/ti/wl18xx/acx.h @@ -35,7 +35,8 @@ enum { ACX_PEER_CAP = 0x0056, ACX_INTERRUPT_NOTIFY = 0x0057, ACX_RX_BA_FILTER = 0x0058, - ACX_AP_SLEEP_CFG = 0x0059 + ACX_AP_SLEEP_CFG = 0x0059, + ACX_DYNAMIC_TRACES_CFG = 0x005A, }; /* numbers of bits the length field takes (add 1 for the actual number) */ @@ -92,27 +93,26 @@ struct wl18xx_acx_checksum_state { struct wl18xx_acx_error_stats { - u32 error_frame; - u32 error_null_Frame_tx_start; - u32 error_numll_frame_cts_start; - u32 error_bar_retry; - u32 error_frame_cts_nul_flid; -} __packed; - -struct wl18xx_acx_debug_stats { - u32 debug1; - u32 debug2; - u32 debug3; - u32 debug4; - u32 debug5; - u32 debug6; -} __packed; - -struct wl18xx_acx_ring_stats { - u32 prepared_descs; - u32 tx_cmplt; + u32 error_frame_non_ctrl; + u32 error_frame_ctrl; + u32 error_frame_during_protection; + u32 null_frame_tx_start; + u32 null_frame_cts_start; + u32 bar_retry; + u32 num_frame_cts_nul_flid; + u32 tx_abort_failure; + u32 tx_resume_failure; + u32 rx_cmplt_db_overflow_cnt; + u32 
elp_while_rx_exch; + u32 elp_while_tx_exch; + u32 elp_while_tx; + u32 elp_while_nvic_pending; + u32 rx_excessive_frame_len; + u32 burst_mismatch; + u32 tbc_exch_mismatch; } __packed; +#define NUM_OF_RATES_INDEXES 30 struct wl18xx_acx_tx_stats { u32 tx_prepared_descs; u32 tx_cmplt; @@ -122,7 +122,7 @@ struct wl18xx_acx_tx_stats { u32 tx_data_programmed; u32 tx_burst_programmed; u32 tx_starts; - u32 tx_imm_resp; + u32 tx_stop; u32 tx_start_templates; u32 tx_start_int_templates; u32 tx_start_fw_gen; @@ -131,13 +131,14 @@ struct wl18xx_acx_tx_stats { u32 tx_exch; u32 tx_retry_template; u32 tx_retry_data; + u32 tx_retry_per_rate[NUM_OF_RATES_INDEXES]; u32 tx_exch_pending; u32 tx_exch_expiry; u32 tx_done_template; u32 tx_done_data; u32 tx_done_int_template; - u32 tx_frame_checksum; - u32 tx_checksum_result; + u32 tx_cfe1; + u32 tx_cfe2; u32 frag_called; u32 frag_mpdu_alloc_failed; u32 frag_init_called; @@ -165,11 +166,8 @@ struct wl18xx_acx_rx_stats { u32 rx_cmplt_task; u32 rx_phy_hdr; u32 rx_timeout; + u32 rx_rts_timeout; u32 rx_timeout_wa; - u32 rx_wa_density_dropped_frame; - u32 rx_wa_ba_not_expected; - u32 rx_frame_checksum; - u32 rx_checksum_result; u32 defrag_called; u32 defrag_init_called; u32 defrag_in_process_called; @@ -179,6 +177,7 @@ struct wl18xx_acx_rx_stats { u32 decrypt_key_not_found; u32 defrag_need_decrypt; u32 rx_tkip_replays; + u32 rx_xfr; } __packed; struct wl18xx_acx_isr_stats { @@ -193,21 +192,13 @@ struct wl18xx_acx_pwr_stats { u32 connection_out_of_sync; u32 cont_miss_bcns_spread[PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD]; u32 rcvd_awake_bcns_cnt; -} __packed; - -struct wl18xx_acx_event_stats { - u32 calibration; - u32 rx_mismatch; - u32 rx_mem_empty; -} __packed; - -struct wl18xx_acx_ps_poll_stats { - u32 ps_poll_timeouts; - u32 upsd_timeouts; - u32 upsd_max_ap_turn; - u32 ps_poll_max_ap_turn; - u32 ps_poll_utilization; - u32 upsd_utilization; + u32 sleep_time_count; + u32 sleep_time_avg; + u32 sleep_cycle_avg; + u32 sleep_percent; + u32 ap_sleep_active_conf; + u32 ap_sleep_user_conf; + u32 ap_sleep_counter; } __packed; struct wl18xx_acx_rx_filter_stats { @@ -227,11 +218,11 @@ struct wl18xx_acx_rx_rate_stats { } __packed; #define AGGR_STATS_TX_AGG 16 -#define AGGR_STATS_TX_RATE 16 #define AGGR_STATS_RX_SIZE_LEN 16 struct wl18xx_acx_aggr_stats { - u32 tx_agg_vs_rate[AGGR_STATS_TX_AGG * AGGR_STATS_TX_RATE]; + u32 tx_agg_rate[AGGR_STATS_TX_AGG]; + u32 tx_agg_len[AGGR_STATS_TX_AGG]; u32 rx_size[AGGR_STATS_RX_SIZE_LEN]; } __packed; @@ -240,8 +231,6 @@ struct wl18xx_acx_aggr_stats { struct wl18xx_acx_pipeline_stats { u32 hs_tx_stat_fifo_int; u32 hs_rx_stat_fifo_int; - u32 tcp_tx_stat_fifo_int; - u32 tcp_rx_stat_fifo_int; u32 enc_tx_stat_fifo_int; u32 enc_rx_stat_fifo_int; u32 rx_complete_stat_fifo_int; @@ -249,38 +238,61 @@ struct wl18xx_acx_pipeline_stats { u32 post_proc_swi; u32 sec_frag_swi; u32 pre_to_defrag_swi; - u32 defrag_to_csum_swi; - u32 csum_to_rx_xfer_swi; + u32 defrag_to_rx_xfer_swi; u32 dec_packet_in; u32 dec_packet_in_fifo_full; u32 dec_packet_out; - u32 cs_rx_packet_in; - u32 cs_rx_packet_out; u16 pipeline_fifo_full[PIPE_STATS_HW_FIFO]; + u16 padding; } __packed; -struct wl18xx_acx_mem_stats { - u32 rx_free_mem_blks; - u32 tx_free_mem_blks; - u32 fwlog_free_mem_blks; - u32 fw_gen_free_mem_blks; +#define DIVERSITY_STATS_NUM_OF_ANT 2 + +struct wl18xx_acx_diversity_stats { + u32 num_of_packets_per_ant[DIVERSITY_STATS_NUM_OF_ANT]; + u32 total_num_of_toggles; +} __packed; + +struct wl18xx_acx_thermal_stats { + u16 irq_thr_low; + u16 irq_thr_high; + u16 tx_stop; + u16 
tx_resume; + u16 false_irq; + u16 adc_source_unexpected; +} __packed; + +#define WL18XX_NUM_OF_CALIBRATIONS_ERRORS 18 +struct wl18xx_acx_calib_failure_stats { + u16 fail_count[WL18XX_NUM_OF_CALIBRATIONS_ERRORS]; + u32 calib_count; +} __packed; + +struct wl18xx_roaming_stats { + s32 rssi_level; +} __packed; + +struct wl18xx_dfs_stats { + u32 num_of_radar_detections; } __packed; struct wl18xx_acx_statistics { struct acx_header header; struct wl18xx_acx_error_stats error; - struct wl18xx_acx_debug_stats debug; struct wl18xx_acx_tx_stats tx; struct wl18xx_acx_rx_stats rx; struct wl18xx_acx_isr_stats isr; struct wl18xx_acx_pwr_stats pwr; - struct wl18xx_acx_ps_poll_stats ps_poll; struct wl18xx_acx_rx_filter_stats rx_filter; struct wl18xx_acx_rx_rate_stats rx_rate; struct wl18xx_acx_aggr_stats aggr_size; struct wl18xx_acx_pipeline_stats pipeline; - struct wl18xx_acx_mem_stats mem; + struct wl18xx_acx_diversity_stats diversity; + struct wl18xx_acx_thermal_stats thermal; + struct wl18xx_acx_calib_failure_stats calib; + struct wl18xx_roaming_stats roaming; + struct wl18xx_dfs_stats dfs; } __packed; struct wl18xx_acx_clear_statistics { @@ -367,6 +379,15 @@ struct acx_ap_sleep_cfg { u8 idle_conn_thresh; } __packed; +/* + * ACX_DYNAMIC_TRACES_CFG + * configure the FW dynamic traces + */ +struct acx_dynamic_fw_traces_cfg { + struct acx_header header; + __le32 dynamic_fw_traces; +} __packed; + int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap, u32 sdio_blk_size, u32 extra_mem_blks, u32 len_field_size); @@ -380,5 +401,6 @@ int wl18xx_acx_set_peer_cap(struct wl1271 *wl, int wl18xx_acx_interrupt_notify_config(struct wl1271 *wl, bool action); int wl18xx_acx_rx_ba_filter(struct wl1271 *wl, bool action); int wl18xx_acx_ap_sleep(struct wl1271 *wl); +int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl); #endif /* __WL18XX_ACX_H__ */ diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c index 5fbd2230f372..4edfe28395f0 100644 --- a/drivers/net/wireless/ti/wl18xx/debugfs.c +++ b/drivers/net/wireless/ti/wl18xx/debugfs.c @@ -36,18 +36,23 @@ DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c, wl18xx_acx_statistics) -WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug1, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug2, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug3, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug4, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug5, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug6, "%u"); - -WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(error, error_null_Frame_tx_start, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(error, error_numll_frame_cts_start, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(error, error_bar_retry, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_cts_nul_flid, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_non_ctrl, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_ctrl, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_during_protection, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_tx_start, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_cts_start, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, bar_retry, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, num_frame_cts_nul_flid, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, tx_abort_failure, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, tx_resume_failure, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_cmplt_db_overflow_cnt, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_rx_exch, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, 
elp_while_tx_exch, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_tx, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_nvic_pending, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_excessive_frame_len, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, burst_mismatch, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(error, tbc_exch_mismatch, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_prepared_descs, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cmplt, "%u"); @@ -57,7 +62,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_programmed, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_programmed, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_burst_programmed, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_starts, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_imm_resp, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_stop, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_templates, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_int_templates, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_fw_gen, "%u"); @@ -66,13 +71,15 @@ WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_null_frame, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_template, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_data, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(tx, tx_retry_per_rate, + NUM_OF_RATES_INDEXES); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_pending, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_expiry, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_template, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_data, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_int_template, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_frame_checksum, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_checksum_result, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe1, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe2, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_called, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_mpdu_alloc_failed, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_init_called, "%u"); @@ -97,11 +104,8 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_pre_complt, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt_task, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_phy_hdr, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_rts_timeout, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout_wa, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_density_dropped_frame, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_ba_not_expected, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_frame_checksum, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_checksum_result, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_called, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_init_called, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_in_process_called, "%u"); @@ -111,6 +115,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_decrypt_failed, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, decrypt_key_not_found, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_decrypt, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_tkip_replays, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_xfr, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u"); @@ -120,14 +125,13 @@ WL18XX_DEBUGFS_FWSTATS_FILE(pwr, connection_out_of_sync, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pwr, cont_miss_bcns_spread, PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD); WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_bcns_cnt, "%u"); - - -WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_timeouts, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_timeouts, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_max_ap_turn, "%u"); 
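
Note: the wl18xx firmware-statistics debugfs files are generated straight from the
wl18xx_acx_statistics member names by token-pasting macros, which is why every field
renamed in acx.h needs a matching one-line change here in debugfs.c. A minimal sketch
of that pattern with hypothetical names (the real WL18XX_DEBUGFS_FWSTATS_FILE macros
in wlcore also refresh the statistics from firmware and take wl->mutex):

#include <stdio.h>

struct fw_stats {
	struct { unsigned int bar_retry; } error;
	struct { unsigned int tx_starts; } tx;
};

/* One accessor per (section, field) pair, named by token pasting, so a
 * struct field rename that is not mirrored here fails to compile. */
#define FWSTATS_FILE(section, field, fmt)				\
static void show_##section##_##field(const struct fw_stats *s)		\
{									\
	printf(#section "/" #field ": " fmt "\n", s->section.field);	\
}

FWSTATS_FILE(error, bar_retry, "%u")
FWSTATS_FILE(tx, tx_starts, "%u")

int main(void)
{
	struct fw_stats s = { .error = { 7 }, .tx = { 42 } };

	show_error_bar_retry(&s);	/* prints "error/bar_retry: 7" */
	show_tx_tx_starts(&s);		/* prints "tx/tx_starts: 42" */
	return 0;
}

A mismatch between the struct and the macro list is caught at build time rather than
showing up as a debugfs file reading stale memory.
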
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_max_ap_turn, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_utilization, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_utilization, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_count, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_avg, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_cycle_avg, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_percent, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_active_conf, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_user_conf, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_counter, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, beacon_filter, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, arp_filter, "%u"); @@ -141,14 +145,14 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50); -WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate, - AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE); +WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_rate, + AGGR_STATS_TX_AGG); +WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_len, + AGGR_STATS_TX_AGG); WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, rx_size, AGGR_STATS_RX_SIZE_LEN); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, hs_tx_stat_fifo_int, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_tx_stat_fifo_int, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_rx_stat_fifo_int, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_tx_stat_fifo_int, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_rx_stat_fifo_int, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, rx_complete_stat_fifo_int, "%u"); @@ -156,21 +160,32 @@ WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_proc_swi, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, post_proc_swi, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, sec_frag_swi, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_to_defrag_swi, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_csum_swi, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, csum_to_rx_xfer_swi, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_rx_xfer_swi, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in_fifo_full, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_out, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_in, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_out, "%u"); WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pipeline, pipeline_fifo_full, PIPE_STATS_HW_FIFO); -WL18XX_DEBUGFS_FWSTATS_FILE(mem, rx_free_mem_blks, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(mem, tx_free_mem_blks, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(mem, fwlog_free_mem_blks, "%u"); -WL18XX_DEBUGFS_FWSTATS_FILE(mem, fw_gen_free_mem_blks, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(diversity, num_of_packets_per_ant, + DIVERSITY_STATS_NUM_OF_ANT); +WL18XX_DEBUGFS_FWSTATS_FILE(diversity, total_num_of_toggles, "%u"); + +WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_low, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_high, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_stop, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_resume, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(thermal, false_irq, "%u"); +WL18XX_DEBUGFS_FWSTATS_FILE(thermal, adc_source_unexpected, "%u"); + +WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(calib, fail_count, + WL18XX_NUM_OF_CALIBRATIONS_ERRORS); +WL18XX_DEBUGFS_FWSTATS_FILE(calib, calib_count, "%u"); + +WL18XX_DEBUGFS_FWSTATS_FILE(roaming, rssi_level, "%d"); + +WL18XX_DEBUGFS_FWSTATS_FILE(dfs, num_of_radar_detections, "%d"); static 
ssize_t conf_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -281,6 +296,55 @@ static const struct file_operations radar_detection_ops = { .llseek = default_llseek, }; +static ssize_t dynamic_fw_traces_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + unsigned long value; + int ret; + + ret = kstrtoul_from_user(user_buf, count, 0, &value); + if (ret < 0) + return ret; + + mutex_lock(&wl->mutex); + + wl->dynamic_fw_traces = value; + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl18xx_acx_dynamic_fw_traces(wl); + if (ret < 0) + count = ret; + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + return count; +} + +static ssize_t dynamic_fw_traces_read(struct file *file, + char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + return wl1271_format_buffer(userbuf, count, ppos, + "%d\n", wl->dynamic_fw_traces); +} + +static const struct file_operations dynamic_fw_traces_ops = { + .read = dynamic_fw_traces_read, + .write = dynamic_fw_traces_write, + .open = simple_open, + .llseek = default_llseek, +}; + int wl18xx_debugfs_add_files(struct wl1271 *wl, struct dentry *rootdir) { @@ -301,18 +365,23 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_ADD(clear_fw_stats, stats); - DEBUGFS_FWSTATS_ADD(debug, debug1); - DEBUGFS_FWSTATS_ADD(debug, debug2); - DEBUGFS_FWSTATS_ADD(debug, debug3); - DEBUGFS_FWSTATS_ADD(debug, debug4); - DEBUGFS_FWSTATS_ADD(debug, debug5); - DEBUGFS_FWSTATS_ADD(debug, debug6); - - DEBUGFS_FWSTATS_ADD(error, error_frame); - DEBUGFS_FWSTATS_ADD(error, error_null_Frame_tx_start); - DEBUGFS_FWSTATS_ADD(error, error_numll_frame_cts_start); - DEBUGFS_FWSTATS_ADD(error, error_bar_retry); - DEBUGFS_FWSTATS_ADD(error, error_frame_cts_nul_flid); + DEBUGFS_FWSTATS_ADD(error, error_frame_non_ctrl); + DEBUGFS_FWSTATS_ADD(error, error_frame_ctrl); + DEBUGFS_FWSTATS_ADD(error, error_frame_during_protection); + DEBUGFS_FWSTATS_ADD(error, null_frame_tx_start); + DEBUGFS_FWSTATS_ADD(error, null_frame_cts_start); + DEBUGFS_FWSTATS_ADD(error, bar_retry); + DEBUGFS_FWSTATS_ADD(error, num_frame_cts_nul_flid); + DEBUGFS_FWSTATS_ADD(error, tx_abort_failure); + DEBUGFS_FWSTATS_ADD(error, tx_resume_failure); + DEBUGFS_FWSTATS_ADD(error, rx_cmplt_db_overflow_cnt); + DEBUGFS_FWSTATS_ADD(error, elp_while_rx_exch); + DEBUGFS_FWSTATS_ADD(error, elp_while_tx_exch); + DEBUGFS_FWSTATS_ADD(error, elp_while_tx); + DEBUGFS_FWSTATS_ADD(error, elp_while_nvic_pending); + DEBUGFS_FWSTATS_ADD(error, rx_excessive_frame_len); + DEBUGFS_FWSTATS_ADD(error, burst_mismatch); + DEBUGFS_FWSTATS_ADD(error, tbc_exch_mismatch); DEBUGFS_FWSTATS_ADD(tx, tx_prepared_descs); DEBUGFS_FWSTATS_ADD(tx, tx_cmplt); @@ -322,7 +391,7 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_FWSTATS_ADD(tx, tx_data_programmed); DEBUGFS_FWSTATS_ADD(tx, tx_burst_programmed); DEBUGFS_FWSTATS_ADD(tx, tx_starts); - DEBUGFS_FWSTATS_ADD(tx, tx_imm_resp); + DEBUGFS_FWSTATS_ADD(tx, tx_stop); DEBUGFS_FWSTATS_ADD(tx, tx_start_templates); DEBUGFS_FWSTATS_ADD(tx, tx_start_int_templates); DEBUGFS_FWSTATS_ADD(tx, tx_start_fw_gen); @@ -331,13 +400,14 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_FWSTATS_ADD(tx, tx_exch); DEBUGFS_FWSTATS_ADD(tx, tx_retry_template); DEBUGFS_FWSTATS_ADD(tx, tx_retry_data); + DEBUGFS_FWSTATS_ADD(tx, tx_retry_per_rate); DEBUGFS_FWSTATS_ADD(tx, 
tx_exch_pending); DEBUGFS_FWSTATS_ADD(tx, tx_exch_expiry); DEBUGFS_FWSTATS_ADD(tx, tx_done_template); DEBUGFS_FWSTATS_ADD(tx, tx_done_data); DEBUGFS_FWSTATS_ADD(tx, tx_done_int_template); - DEBUGFS_FWSTATS_ADD(tx, tx_frame_checksum); - DEBUGFS_FWSTATS_ADD(tx, tx_checksum_result); + DEBUGFS_FWSTATS_ADD(tx, tx_cfe1); + DEBUGFS_FWSTATS_ADD(tx, tx_cfe2); DEBUGFS_FWSTATS_ADD(tx, frag_called); DEBUGFS_FWSTATS_ADD(tx, frag_mpdu_alloc_failed); DEBUGFS_FWSTATS_ADD(tx, frag_init_called); @@ -362,11 +432,8 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_FWSTATS_ADD(rx, rx_cmplt_task); DEBUGFS_FWSTATS_ADD(rx, rx_phy_hdr); DEBUGFS_FWSTATS_ADD(rx, rx_timeout); + DEBUGFS_FWSTATS_ADD(rx, rx_rts_timeout); DEBUGFS_FWSTATS_ADD(rx, rx_timeout_wa); - DEBUGFS_FWSTATS_ADD(rx, rx_wa_density_dropped_frame); - DEBUGFS_FWSTATS_ADD(rx, rx_wa_ba_not_expected); - DEBUGFS_FWSTATS_ADD(rx, rx_frame_checksum); - DEBUGFS_FWSTATS_ADD(rx, rx_checksum_result); DEBUGFS_FWSTATS_ADD(rx, defrag_called); DEBUGFS_FWSTATS_ADD(rx, defrag_init_called); DEBUGFS_FWSTATS_ADD(rx, defrag_in_process_called); @@ -376,6 +443,7 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_FWSTATS_ADD(rx, decrypt_key_not_found); DEBUGFS_FWSTATS_ADD(rx, defrag_need_decrypt); DEBUGFS_FWSTATS_ADD(rx, rx_tkip_replays); + DEBUGFS_FWSTATS_ADD(rx, rx_xfr); DEBUGFS_FWSTATS_ADD(isr, irqs); @@ -384,13 +452,13 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_FWSTATS_ADD(pwr, connection_out_of_sync); DEBUGFS_FWSTATS_ADD(pwr, cont_miss_bcns_spread); DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_bcns_cnt); - - DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_timeouts); - DEBUGFS_FWSTATS_ADD(ps_poll, upsd_timeouts); - DEBUGFS_FWSTATS_ADD(ps_poll, upsd_max_ap_turn); - DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_max_ap_turn); - DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_utilization); - DEBUGFS_FWSTATS_ADD(ps_poll, upsd_utilization); + DEBUGFS_FWSTATS_ADD(pwr, sleep_time_count); + DEBUGFS_FWSTATS_ADD(pwr, sleep_time_avg); + DEBUGFS_FWSTATS_ADD(pwr, sleep_cycle_avg); + DEBUGFS_FWSTATS_ADD(pwr, sleep_percent); + DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_active_conf); + DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_user_conf); + DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_counter); DEBUGFS_FWSTATS_ADD(rx_filter, beacon_filter); DEBUGFS_FWSTATS_ADD(rx_filter, arp_filter); @@ -404,12 +472,11 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_FWSTATS_ADD(rx_rate, rx_frames_per_rates); - DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_vs_rate); + DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_rate); + DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_len); DEBUGFS_FWSTATS_ADD(aggr_size, rx_size); DEBUGFS_FWSTATS_ADD(pipeline, hs_tx_stat_fifo_int); - DEBUGFS_FWSTATS_ADD(pipeline, tcp_tx_stat_fifo_int); - DEBUGFS_FWSTATS_ADD(pipeline, tcp_rx_stat_fifo_int); DEBUGFS_FWSTATS_ADD(pipeline, enc_tx_stat_fifo_int); DEBUGFS_FWSTATS_ADD(pipeline, enc_rx_stat_fifo_int); DEBUGFS_FWSTATS_ADD(pipeline, rx_complete_stat_fifo_int); @@ -417,22 +484,33 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_FWSTATS_ADD(pipeline, post_proc_swi); DEBUGFS_FWSTATS_ADD(pipeline, sec_frag_swi); DEBUGFS_FWSTATS_ADD(pipeline, pre_to_defrag_swi); - DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_csum_swi); - DEBUGFS_FWSTATS_ADD(pipeline, csum_to_rx_xfer_swi); + DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_rx_xfer_swi); DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in); DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in_fifo_full); DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_out); - DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_in); - DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_out); 
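
The dynamic_fw_traces file added above follows the stock debugfs read/write shape:
parse an unsigned long from the user buffer, store it under the device mutex, and
only touch the hardware when the interface is up. A stripped-down, self-contained
sketch of the same shape (hypothetical demo module; the real handler additionally
calls wl1271_ps_elp_wakeup() and wl18xx_acx_dynamic_fw_traces() under wl->mutex):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static unsigned long trace_bitmap;
static struct dentry *dir;

static ssize_t bitmap_write(struct file *file, const char __user *ubuf,
			    size_t count, loff_t *ppos)
{
	unsigned long value;
	int ret;

	/* accepts decimal, 0x... hex, or 0... octal, like the driver */
	ret = kstrtoul_from_user(ubuf, count, 0, &value);
	if (ret < 0)
		return ret;

	trace_bitmap = value;
	return count;
}

static ssize_t bitmap_read(struct file *file, char __user *ubuf,
			   size_t count, loff_t *ppos)
{
	char buf[32];
	int len = scnprintf(buf, sizeof(buf), "%lu\n", trace_bitmap);

	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}

static const struct file_operations bitmap_ops = {
	.read = bitmap_read,
	.write = bitmap_write,
	.open = simple_open,
	.llseek = default_llseek,
};

static int __init demo_init(void)
{
	dir = debugfs_create_dir("fwtraces_demo", NULL);
	debugfs_create_file("bitmap", 0600, dir, NULL, &bitmap_ops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With this loaded, "echo 0x3 > /sys/kernel/debug/fwtraces_demo/bitmap" round-trips
through the same parse path the driver uses.
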
DEBUGFS_FWSTATS_ADD(pipeline, pipeline_fifo_full); - DEBUGFS_FWSTATS_ADD(mem, rx_free_mem_blks); - DEBUGFS_FWSTATS_ADD(mem, tx_free_mem_blks); - DEBUGFS_FWSTATS_ADD(mem, fwlog_free_mem_blks); - DEBUGFS_FWSTATS_ADD(mem, fw_gen_free_mem_blks); + DEBUGFS_FWSTATS_ADD(diversity, num_of_packets_per_ant); + DEBUGFS_FWSTATS_ADD(diversity, total_num_of_toggles); + + DEBUGFS_FWSTATS_ADD(thermal, irq_thr_low); + DEBUGFS_FWSTATS_ADD(thermal, irq_thr_high); + DEBUGFS_FWSTATS_ADD(thermal, tx_stop); + DEBUGFS_FWSTATS_ADD(thermal, tx_resume); + DEBUGFS_FWSTATS_ADD(thermal, false_irq); + DEBUGFS_FWSTATS_ADD(thermal, adc_source_unexpected); + + DEBUGFS_FWSTATS_ADD(calib, fail_count); + + DEBUGFS_FWSTATS_ADD(calib, calib_count); + + DEBUGFS_FWSTATS_ADD(roaming, rssi_level); + + DEBUGFS_FWSTATS_ADD(dfs, num_of_radar_detections); DEBUGFS_ADD(conf, moddir); DEBUGFS_ADD(radar_detection, moddir); + DEBUGFS_ADD(dynamic_fw_traces, moddir); return 0; diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c index 548bb9e7e91e..09c7e098f460 100644 --- a/drivers/net/wireless/ti/wl18xx/event.c +++ b/drivers/net/wireless/ti/wl18xx/event.c @@ -112,6 +112,14 @@ static int wlcore_smart_config_decode_event(struct wl1271 *wl, return 0; } +static void wlcore_event_time_sync(struct wl1271 *wl, u16 tsf_msb, u16 tsf_lsb) +{ + u32 clock; + /* convert the MSB+LSB to a u32 TSF value */ + clock = (tsf_msb << 16) | tsf_lsb; + wl1271_info("TIME_SYNC_EVENT_ID: clock %u", clock); +} + int wl18xx_process_mailbox_events(struct wl1271 *wl) { struct wl18xx_event_mailbox *mbox = wl->mbox; @@ -128,6 +136,11 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl) wl18xx_scan_completed(wl, wl->scan_wlvif); } + if (vector & TIME_SYNC_EVENT_ID) + wlcore_event_time_sync(wl, + mbox->time_sync_tsf_msb, + mbox->time_sync_tsf_lsb); + if (vector & RADAR_DETECTED_EVENT_ID) { wl1271_info("radar event: channel %d type %s", mbox->radar_channel, diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h index 266ee87834e4..f3d4f13379cb 100644 --- a/drivers/net/wireless/ti/wl18xx/event.h +++ b/drivers/net/wireless/ti/wl18xx/event.h @@ -38,8 +38,9 @@ enum { REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(18), DFS_CHANNELS_CONFIG_COMPLETE_EVENT = BIT(19), PERIODIC_SCAN_REPORT_EVENT_ID = BIT(20), - SMART_CONFIG_SYNC_EVENT_ID = BIT(22), - SMART_CONFIG_DECODE_EVENT_ID = BIT(23), + SMART_CONFIG_SYNC_EVENT_ID = BIT(22), + SMART_CONFIG_DECODE_EVENT_ID = BIT(23), + TIME_SYNC_EVENT_ID = BIT(24), }; enum wl18xx_radar_types { @@ -95,13 +96,16 @@ struct wl18xx_event_mailbox { /* smart config sync channel */ u8 sc_sync_channel; u8 sc_sync_band; - u8 padding2[2]; + /* time sync msb*/ + u16 time_sync_tsf_msb; /* radar detect */ u8 radar_channel; u8 radar_type; - u8 padding3[2]; + /* time sync lsb*/ + u16 time_sync_tsf_lsb; + } __packed; int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event, diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c index 49aca2cf7605..abbf054fb6da 100644 --- a/drivers/net/wireless/ti/wl18xx/main.c +++ b/drivers/net/wireless/ti/wl18xx/main.c @@ -422,6 +422,8 @@ static struct wlcore_conf wl18xx_conf = { .num_probe_reqs = 2, .rssi_threshold = -90, .snr_threshold = 0, + .num_short_intervals = SCAN_MAX_SHORT_INTERVALS, + .long_interval = 30000, }, .ht = { .rx_ba_win_size = 32, @@ -1026,8 +1028,8 @@ static int wl18xx_boot(struct wl1271 *wl) CHANNEL_SWITCH_COMPLETE_EVENT_ID | DFS_CHANNELS_CONFIG_COMPLETE_EVENT | 
SMART_CONFIG_SYNC_EVENT_ID | - SMART_CONFIG_DECODE_EVENT_ID; -; + SMART_CONFIG_DECODE_EVENT_ID | + TIME_SYNC_EVENT_ID; wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID; @@ -1159,6 +1161,11 @@ static int wl18xx_hw_init(struct wl1271 *wl) if (ret < 0) return ret; + /* set the dynamic fw traces bitmap */ + ret = wl18xx_acx_dynamic_fw_traces(wl); + if (ret < 0) + return ret; + if (checksum_param) { ret = wl18xx_acx_set_checksum_state(wl); if (ret != 0) @@ -1797,7 +1804,7 @@ static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = { static const struct ieee80211_iface_limit wl18xx_iface_limits[] = { { - .max = 3, + .max = 2, .types = BIT(NL80211_IFTYPE_STATION), }, { @@ -1806,6 +1813,10 @@ static const struct ieee80211_iface_limit wl18xx_iface_limits[] = { BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_CLIENT), }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), + }, }; static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = { @@ -1813,6 +1824,48 @@ static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = { .max = 2, .types = BIT(NL80211_IFTYPE_AP), }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), + }, +}; + +static const struct ieee80211_iface_limit wl18xx_iface_ap_cl_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_AP), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_CLIENT), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), + }, +}; + +static const struct ieee80211_iface_limit wl18xx_iface_ap_go_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_STATION), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_AP), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_GO), + }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), + }, }; static const struct ieee80211_iface_combination diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c index 98666f235a12..c938c494c785 100644 --- a/drivers/net/wireless/ti/wl18xx/scan.c +++ b/drivers/net/wireless/ti/wl18xx/scan.c @@ -51,7 +51,11 @@ static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif, goto out; } - cmd->role_id = wlvif->role_id; + /* scan on the dev role if the regular one is not started */ + if (wlcore_is_p2p_mgmt(wlvif)) + cmd->role_id = wlvif->dev_role_id; + else + cmd->role_id = wlvif->role_id; if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) { ret = -EINVAL; @@ -223,9 +227,20 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl, SCAN_TYPE_PERIODIC); wl18xx_adjust_channels(cmd, cmd_channels); - cmd->short_cycles_sec = 0; - cmd->long_cycles_sec = cpu_to_le16(req->interval); - cmd->short_cycles_count = 0; + if (c->num_short_intervals && c->long_interval && + c->long_interval > req->interval) { + cmd->short_cycles_msec = cpu_to_le16(req->interval); + cmd->long_cycles_msec = cpu_to_le16(c->long_interval); + cmd->short_cycles_count = c->num_short_intervals; + } else { + cmd->short_cycles_msec = 0; + cmd->long_cycles_msec = cpu_to_le16(req->interval); + cmd->short_cycles_count = 0; + } + wl1271_debug(DEBUG_SCAN, "short_interval: %d, long_interval: %d, num_short: %d", + le16_to_cpu(cmd->short_cycles_msec), + le16_to_cpu(cmd->long_cycles_msec), + cmd->short_cycles_count); cmd->total_cycles = 0; diff --git a/drivers/net/wireless/ti/wl18xx/scan.h b/drivers/net/wireless/ti/wl18xx/scan.h index 2e636aa5dba9..66a763f644d2 100644 --- a/drivers/net/wireless/ti/wl18xx/scan.h +++ b/drivers/net/wireless/ti/wl18xx/scan.h @@ -74,8 +74,8 @@ struct 
wl18xx_cmd_scan_params { u8 dfs; /* number of dfs channels in 5ghz */ u8 passive_active; /* number of passive before active channels 2.4ghz */ - __le16 short_cycles_sec; - __le16 long_cycles_sec; + __le16 short_cycles_msec; + __le16 long_cycles_msec; u8 short_cycles_count; u8 total_cycles; /* 0 - infinite */ u8 padding[2]; diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 68919f8d4310..f01d24baff7c 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -2003,12 +2003,15 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif, wlvif->bss_type == BSS_TYPE_IBSS))) return -EINVAL; - ret = wl12xx_cmd_role_enable(wl, - wl12xx_wlvif_to_vif(wlvif)->addr, - WL1271_ROLE_DEVICE, - &wlvif->dev_role_id); - if (ret < 0) - goto out; + /* the dev role is already started for p2p mgmt interfaces */ + if (!wlcore_is_p2p_mgmt(wlvif)) { + ret = wl12xx_cmd_role_enable(wl, + wl12xx_wlvif_to_vif(wlvif)->addr, + WL1271_ROLE_DEVICE, + &wlvif->dev_role_id); + if (ret < 0) + goto out; + } ret = wl12xx_cmd_role_start_dev(wl, wlvif, band, channel); if (ret < 0) @@ -2023,7 +2026,8 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif, out_stop: wl12xx_cmd_role_stop_dev(wl, wlvif); out_disable: - wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id); + if (!wlcore_is_p2p_mgmt(wlvif)) + wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id); out: return ret; } @@ -2052,10 +2056,42 @@ int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif) if (ret < 0) goto out; - ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id); - if (ret < 0) - goto out; + if (!wlcore_is_p2p_mgmt(wlvif)) { + ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id); + if (ret < 0) + goto out; + } out: return ret; } + +int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 feature, u8 enable, u8 value) +{ + struct wlcore_cmd_generic_cfg *cmd; + int ret; + + wl1271_debug(DEBUG_CMD, + "cmd generic cfg (role %d feature %d enable %d value %d)", + wlvif->role_id, feature, enable, value); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->role_id = wlvif->role_id; + cmd->feature = feature; + cmd->enable = enable; + cmd->value = value; + + ret = wl1271_cmd_send(wl, CMD_GENERIC_CFG, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to send generic cfg command"); + goto out_free; + } +out_free: + kfree(cmd); + return ret; +} +EXPORT_SYMBOL_GPL(wlcore_cmd_generic_cfg); diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h index e14cd407a6ae..8dc46c0a489a 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.h +++ b/drivers/net/wireless/ti/wlcore/cmd.h @@ -92,6 +92,8 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel, enum ieee80211_band band); int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl); +int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, + u8 feature, u8 enable, u8 value); int wl12xx_cmd_config_fwlog(struct wl1271 *wl); int wl12xx_cmd_start_fwlog(struct wl1271 *wl); int wl12xx_cmd_stop_fwlog(struct wl1271 *wl); @@ -652,6 +654,19 @@ struct wl12xx_cmd_regdomain_dfs_config { u8 padding[3]; } __packed; +enum wlcore_generic_cfg_feature { + WLCORE_CFG_FEATURE_RADAR_DEBUG = 2, +}; + +struct wlcore_cmd_generic_cfg { + struct wl1271_cmd_header header; + + u8 role_id; + u8 feature; + u8 enable; + u8 value; +} __packed; + struct 
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index 166add00b50f..52a9d1b14020 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -1186,6 +1186,15 @@ struct conf_sched_scan_settings {
 
 	/* SNR threshold to be used for filtering */
 	s8 snr_threshold;
+
+	/*
+	 * number of short intervals scheduled scan cycles before
+	 * switching to long intervals
+	 */
+	u8 num_short_intervals;
+
+	/* interval between each long scheduled scan cycle (in ms) */
+	u16 long_interval;
 } __packed;
 
 struct conf_ht_setting {
@@ -1352,7 +1361,7 @@ struct conf_recovery_settings {
  * version, the two LSB are the lower driver's private conf
  * version.
  */
-#define WLCORE_CONF_VERSION	(0x0006 << 16)
+#define WLCORE_CONF_VERSION	(0x0007 << 16)
 #define WLCORE_CONF_MASK	0xffff0000
 #define WLCORE_CONF_SIZE	(sizeof(struct wlcore_conf_header) +	\
 				 sizeof(struct wlcore_conf))
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 5ca1fb161a50..e92f2639af2c 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -348,7 +348,7 @@ static int wl12xx_init_fwlog(struct wl1271 *wl)
 }
 
 /* generic sta initialization (non vif-specific) */
-static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	int ret;
 
diff --git a/drivers/net/wireless/ti/wlcore/init.h b/drivers/net/wireless/ti/wlcore/init.h
index a45fbfddec19..fd1cdb6bc3e4 100644
--- a/drivers/net/wireless/ti/wlcore/init.h
+++ b/drivers/net/wireless/ti/wlcore/init.h
@@ -35,5 +35,6 @@ int wl1271_hw_init(struct wl1271 *wl);
 int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif);
 int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 
 #endif
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 337223b9f6f8..e819369d8f8f 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1792,6 +1792,9 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
 	wl->wow_enabled = true;
 
 	wl12xx_for_each_wlvif(wl, wlvif) {
+		if (wlcore_is_p2p_mgmt(wlvif))
+			continue;
+
 		ret = wl1271_configure_suspend(wl, wlvif, wow);
 		if (ret < 0) {
 			mutex_unlock(&wl->mutex);
@@ -1901,6 +1904,9 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
 		goto out;
 
 	wl12xx_for_each_wlvif(wl, wlvif) {
+		if (wlcore_is_p2p_mgmt(wlvif))
+			continue;
+
 		wl1271_configure_resume(wl, wlvif);
 	}
 
@@ -2256,6 +2262,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
 		wlvif->p2p = 1;
 		/* fall-through */
 	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_DEVICE:
 		wlvif->bss_type = BSS_TYPE_STA_BSS;
 		break;
 	case NL80211_IFTYPE_ADHOC:
@@ -2477,7 +2484,8 @@ static void wlcore_hw_queue_iter(void *data, u8 *mac,
 {
 	struct wlcore_hw_queue_iter_data *iter_data = data;
 
-	if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
+	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
 		return;
 
 	if (iter_data->cur_running || vif == iter_data->vif) {
@@ -2495,6 +2503,11 @@ static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
 	struct wlcore_hw_queue_iter_data iter_data = {};
 	int i, q_base;
 
+	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+		return 0;
+	}
+
 	iter_data.vif = vif;
 
 	/* mark all bits taken by active interfaces */
@@ -2618,14 +2631,27 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
 		goto out;
 	}
 
-	ret = wl12xx_cmd_role_enable(wl, vif->addr,
-				     role_type, &wlvif->role_id);
-	if (ret < 0)
-		goto out;
+	if (!wlcore_is_p2p_mgmt(wlvif)) {
+		ret = wl12xx_cmd_role_enable(wl, vif->addr,
+					     role_type, &wlvif->role_id);
+		if (ret < 0)
+			goto out;
 
-	ret = wl1271_init_vif_specific(wl, vif);
-	if (ret < 0)
-		goto out;
+		ret = wl1271_init_vif_specific(wl, vif);
+		if (ret < 0)
+			goto out;
+
+	} else {
+		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
+					     &wlvif->dev_role_id);
+		if (ret < 0)
+			goto out;
+
+		/* needed mainly for configuring rate policies */
+		ret = wl1271_sta_hw_init(wl, wlvif);
+		if (ret < 0)
+			goto out;
+	}
 
 	list_add(&wlvif->list, &wl->wlvif_list);
 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
@@ -2696,9 +2722,15 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
 			wl12xx_stop_dev(wl, wlvif);
 	}
 
-	ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
-	if (ret < 0)
-		goto deinit;
+	if (!wlcore_is_p2p_mgmt(wlvif)) {
+		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
+		if (ret < 0)
+			goto deinit;
+	} else {
+		ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+		if (ret < 0)
+			goto deinit;
+	}
 
 	wl1271_ps_elp_sleep(wl);
 }
@@ -3088,6 +3120,9 @@ static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 {
 	int ret;
 
+	if (wlcore_is_p2p_mgmt(wlvif))
+		return 0;
+
 	if (conf->power_level != wlvif->power_level) {
 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
 		if (ret < 0)
@@ -3207,6 +3242,9 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
 		goto out;
 
 	wl12xx_for_each_wlvif(wl, wlvif) {
+		if (wlcore_is_p2p_mgmt(wlvif))
+			continue;
+
 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
 			if (*total & FIF_ALLMULTI)
 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
@@ -4837,6 +4875,9 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
 	u8 ps_scheme;
 	int ret = 0;
 
+	if (wlcore_is_p2p_mgmt(wlvif))
+		return 0;
+
 	mutex_lock(&wl->mutex);
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
@@ -6078,8 +6119,10 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
 
 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
-		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
-		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
+					 BIT(NL80211_IFTYPE_AP) |
+					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
+					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
+					 BIT(NL80211_IFTYPE_P2P_GO);
 	wl->hw->wiphy->max_scan_ssids = 1;
 	wl->hw->wiphy->max_sched_scan_ssids = 16;
 	wl->hw->wiphy->max_match_sets = 16;
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 7df672a84530..5b2927391d1c 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -74,6 +74,12 @@ static void wl1271_rx_status(struct wl1271 *wl,
 	if (desc->rate <= wl->hw_min_ht_rate)
 		status->flag |= RX_FLAG_HT;
 
+	/*
+	 * Read the signal level and antenna diversity indication.
+	 * The msb in the signal level is always set as it is a
+	 * negative number.
+	 * The antenna indication is the msb of the rssi.
+	 */
 	status->signal = ((desc->rssi & RSSI_LEVEL_BITMASK) | BIT(7));
 	status->antenna = ((desc->rssi & ANT_DIVERSITY_BITMASK) >> 7);
 
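
The comment above documents how a single descriptor byte carries two fields. Assuming the masks implied by the code (low seven bits for level, bit 7 for antenna), decoding a sample byte looks like this:

	u8 rssi = 0xb5;				/* example descriptor byte */
	s8 signal = (rssi & 0x7f) | BIT(7);	/* 0x35 | 0x80 = 0xb5 = -75 dBm */
	u8 antenna = (rssi & BIT(7)) >> 7;	/* diversity antenna 1 */
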
diff --git a/drivers/net/wireless/ti/wlcore/scan.h b/drivers/net/wireless/ti/wlcore/scan.h
index 4dadd0c62cde..782eb297c196 100644
--- a/drivers/net/wireless/ti/wlcore/scan.h
+++ b/drivers/net/wireless/ti/wlcore/scan.h
@@ -83,6 +83,12 @@ struct wl1271_cmd_trigger_scan_to {
 #define MAX_CHANNELS_5GHZ	42
 
 #define SCAN_MAX_CYCLE_INTERVALS 16
+
+/* The FW intervals can take up to 16 entries.
+ * The 1st entry isn't used (scan is immediate). The last
+ * entry should be used for the long_interval
+ */
+#define SCAN_MAX_SHORT_INTERVALS (SCAN_MAX_CYCLE_INTERVALS - 2)
 #define SCAN_MAX_BANDS 3
 
 enum {
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 7f363fa566a3..a1b6040e6491 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -500,6 +500,9 @@ struct wl1271 {
 	/* interface combinations supported by the hw */
 	const struct ieee80211_iface_combination *iface_combinations;
 	u8 n_iface_combinations;
+
+	/* dynamic fw traces */
+	u32 dynamic_fw_traces;
 };
 
 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 39efc6d78b10..27c56876b2c1 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -503,6 +503,11 @@ struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif)
 	return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
 }
 
+static inline bool wlcore_is_p2p_mgmt(struct wl12xx_vif *wlvif)
+{
+	return wl12xx_wlvif_to_vif(wlvif)->type == NL80211_IFTYPE_P2P_DEVICE;
+}
+
 #define wl12xx_for_each_wlvif(wl, wlvif) \
 		list_for_each_entry(wlvif, &wl->wlvif_list, list)
 
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 1a83e190fc15..28577a31549d 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -61,6 +61,12 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
 {
 	atomic_dec(&queue->inflight_packets);
+
+	/* Wake the dealloc thread _after_ decrementing inflight_packets so
+	 * that if kthread_stop() has already been called, the dealloc thread
+	 * does not wait forever with nothing to wake it.
+	 */
+	wake_up(&queue->dealloc_wq);
 }
 
 int xenvif_schedulable(struct xenvif *vif)
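
The ordering in xenvif_skb_zerocopy_complete() matters because the dealloc kthread's exit condition reads inflight_packets: publish the new count first, then wake, so a stopping thread can never re-check stale state and sleep forever. The general shape of the pattern (the wait condition is simplified here for illustration, not netback's exact predicate):

	/* waiter (dealloc kthread) */
	wait_event(queue->dealloc_wq,
		   kthread_should_stop() ||
		   atomic_read(&queue->inflight_packets) == 0);

	/* waker: update state *before* the wake_up() */
	atomic_dec(&queue->inflight_packets);
	wake_up(&queue->dealloc_wq);
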
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7d50711476fe..3f44b522b831 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
 							struct sk_buff *skb,
 							struct xen_netif_tx_request *txp,
-							struct gnttab_map_grant_ref *gop)
+							struct gnttab_map_grant_ref *gop,
+							unsigned int frag_overflow,
+							struct sk_buff *nskb)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
 	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
 	int start;
 	pending_ring_idx_t index;
-	unsigned int nr_slots, frag_overflow = 0;
+	unsigned int nr_slots;
 
-	/* At this point shinfo->nr_frags is in fact the number of
-	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
-	 */
-	if (shinfo->nr_frags > MAX_SKB_FRAGS) {
-		frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
-		BUG_ON(frag_overflow > MAX_SKB_FRAGS);
-		shinfo->nr_frags = MAX_SKB_FRAGS;
-	}
 	nr_slots = shinfo->nr_frags;
 
 	/* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
 	}
 
 	if (frag_overflow) {
-		struct sk_buff *nskb = xenvif_alloc_skb(0);
-		if (unlikely(nskb == NULL)) {
-			if (net_ratelimit())
-				netdev_err(queue->vif->dev,
-					   "Can't allocate the frag_list skb.\n");
-			return NULL;
-		}
 
 		shinfo = skb_shinfo(nskb);
 		frags = shinfo->frags;
@@ -1175,9 +1162,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 				     unsigned *copy_ops,
 				     unsigned *map_ops)
 {
-	struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
-	struct sk_buff *skb;
+	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+	struct sk_buff *skb, *nskb;
 	int ret;
+	unsigned int frag_overflow;
 
 	while (skb_queue_len(&queue->tx_queue) < budget) {
 		struct xen_netif_tx_request txreq;
@@ -1265,6 +1253,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			break;
 		}
 
+		skb_shinfo(skb)->nr_frags = ret;
+		if (data_len < txreq.size)
+			skb_shinfo(skb)->nr_frags++;
+		/* At this point shinfo->nr_frags is in fact the number of
+		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+		 */
+		frag_overflow = 0;
+		nskb = NULL;
+		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
+			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
+			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+			nskb = xenvif_alloc_skb(0);
+			if (unlikely(nskb == NULL)) {
+				kfree_skb(skb);
+				xenvif_tx_err(queue, &txreq, idx);
+				if (net_ratelimit())
+					netdev_err(queue->vif->dev,
+						   "Can't allocate the frag_list skb.\n");
+				break;
+			}
+		}
+
 		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
 			struct xen_netif_extra_info *gso;
 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1272,6 +1283,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
 				/* Failure in xenvif_set_skb_gso is fatal. */
 				kfree_skb(skb);
+				kfree_skb(nskb);
 				break;
 			}
 		}
@@ -1294,9 +1306,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
 		(*copy_ops)++;
 
-		skb_shinfo(skb)->nr_frags = ret;
 		if (data_len < txreq.size) {
-			skb_shinfo(skb)->nr_frags++;
 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
 					     pending_idx);
 			xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1320,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
 		queue->pending_cons++;
 
-		request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
-		if (request_gop == NULL) {
-			kfree_skb(skb);
-			xenvif_tx_err(queue, &txreq, idx);
-			break;
-		}
-		gop = request_gop;
+		gop = xenvif_get_requests(queue, skb, txfrags, gop,
+					  frag_overflow, nskb);
 
 		__skb_queue_tail(&queue->tx_queue, skb);
 
@@ -1536,7 +1541,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
 		smp_wmb();
 		queue->dealloc_prod++;
 	} while (ubuf);
-	wake_up(&queue->dealloc_wq);
 	spin_unlock_irqrestore(&queue->callback_lock, flags);
 
 	if (likely(zerocopy_success))
diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c
index 23435f2a5486..2e2530743831 100644
--- a/drivers/ntb/ntb.c
+++ b/drivers/ntb/ntb.c
@@ -114,7 +114,7 @@ int ntb_register_device(struct ntb_dev *ntb)
 	ntb->dev.bus = &ntb_bus;
 	ntb->dev.parent = &ntb->pdev->dev;
 	ntb->dev.release = ntb_dev_release;
-	dev_set_name(&ntb->dev, pci_name(ntb->pdev));
+	dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
 
 	ntb->ctx = NULL;
 	ntb->ctx_ops = NULL;
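
The ntb.c one-liner is the classic format-string fix: dev_set_name() takes a printf-style format, so passing externally derived text as the format itself misparses any '%' the text happens to contain. Passing it as an argument is always safe:

	dev_set_name(&ntb->dev, pci_name(ntb->pdev));		/* bad: name parsed for '%' */
	dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));	/* good: name is data only */
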
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index efe3ad4122f2..1c6386d5f79c 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -142,10 +142,11 @@ struct ntb_transport_qp {
 	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
 			   void *data, int len);
 
+	struct list_head rx_post_q;
 	struct list_head rx_pend_q;
 	struct list_head rx_free_q;
-	spinlock_t ntb_rx_pend_q_lock;
-	spinlock_t ntb_rx_free_q_lock;
+	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
+	spinlock_t ntb_rx_q_lock;
 	void *rx_buff;
 	unsigned int rx_index;
 	unsigned int rx_max_entry;
@@ -211,6 +212,8 @@ struct ntb_transport_ctx {
 	bool link_is_up;
 	struct delayed_work link_work;
 	struct work_struct link_cleanup;
+
+	struct dentry *debugfs_node_dir;
 };
 
 enum {
@@ -436,13 +439,17 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
 	char *buf;
 	ssize_t ret, out_offset, out_count;
 
+	qp = filp->private_data;
+
+	if (!qp || !qp->link_is_up)
+		return 0;
+
 	out_count = 1000;
 
 	buf = kmalloc(out_count, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
-	qp = filp->private_data;
 	out_offset = 0;
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "NTB QP stats\n");
@@ -534,6 +541,27 @@ out:
 	return entry;
 }
 
+static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
+					   struct list_head *list,
+					   struct list_head *to_list)
+{
+	struct ntb_queue_entry *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(lock, flags);
+
+	if (list_empty(list)) {
+		entry = NULL;
+	} else {
+		entry = list_first_entry(list, struct ntb_queue_entry, entry);
+		list_move_tail(&entry->entry, to_list);
+	}
+
+	spin_unlock_irqrestore(lock, flags);
+
+	return entry;
+}
+
 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
 				     unsigned int qp_num)
 {
@@ -601,13 +629,16 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
 }
 
 static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
-		      unsigned int size)
+		      resource_size_t size)
 {
 	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
 	struct pci_dev *pdev = nt->ndev->pdev;
-	unsigned int xlat_size, buff_size;
+	size_t xlat_size, buff_size;
 	int rc;
 
+	if (!size)
+		return -EINVAL;
+
 	xlat_size = round_up(size, mw->xlat_align_size);
 	buff_size = round_up(size, mw->xlat_align);
 
@@ -627,7 +658,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
 	if (!mw->virt_addr) {
 		mw->xlat_size = 0;
 		mw->buff_size = 0;
-		dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
+		dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
 			buff_size);
 		return -ENOMEM;
 	}
@@ -867,6 +898,8 @@ static void ntb_qp_link_work(struct work_struct *work)
 
 		if (qp->event_handler)
 			qp->event_handler(qp->cb_data, qp->link_is_up);
+
+		tasklet_schedule(&qp->rxc_db_work);
 	} else if (nt->link_is_up)
 		schedule_delayed_work(&qp->link_work,
 				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
@@ -923,12 +956,12 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
 	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
 	qp->tx_max_entry = tx_size / qp->tx_max_frame;
 
-	if (nt_debugfs_dir) {
+	if (nt->debugfs_node_dir) {
 		char debugfs_name[4];
 
 		snprintf(debugfs_name, 4, "qp%d", qp_num);
 		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
-						     nt_debugfs_dir);
+						     nt->debugfs_node_dir);
 
 		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
 							qp->debugfs_dir, qp,
@@ -941,10 +974,10 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
 	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
 	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
 
-	spin_lock_init(&qp->ntb_rx_pend_q_lock);
-	spin_lock_init(&qp->ntb_rx_free_q_lock);
+	spin_lock_init(&qp->ntb_rx_q_lock);
 	spin_lock_init(&qp->ntb_tx_free_q_lock);
 
+	INIT_LIST_HEAD(&qp->rx_post_q);
 	INIT_LIST_HEAD(&qp->rx_pend_q);
 	INIT_LIST_HEAD(&qp->rx_free_q);
 	INIT_LIST_HEAD(&qp->tx_free_q);
@@ -1031,6 +1064,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 		goto err2;
 	}
 
+	if (nt_debugfs_dir) {
+		nt->debugfs_node_dir =
+			debugfs_create_dir(pci_name(ndev->pdev),
+					   nt_debugfs_dir);
+	}
+
 	for (i = 0; i < qp_count; i++) {
 		rc = ntb_transport_init_queue(nt, i);
 		if (rc)
@@ -1107,22 +1146,47 @@ static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
 	kfree(nt);
 }
 
+static void ntb_complete_rxc(struct ntb_transport_qp *qp)
+{
+	struct ntb_queue_entry *entry;
+	void *cb_data;
+	unsigned int len;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+
+	while (!list_empty(&qp->rx_post_q)) {
+		entry = list_first_entry(&qp->rx_post_q,
+					 struct ntb_queue_entry, entry);
+		if (!(entry->flags & DESC_DONE_FLAG))
+			break;
+
+		entry->rx_hdr->flags = 0;
+		iowrite32(entry->index, &qp->rx_info->entry);
+
+		cb_data = entry->cb_data;
+		len = entry->len;
+
+		list_move_tail(&entry->entry, &qp->rx_free_q);
+
+		spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+
+		if (qp->rx_handler && qp->client_ready)
+			qp->rx_handler(qp, qp->cb_data, cb_data, len);
+
+		spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+	}
+
+	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+}
+
 static void ntb_rx_copy_callback(void *data)
 {
 	struct ntb_queue_entry *entry = data;
-	struct ntb_transport_qp *qp = entry->qp;
-	void *cb_data = entry->cb_data;
-	unsigned int len = entry->len;
-	struct ntb_payload_header *hdr = entry->rx_hdr;
 
-	hdr->flags = 0;
+	entry->flags |= DESC_DONE_FLAG;
 
-	iowrite32(entry->index, &qp->rx_info->entry);
-
-	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
-
-	if (qp->rx_handler && qp->client_ready)
-		qp->rx_handler(qp, qp->cb_data, cb_data, len);
+	ntb_complete_rxc(entry->qp);
 }
 
 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
@@ -1138,19 +1202,18 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
 	ntb_rx_copy_callback(entry);
 }
 
-static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
-			 size_t len)
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 {
 	struct dma_async_tx_descriptor *txd;
 	struct ntb_transport_qp *qp = entry->qp;
 	struct dma_chan *chan = qp->dma_chan;
 	struct dma_device *device;
-	size_t pay_off, buff_off;
+	size_t pay_off, buff_off, len;
 	struct dmaengine_unmap_data *unmap;
 	dma_cookie_t cookie;
 	void *buf = entry->buf;
 
-	entry->len = len;
+	len = entry->len;
 
 	if (!chan)
 		goto err;
@@ -1226,7 +1289,6 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
 	struct ntb_payload_header *hdr;
 	struct ntb_queue_entry *entry;
 	void *offset;
-	int rc;
 
 	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
 	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
@@ -1255,65 +1317,43 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
 		return -EIO;
 	}
 
-	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
 	if (!entry) {
 		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
 		qp->rx_err_no_buf++;
-
-		rc = -ENOMEM;
-		goto err;
+		return -EAGAIN;
 	}
 
+	entry->rx_hdr = hdr;
+	entry->index = qp->rx_index;
+
 	if (hdr->len > entry->len) {
 		dev_dbg(&qp->ndev->pdev->dev,
 			"receive buffer overflow! Wanted %d got %d\n",
 			hdr->len, entry->len);
 		qp->rx_err_oflow++;
 
-		rc = -EIO;
-		goto err;
+		entry->len = -EIO;
+		entry->flags |= DESC_DONE_FLAG;
+
+		ntb_complete_rxc(qp);
+	} else {
+		dev_dbg(&qp->ndev->pdev->dev,
+			"RX OK index %u ver %u size %d into buf size %d\n",
+			qp->rx_index, hdr->ver, hdr->len, entry->len);
+
+		qp->rx_bytes += hdr->len;
+		qp->rx_pkts++;
+
+		entry->len = hdr->len;
+
+		ntb_async_rx(entry, offset);
 	}
 
-	dev_dbg(&qp->ndev->pdev->dev,
-		"RX OK index %u ver %u size %d into buf size %d\n",
-		qp->rx_index, hdr->ver, hdr->len, entry->len);
-
-	qp->rx_bytes += hdr->len;
-	qp->rx_pkts++;
-
-	entry->index = qp->rx_index;
-	entry->rx_hdr = hdr;
-
-	ntb_async_rx(entry, offset, hdr->len);
-
 	qp->rx_index++;
 	qp->rx_index %= qp->rx_max_entry;
 
 	return 0;
-
-err:
-	/* FIXME: if this syncrhonous update of the rx_index gets ahead of
-	 * asyncrhonous ntb_rx_copy_callback of previous entry, there are three
-	 * scenarios:
-	 *
-	 * 1) The peer might miss this update, but observe the update
-	 * from the memcpy completion callback.  In this case, the buffer will
-	 * not be freed on the peer to be reused for a different packet.  The
-	 * successful rx of a later packet would clear the condition, but the
-	 * condition could persist if several rx fail in a row.
-	 *
-	 * 2) The peer may observe this update before the asyncrhonous copy of
-	 * prior packets is completed.  The peer may overwrite the buffers of
-	 * the prior packets before they are copied.
-	 *
-	 * 3) Both: the peer may observe the update, and then observe the index
-	 * decrement by the asynchronous completion callback.  Who knows what
-	 * badness that will cause.
-	 */
-	hdr->flags = 0;
-	iowrite32(qp->rx_index, &qp->rx_info->entry);
-
-	return rc;
 }
 
 static void ntb_transport_rxc_db(unsigned long data)
@@ -1333,7 +1373,7 @@ static void ntb_transport_rxc_db(unsigned long data)
 			break;
 	}
 
-	if (qp->dma_chan)
+	if (i && qp->dma_chan)
 		dma_async_issue_pending(qp->dma_chan);
 
 	if (i == qp->rx_max_entry) {
@@ -1609,7 +1649,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 			goto err1;
 
 		entry->qp = qp;
-		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
+		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
 			     &qp->rx_free_q);
 	}
 
@@ -1634,7 +1674,7 @@ err2:
 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
 		kfree(entry);
 err1:
-	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
 		kfree(entry);
 	if (qp->dma_chan)
 		dma_release_channel(qp->dma_chan);
@@ -1652,7 +1692,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
  */
 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 {
-	struct ntb_transport_ctx *nt = qp->transport;
 	struct pci_dev *pdev;
 	struct ntb_queue_entry *entry;
 	u64 qp_bit;
@@ -1689,18 +1728,23 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 	qp->tx_handler = NULL;
 	qp->event_handler = NULL;
 
-	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
 		kfree(entry);
 
-	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
-		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
+		dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
+		kfree(entry);
+	}
+
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
+		dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
 		kfree(entry);
 	}
 
 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
 		kfree(entry);
 
-	nt->qp_bitmap_free |= qp_bit;
+	qp->transport->qp_bitmap_free |= qp_bit;
 
 	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
 }
@@ -1724,14 +1768,14 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
 	if (!qp || qp->client_ready)
 		return NULL;
 
-	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
 	if (!entry)
 		return NULL;
 
 	buf = entry->cb_data;
 	*len = entry->len;
 
-	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
 
 	return buf;
 }
@@ -1757,15 +1801,18 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
 	if (!qp)
 		return -EINVAL;
 
-	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
+	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
 	if (!entry)
 		return -ENOMEM;
 
 	entry->cb_data = cb;
 	entry->buf = data;
 	entry->len = len;
+	entry->flags = 0;
 
-	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
+	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
+
+	tasklet_schedule(&qp->rxc_db_work);
 
 	return 0;
 }
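
The rx_post_q added above is what replaces the removed FIXME: DMA copies can complete out of order, while the peer's ring index must only ever advance in order. ntb_complete_rxc() therefore releases entries strictly from the head of the post queue and stops at the first entry still pending. Reduced to its core, the pattern is:

	/* sketch: complete in submission order, not completion order */
	while (!list_empty(&qp->rx_post_q)) {
		entry = list_first_entry(&qp->rx_post_q,
					 struct ntb_queue_entry, entry);
		if (!(entry->flags & DESC_DONE_FLAG))
			break;	/* later completions wait for the head */
		/* ack index to the peer, hand data to the client, recycle */
	}
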
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index e17c539e4f6f..2dad7e820ff0 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -212,6 +212,7 @@ void sun4i_usb_phy_set_squelch_detect(struct phy *_phy, bool enabled)
 
 	sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2);
 }
+EXPORT_SYMBOL_GPL(sun4i_usb_phy_set_squelch_detect);
 
 static struct phy_ops sun4i_usb_phy_ops = {
 	.init		= sun4i_usb_phy_init,
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 3510b81db3fa..08020dc2c7c8 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -28,6 +28,8 @@
 #include <linux/delay.h>
 #include <linux/phy/omap_control_phy.h>
 #include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 
 #define	PLL_STATUS	0x00000004
 #define	PLL_GO		0x00000008
@@ -52,6 +54,8 @@
 #define	PLL_LOCK	0x2
 #define	PLL_IDLE	0x1
 
+#define SATA_PLL_SOFT_RESET	BIT(18)
+
 /*
  * This is an Empirical value that works, need to confirm the actual
  * value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status
@@ -82,6 +86,9 @@ struct ti_pipe3 {
 	struct clk		*refclk;
 	struct clk		*div_clk;
 	struct pipe3_dpll_map	*dpll_map;
+	struct regmap		*dpll_reset_syscon; /* ctrl. reg. access */
+	unsigned int		dpll_reset_reg; /* reg. index within syscon */
+	bool			sata_refclk_enabled;
 };
 
 static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -249,8 +256,11 @@ static int ti_pipe3_exit(struct phy *x)
 	u32 val;
 	unsigned long timeout;
 
-	/* SATA DPLL can't be powered down due to Errata i783 */
-	if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata"))
+	/* If dpll_reset_syscon is not present we won't power down SATA DPLL
+	 * due to Errata i783
+	 */
+	if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") &&
+	    !phy->dpll_reset_syscon)
 		return 0;
 
 	/* PCIe doesn't have internal DPLL */
@@ -276,6 +286,14 @@ static int ti_pipe3_exit(struct phy *x)
 		}
 	}
 
+	/* i783: SATA needs control bit toggle after PLL unlock */
+	if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) {
+		regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
+				   SATA_PLL_SOFT_RESET, SATA_PLL_SOFT_RESET);
+		regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
+				   SATA_PLL_SOFT_RESET, 0);
+	}
+
 	ti_pipe3_disable_clocks(phy);
 
 	return 0;
@@ -350,6 +368,21 @@ static int ti_pipe3_probe(struct platform_device *pdev)
 		}
 	} else {
 		phy->wkupclk = ERR_PTR(-ENODEV);
+		phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node,
+							"syscon-pllreset");
+		if (IS_ERR(phy->dpll_reset_syscon)) {
+			dev_info(&pdev->dev,
+				 "can't get syscon-pllreset, sata dpll won't idle\n");
+			phy->dpll_reset_syscon = NULL;
+		} else {
+			if (of_property_read_u32_index(node,
+						       "syscon-pllreset", 1,
+						       &phy->dpll_reset_reg)) {
+				dev_err(&pdev->dev,
+					"couldn't get pllreset reg. offset\n");
+				return -EINVAL;
+			}
+		}
 	}
 
 	if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
@@ -402,10 +435,16 @@ static int ti_pipe3_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, phy);
 	pm_runtime_enable(phy->dev);
-	/* Prevent auto-disable of refclk for SATA PHY due to Errata i783 */
-	if (of_device_is_compatible(node, "ti,phy-pipe3-sata"))
-		if (!IS_ERR(phy->refclk))
+
+	/*
+	 * Prevent auto-disable of refclk for SATA PHY due to Errata i783
+	 */
+	if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) {
+		if (!IS_ERR(phy->refclk)) {
 			clk_prepare_enable(phy->refclk);
+			phy->sata_refclk_enabled = true;
+		}
+	}
 
 	generic_phy = devm_phy_create(phy->dev, NULL, &ops);
 	if (IS_ERR(generic_phy))
@@ -472,8 +511,18 @@ static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
 {
 	if (!IS_ERR(phy->wkupclk))
 		clk_disable_unprepare(phy->wkupclk);
-	if (!IS_ERR(phy->refclk))
+	if (!IS_ERR(phy->refclk)) {
 		clk_disable_unprepare(phy->refclk);
+		/*
+		 * SATA refclk needs an additional disable as we left it
+		 * on in probe to avoid Errata i783
+		 */
+		if (phy->sata_refclk_enabled) {
+			clk_disable_unprepare(phy->refclk);
+			phy->sata_refclk_enabled = false;
+		}
+	}
+
 	if (!IS_ERR(phy->div_clk))
 		clk_disable_unprepare(phy->div_clk);
}
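
Note how the probe/disable pair stays balanced: the common clock API is refcounted, so the extra enable taken in probe for Errata i783 must be dropped exactly once, which is what the sata_refclk_enabled flag tracks. In sketch form:

	clk_prepare_enable(phy->refclk);	/* probe: +1, keeps refclk running */
	/* ... later, in the disable path ... */
	clk_disable_unprepare(phy->refclk);	/* normal disable: -1 */
	if (phy->sata_refclk_enabled) {
		clk_disable_unprepare(phy->refclk);	/* drop the probe-time ref once */
		phy->sata_refclk_enabled = false;
	}
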
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index cb1329919527..3271cd1abe7c 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -4,7 +4,6 @@
 
 menuconfig CHROME_PLATFORMS
 	bool "Platform support for Chrome hardware"
-	depends on X86 || ARM
 	---help---
 	  Say Y here to get to see options for platform support for
 	  various Chromebooks and Chromeboxes. This option alone does
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 882744852aac..a9aa38903efe 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -599,9 +599,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
 {
 	struct ipr_trace_entry *trace_entry;
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+	unsigned int trace_index;
 
-	trace_entry = &ioa_cfg->trace[atomic_add_return
-			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
+	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
+	trace_entry = &ioa_cfg->trace[trace_index];
 	trace_entry->time = jiffies;
 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
 	trace_entry->type = type;
@@ -1051,10 +1052,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
 
 static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
 {
+	unsigned int hrrq;
+
 	if (ioa_cfg->hrrq_num == 1)
-		return 0;
-	else
-		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
+		hrrq = 0;
+	else {
+		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
+		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
+	}
+	return hrrq;
 }
 
 /**
@@ -6263,21 +6269,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-	unsigned long hrrq_flags;
+	unsigned long lock_flags;
 
 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
 
 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
 		scsi_dma_unmap(scsi_cmd);
 
-		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 		scsi_cmd->scsi_done(scsi_cmd);
-		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
 	} else {
-		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+		spin_lock(&ipr_cmd->hrrq->_lock);
 		ipr_erp_start(ioa_cfg, ipr_cmd);
-		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
+		spin_unlock(&ipr_cmd->hrrq->_lock);
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 	}
 }
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 73790a1d0969..6b97ee45c7b4 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1486,6 +1486,7 @@ struct ipr_ioa_cfg {
 
 #define IPR_NUM_TRACE_INDEX_BITS	8
 #define IPR_NUM_TRACE_ENTRIES		(1 << IPR_NUM_TRACE_INDEX_BITS)
+#define IPR_TRACE_INDEX_MASK		(IPR_NUM_TRACE_ENTRIES - 1)
 #define IPR_TRACE_SIZE	(sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
 	char trace_start[8];
 #define IPR_TRACE_START_LABEL			"trace"
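
The trace fix deserves a second look: atomic_add_return() wraps to negative values on overflow, and C's '%' operator preserves the sign, so 'x % IPR_NUM_TRACE_ENTRIES' can produce a negative array index. Because the entry count is a power of two, masking is both safe and cheaper. With the constants from ipr.h:

	/* IPR_NUM_TRACE_ENTRIES = 1 << 8, IPR_TRACE_INDEX_MASK = 0xff */
	int raw = atomic_add_return(1, &ioa_cfg->trace_index);	/* may wrap negative */
	unsigned int idx = raw & IPR_TRACE_INDEX_MASK;		/* always 0..255 */
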
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 106884a5444e..cfadccef045c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -944,7 +944,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
 					     scmd->sdb.length);
 		scmd->sdb.table.sgl = &ses->sense_sgl;
 		scmd->sc_data_direction = DMA_FROM_DEVICE;
-		scmd->sdb.table.nents = 1;
+		scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1;
 		scmd->cmnd[0] = REQUEST_SENSE;
 		scmd->cmnd[4] = scmd->sdb.length;
 		scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b1a263137a23..448ebdaa3d69 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -583,7 +583,7 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
 
 static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
 {
-	if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS)
+	if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
 		return;
 	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
 }
@@ -597,8 +597,8 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
 
 	if (mq) {
 		if (nents <= SCSI_MAX_SG_SEGMENTS) {
-			sdb->table.nents = nents;
-			sg_init_table(sdb->table.sgl, sdb->table.nents);
+			sdb->table.nents = sdb->table.orig_nents = nents;
+			sg_init_table(sdb->table.sgl, nents);
 			return 0;
 		}
 		first_chunk = sdb->table.sgl;
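
Both scsi changes are really about the sg_table contract: orig_nents records how many entries were allocated (what sg_free_table() and chained-table teardown must walk), while nents may later shrink to the number of DMA-mapped entries. Code that hand-rolls a table therefore has to set both, roughly:

	sg_init_table(sdb->table.sgl, nents);
	sdb->table.nents = sdb->table.orig_nents = nents;	/* allocated == mapped, for now */
	/* a later dma_map_sg() may legitimately lower table.nents only */
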
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index bfa42620a3f6..940781183fac 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -1266,6 +1266,7 @@ static const struct das1800_board *das1800_probe(struct comedi_device *dev)
 		if (index == das1801hc || index == das1802hc)
 			return board;
 		index = das1801hc;
+		break;
 	default:
 		dev_err(dev->class_dev,
 			"Board model: probe returned 0x%x (unknown, please report)\n",
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index 9c934e6d2ea1..c61add46b426 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -40,7 +40,7 @@
 
 #define DEBUG_SUBSYSTEM D_OTHER
 
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
 
 #include "../include/obd_support.h"
 #include "../include/lustre_debug.h"
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index b0c8e235b982..69bdc8f29b59 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1483,8 +1483,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
 		}
 	}
 
-	if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) {
-		if (conf->assoc) {
+	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
+	    priv->op_mode != NL80211_IFTYPE_AP) {
+		if (conf->assoc && conf->beacon_rate) {
 			CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
 				       conf->sync_tsf);
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index f5296f53a3d2..a1f66addda8d 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -493,7 +493,7 @@ static void WILC_WFI_mon_setup(struct net_device *dev)
 	/* dev->destructor = free_netdev; */
 	PRINT_INFO(CORECONFIG_DBG, "In Ethernet setup function\n");
 	ether_setup(dev);
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->type = ARPHRD_IEEE80211_RADIOTAP;
 	memset(dev->dev_addr, 0, ETH_ALEN);
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index d5dd357ba57c..b49f97c734d0 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -405,7 +405,6 @@ static SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops,
 static struct platform_driver hisi_thermal_driver = {
 	.driver = {
 		.name		= "hisi_thermal",
-		.owner		= THIS_MODULE,
 		.pm		= &hisi_thermal_pm_ops,
 		.of_match_table = of_hisi_thermal_match,
 	},
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 4672250b329f..63a448f9d93b 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -229,7 +229,8 @@ static int allocate_power(struct thermal_zone_device *tz,
 	struct thermal_instance *instance;
 	struct power_allocator_params *params = tz->governor_data;
 	u32 *req_power, *max_power, *granted_power, *extra_actor_power;
-	u32 total_req_power, max_allocatable_power;
+	u32 *weighted_req_power;
+	u32 total_req_power, max_allocatable_power, total_weighted_req_power;
 	u32 total_granted_power, power_range;
 	int i, num_actors, total_weight, ret = 0;
 	int trip_max_desired_temperature = params->trip_max_desired_temperature;
@@ -247,16 +248,17 @@ static int allocate_power(struct thermal_zone_device *tz,
 	}
 
 	/*
-	 * We need to allocate three arrays of the same size:
-	 * req_power, max_power and granted_power.  They are going to
-	 * be needed until this function returns.  Allocate them all
-	 * in one go to simplify the allocation and deallocation
-	 * logic.
+	 * We need to allocate five arrays of the same size:
+	 * req_power, max_power, granted_power, extra_actor_power and
+	 * weighted_req_power.  They are going to be needed until this
+	 * function returns.  Allocate them all in one go to simplify
+	 * the allocation and deallocation logic.
 	 */
 	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
 	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
 	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
-	req_power = devm_kcalloc(&tz->device, num_actors * 4,
+	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
+	req_power = devm_kcalloc(&tz->device, num_actors * 5,
 				 sizeof(*req_power), GFP_KERNEL);
 	if (!req_power) {
 		ret = -ENOMEM;
@@ -266,8 +268,10 @@ static int allocate_power(struct thermal_zone_device *tz,
 	max_power = &req_power[num_actors];
 	granted_power = &req_power[2 * num_actors];
 	extra_actor_power = &req_power[3 * num_actors];
+	weighted_req_power = &req_power[4 * num_actors];
 
 	i = 0;
+	total_weighted_req_power = 0;
 	total_req_power = 0;
 	max_allocatable_power = 0;
 
@@ -289,13 +293,14 @@ static int allocate_power(struct thermal_zone_device *tz,
 		else
 			weight = instance->weight;
 
-		req_power[i] = frac_to_int(weight * req_power[i]);
+		weighted_req_power[i] = frac_to_int(weight * req_power[i]);
 
 		if (power_actor_get_max_power(cdev, tz, &max_power[i]))
 			continue;
 
 		total_req_power += req_power[i];
 		max_allocatable_power += max_power[i];
+		total_weighted_req_power += weighted_req_power[i];
 
 		i++;
 	}
@@ -303,8 +308,9 @@ static int allocate_power(struct thermal_zone_device *tz,
 	power_range = pid_controller(tz, current_temp, control_temp,
 				     max_allocatable_power);
 
-	divvy_up_power(req_power, max_power, num_actors, total_req_power,
-		       power_range, granted_power, extra_actor_power);
+	divvy_up_power(weighted_req_power, max_power, num_actors,
+		       total_weighted_req_power, power_range, granted_power,
+		       extra_actor_power);
 
 	total_granted_power = 0;
 	i = 0;
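
With the weighted array in place, divvy_up_power() splits the PID output in proportion to weighted demand, roughly granted[i] = power_range * weighted_req[i] / total_weighted_req, capped by max_power[i] (a simplification of the in-kernel logic). Worked through with hypothetical numbers: two actors requesting 500 mW and 1000 mW with weights 3 and 1 give weighted requests of 1500 and 1000, so a 1000 mW budget splits 600/400 instead of the unweighted 333/667. Keeping the raw req_power[] array intact alongside weighted_req_power[] is the point of the fifth array: the raw values are still needed for the trace output further down.
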
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index c8e35c1a43dc..e0da3865e060 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -1,6 +1,6 @@
 config EXYNOS_THERMAL
 	tristate "Exynos thermal management unit driver"
-	depends on OF
+	depends on THERMAL_OF
 	help
 	  If you say yes here you get support for the TMU (Thermal Management
 	  Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 531f4b179871..c96ff10b869e 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1296,7 +1296,6 @@ static struct thermal_zone_of_device_ops exynos_sensor_ops = {
 
 static int exynos_tmu_probe(struct platform_device *pdev)
 {
-	struct exynos_tmu_platform_data *pdata;
 	struct exynos_tmu_data *data;
 	int ret;
 
@@ -1318,8 +1317,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_sensor;
 
-	pdata = data->pdata;
-
 	INIT_WORK(&data->irq_work, exynos_tmu_work);
 
 	data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
@@ -1392,6 +1389,8 @@ err_clk_sec:
 	if (!IS_ERR(data->clk_sec))
 		clk_unprepare(data->clk_sec);
 err_sensor:
+	if (!IS_ERR_OR_NULL(data->regulator))
+		regulator_disable(data->regulator);
 	thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
 
 	return ret;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 04659bfb888b..4ca211be4c0f 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1333,6 +1333,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
 	return -ENODEV;
 
 unbind:
+	device_remove_file(&tz->device, &pos->weight_attr);
 	device_remove_file(&tz->device, &pos->attr);
 	sysfs_remove_link(&tz->device.kobj, pos->name);
 	release_idr(&tz->idr, &tz->lock, pos->id);
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 74fea4fa41b1..3ad48e1c0c57 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1024,7 +1024,18 @@ static struct platform_driver ci_hdrc_driver = {
 	},
 };
 
-module_platform_driver(ci_hdrc_driver);
+static int __init ci_hdrc_platform_register(void)
+{
+	ci_hdrc_host_driver_init();
+	return platform_driver_register(&ci_hdrc_driver);
+}
+module_init(ci_hdrc_platform_register);
+
+static void __exit ci_hdrc_platform_unregister(void)
+{
+	platform_driver_unregister(&ci_hdrc_driver);
+}
+module_exit(ci_hdrc_platform_unregister);
 
 MODULE_ALIAS("platform:ci_hdrc");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 6cf87b8b13a8..7161439def19 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -249,9 +249,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
 	rdrv->name = "host";
 	ci->roles[CI_ROLE_HOST] = rdrv;
 
+	return 0;
+}
+
+void ci_hdrc_host_driver_init(void)
+{
 	ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
 	orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
 	ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
-
-	return 0;
 }
diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
index 5707bf379bfb..0f12f131bdd3 100644
--- a/drivers/usb/chipidea/host.h
+++ b/drivers/usb/chipidea/host.h
@@ -5,6 +5,7 @@
 
 int ci_hdrc_host_init(struct ci_hdrc *ci);
 void ci_hdrc_host_destroy(struct ci_hdrc *ci);
+void ci_hdrc_host_driver_init(void);
 
 #else
 
@@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
 
 }
 
+static void ci_hdrc_host_driver_init(void)
+{
+
+}
+
 #endif
 
 #endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index f7f35a36c09a..6df9715a4bcd 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -699,6 +699,10 @@ static inline int hidg_get_minor(void)
 	int ret;
 
 	ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL);
+	if (ret >= HIDG_MINORS) {
+		ida_simple_remove(&hidg_ida, ret);
+		ret = -ENODEV;
+	}
 
 	return ret;
 }
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 44173df27273..357f63f47b42 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1248,7 +1248,15 @@ static struct config_item_type printer_func_type = {
 
 static inline int gprinter_get_minor(void)
 {
-	return ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
+	int ret;
+
+	ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
+	if (ret >= PRINTER_MINORS) {
+		ida_simple_remove(&printer_ida, ret);
+		ret = -ENODEV;
+	}
+
+	return ret;
 }
 
 static inline void gprinter_put_minor(int minor)
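
Both gadget fixes bound an IDA allocation after the fact: ida_simple_get(ida, 0, 0, GFP_KERNEL) means "no upper limit", so an id beyond the registered minor range must be handed back and mapped to an error. The same effect can be had up front, since the allocator treats the third argument as an exclusive upper bound:

	/* equivalent up-front bound; ids are allocated in [0, HIDG_MINORS) */
	ret = ida_simple_get(&hidg_ida, 0, HIDG_MINORS, GFP_KERNEL);
	if (ret < 0)
		return ret;	/* -ENOSPC once all minors are in use */
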
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 6d3eb8b00a48..531861547253 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1162,14 +1162,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
 		factor = 1000;
 	} else {
 		ep_desc = &hs_epin_desc;
-		factor = 125;
+		factor = 8000;
 	}
 
 	/* pre-compute some values for iso_complete() */
 	uac2->p_framesize = opts->p_ssize *
 			    num_channels(opts->p_chmask);
 	rate = opts->p_srate * uac2->p_framesize;
-	uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor;
+	uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
 	uac2->p_pktsize = min_t(unsigned int,
 				rate / uac2->p_interval,
 				prm->max_psize);
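
The f_uac2 change fixes the units of p_interval. A high-speed isochronous endpoint is serviced every 125 us * 2^(bInterval-1), so the number of packets per second is 8000 / 2^(bInterval-1); the old code multiplied where it should divide. Worked through for bInterval = 4 and 48 kHz 16-bit stereo audio:

	p_interval = 8000 / (1 << (4 - 1));	/* 1000 packets per second */
	rate = 48000 * 4;			/* 48 kHz x 4 B/frame = 192000 B/s */
	p_pktsize = rate / p_interval;		/* 192 bytes per packet */
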
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index b04980cf6dc4..1efa61265d8d 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -779,7 +779,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
 	/* The current hw dequeue pointer */
 	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0));
 	deq_ptr_64 = tmp_32;
-	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(1));
+	tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1(0));
 	deq_ptr_64 |= ((u64)tmp_32 << 32);
 
 	/* we have the dma addr of next bd that will be fetched by hardware */
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index 362ee8af5fce..89ed5e71a199 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -323,6 +323,7 @@ err4:
 
 err3:
 	put_device(&udc->dev);
+	device_del(&gadget->dev);
 
 err2:
 	put_device(&gadget->dev);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 3e442f77a2b9..9a8c936cd42c 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	int size;
 	int i, j, num_ports;
 
-	del_timer_sync(&xhci->cmd_timer);
+	if (timer_pending(&xhci->cmd_timer))
+		del_timer_sync(&xhci->cmd_timer);
 
 	/* Free the Event Ring Segment Table and the actual Event Ring */
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6a8fc52aed58..32f4d564494a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
 		return 0;
 	/* offset in TRBs */
 	segment_offset = trb - seg->trbs;
-	if (segment_offset > TRBS_PER_SEGMENT)
+	if (segment_offset >= TRBS_PER_SEGMENT)
 		return 0;
 	return seg->dma + (segment_offset * sizeof(*trb));
 }
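
The xhci-ring change is an off-by-one in a bounds check: segment_offset indexes a TRBS_PER_SEGMENT-sized array, so the valid range is 0 .. TRBS_PER_SEGMENT-1 and the comparison must be >=. The generic shape, for any array of N elements:

	/* index i into arr[N] is valid iff 0 <= i < N; i == N is one past the end */
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
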
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 19b85ee98a72..876423b8892c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1099,6 +1099,8 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
 	{ USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
 	  .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
+	{ USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
+	  .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 9c63897b3a56..d156545728c2 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -145,7 +145,6 @@ static const struct usb_device_id id_table[] = {
 	{DEVICE_SWI(0x1199, 0x901c)},	/* Sierra Wireless EM7700 */
 	{DEVICE_SWI(0x1199, 0x901f)},	/* Sierra Wireless EM7355 */
 	{DEVICE_SWI(0x1199, 0x9040)},	/* Sierra Wireless Modem */
-	{DEVICE_SWI(0x1199, 0x9041)},	/* Sierra Wireless MC7305/MC7355 */
 	{DEVICE_SWI(0x1199, 0x9051)},	/* Netgear AirCard 340U */
 	{DEVICE_SWI(0x1199, 0x9053)},	/* Sierra Wireless Modem */
 	{DEVICE_SWI(0x1199, 0x9054)},	/* Sierra Wireless Modem */
@@ -158,6 +157,7 @@ static const struct usb_device_id id_table[] = {
 	{DEVICE_SWI(0x413c, 0x81a4)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a8)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a9)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+	{DEVICE_SWI(0x413c, 0x81b1)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
 
 	/* Huawei devices */
 	{DEVICE_HWI(0x03f0, 0x581d)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 46179a0828eb..07d1ecd564f7 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
 	},
+	{ USB_DEVICE(0x1199, 0x68AB) },	/* Sierra Wireless AR8550 */
 	/* AT&T Direct IP LTE modems */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 658c34bb9076..1aaf89300621 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1306,10 +1306,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
 	int y;
 	int c = scr_readw((u16 *) vc->vc_pos);
 
+	ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
+
 	if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
 		return;
 
-	ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
 	if (vc->vc_cursor_type & 0x10)
 		fbcon_del_cursor_timer(info);
 	else
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 2d98de535e0f..f888561568d9 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -298,7 +298,7 @@ config FB_ARMCLCD
 
 # Helper logic selected only by the ARM Versatile platform family.
 config PLAT_VERSATILE_CLCD
-	def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
+	def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
 	depends on ARM
 	depends on FB_ARMCLCD && FB=y
diff --git a/drivers/video/fbdev/omap2/dss/dss-of.c b/drivers/video/fbdev/omap2/dss/dss-of.c
index 928ee639c0c1..bf407b6ba15c 100644
--- a/drivers/video/fbdev/omap2/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/dss/dss-of.c
@@ -60,6 +60,8 @@ omapdss_of_get_next_port(const struct device_node *parent,
 			}
 			prev = port;
 		} while (of_node_cmp(port->name, "port") != 0);
+
+		of_node_put(ports);
 	}
 
 	return port;
@@ -94,7 +96,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
 	if (!port)
 		return NULL;
 
-	np = of_get_next_parent(port);
+	np = of_get_parent(port);
 
 	for (i = 0; i < 2 && np; ++i) {
 		struct property *prop;
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 86bd457d039d..50bce45e7f3d 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -653,7 +653,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
 		goto err_free_dma;
 	}
 
-	ret = clk_enable(priv->clk);
+	ret = clk_prepare_enable(priv->clk);
 	if (ret < 0) {
 		dev_err(dev, "failed to enable clock\n");
 		goto err_misc_deregister;
@@ -685,7 +685,7 @@ err_misc_deregister:
 	misc_deregister(&priv->misc_dev);
 
 err_disable_clk:
-	clk_disable(priv->clk);
+	clk_disable_unprepare(priv->clk);
 
 	return ret;
 }
diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c
index 111c2d1911d3..b5102aa6090d 100644
--- a/drivers/video/of_videomode.c
+++ b/drivers/video/of_videomode.c
@@ -44,11 +44,9 @@ int of_get_videomode(struct device_node *np, struct videomode *vm,
 		index = disp->native_mode;
 
 	ret = videomode_from_timings(disp, vm, index);
-	if (ret)
-		return ret;
 
 	display_timings_release(disp);
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(of_get_videomode);
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index 60e2a1677563..c96944b59856 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -313,6 +313,7 @@ err_init_vq:
 static void virtinput_remove(struct virtio_device *vdev)
 {
 	struct virtio_input *vi = vdev->priv;
+	void *buf;
 	unsigned long flags;
 
 	spin_lock_irqsave(&vi->lock, flags);
@@ -320,6 +321,9 @@ static void virtinput_remove(struct virtio_device *vdev)
 	spin_unlock_irqrestore(&vi->lock, flags);
 
 	input_unregister_device(vi->idev);
+	vdev->config->reset(vdev);
+	while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
+		kfree(buf);
 	vdev->config->del_vqs(vdev);
 	kfree(vi);
 }
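
The dss-of changes are both refcount corrections, and the second one illustrates a subtle OF API difference: of_get_next_parent(np) returns the parent but also drops a reference on np itself, which suits walk loops that are finished with the child, whereas of_get_parent(np) leaves np untouched. A caller that still owns the child node must use the latter:

	np = of_get_parent(port);	/* +1 on parent; 'port' refcount untouched */
	/* ... use np ... */
	of_node_put(np);		/* balance the parent reference */
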
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index fd933695f232..bf4a23c7c591 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -472,7 +472,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 }
 
 /*
- * We avoid multiple worker processes conflicting via the balloon mutex.
+ * As this is a work item it is guaranteed to run as a single instance only.
  * We may of course race updates of the target counts (which are protected
  * by the balloon lock), or with changes to the Xen hard limit, but we will
  * recover from these in time.
@@ -482,9 +482,10 @@ static void balloon_process(struct work_struct *work)
 	enum bp_state state = BP_DONE;
 	long credit;
 
-	mutex_lock(&balloon_mutex);
 
 	do {
+		mutex_lock(&balloon_mutex);
+
 		credit = current_credit();
 
 		if (credit > 0) {
@@ -499,17 +500,15 @@ static void balloon_process(struct work_struct *work)
 
 		state = update_schedule(state);
 
-#ifndef CONFIG_PREEMPT
-		if (need_resched())
-			schedule();
-#endif
+		mutex_unlock(&balloon_mutex);
+
+		cond_resched();
+
 	} while (credit && state == BP_DONE);
 
 	/* Schedule more work if there is some still to be done. */
 	if (state == BP_EAGAIN)
 		schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
-
-	mutex_unlock(&balloon_mutex);
 }
 
 /* Resets the Xen limit, sets new target, and kicks off processing. */
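The balloon_process() rework narrows the lock scope instead of holding balloon_mutex across the whole (potentially long) loop, and replaces the hand-rolled CONFIG_PREEMPT dance with cond_resched(), which is correct on all preemption models. The resulting work-item shape is a common pattern:

	do {
		mutex_lock(&balloon_mutex);
		state = do_one_batch();		/* hypothetical helper */
		mutex_unlock(&balloon_mutex);

		cond_resched();			/* yield between batches */
	} while (more_work(state));		/* hypothetical predicate */
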
cf->tid + 1; @@ -2181,8 +2176,6 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc, { struct ceph_inode_info *ci; struct ceph_cap *cap; - struct ceph_cap_flush *cf; - struct rb_node *n; dout("early_kick_flushing_caps mds%d\n", session->s_mds); list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { @@ -2205,16 +2198,11 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc, if ((cap->issued & ci->i_flushing_caps) != ci->i_flushing_caps) { spin_unlock(&ci->i_ceph_lock); - if (!__kick_flushing_caps(mdsc, session, ci, true)) + if (!__kick_flushing_caps(mdsc, session, ci)) continue; spin_lock(&ci->i_ceph_lock); } - for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) { - cf = rb_entry(n, struct ceph_cap_flush, i_node); - cf->kick = true; - } - spin_unlock(&ci->i_ceph_lock); } } @@ -2228,7 +2216,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, dout("kick_flushing_caps mds%d\n", session->s_mds); list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { - int delayed = __kick_flushing_caps(mdsc, session, ci, false); + int delayed = __kick_flushing_caps(mdsc, session, ci); if (delayed) { spin_lock(&ci->i_ceph_lock); __cap_delay_requeue(mdsc, ci); @@ -2261,7 +2249,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc, spin_unlock(&ci->i_ceph_lock); - delayed = __kick_flushing_caps(mdsc, session, ci, true); + delayed = __kick_flushing_caps(mdsc, session, ci); if (delayed) { spin_lock(&ci->i_ceph_lock); __cap_delay_requeue(mdsc, ci); diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c index 4347039ecc18..6706bde9ad1b 100644 --- a/fs/ceph/locks.c +++ b/fs/ceph/locks.c @@ -287,7 +287,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode, return 0; spin_lock(&ctx->flc_lock); - list_for_each_entry(lock, &ctx->flc_flock, fl_list) { + list_for_each_entry(lock, &ctx->flc_posix, fl_list) { ++seen_fcntl; if (seen_fcntl > num_fcntl_locks) { err = -ENOSPC; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 860cc016e70d..2f2460d23a06 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -189,7 +189,6 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap) struct ceph_cap_flush { u64 tid; int caps; - bool kick; struct rb_node g_node; // global union { struct rb_node i_node; // inode diff --git a/fs/dcache.c b/fs/dcache.c index 5c8ea15e73a5..9b5fe503f6cb 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -3442,22 +3442,15 @@ void __init vfs_caches_init_early(void) inode_init_early(); } -void __init vfs_caches_init(unsigned long mempages) +void __init vfs_caches_init(void) { - unsigned long reserve; - - /* Base hash sizes on available memory, with a reserve equal to - 150% of current kernel size */ - - reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1); - mempages -= reserve; - names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); dcache_init(); inode_init(); - files_init(mempages); + files_init(); + files_maxfiles_init(); mnt_init(); bdev_cache_init(); chrdev_init(); diff --git a/fs/file_table.c b/fs/file_table.c index 7f9d407c7595..ad17e05ebf95 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -25,6 +25,7 @@ #include #include #include +#include #include @@ -308,19 +309,24 @@ void put_filp(struct file *file) } } -void __init files_init(unsigned long mempages) +void __init files_init(void) { - unsigned long n; - filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); - - /* - * One file with associated 
inode and dcache is very roughly 1K. - * Per default don't use more than 10% of our memory for files. - */ - - n = (mempages * (PAGE_SIZE / 1024)) / 10; - files_stat.max_files = max_t(unsigned long, n, NR_FILE); percpu_counter_init(&nr_files, 0, GFP_KERNEL); +} + +/* + * One file with associated inode and dcache is very roughly 1K. Per default + * do not use more than 10% of our memory for files. + */ +void __init files_maxfiles_init(void) +{ + unsigned long n; + unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2; + + memreserve = min(memreserve, totalram_pages - 1); + n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10; + + files_stat.max_files = max_t(unsigned long, n, NR_FILE); } diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 0cf74df68617..973c24ce59ad 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -1010,6 +1010,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size, inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0); if (!inode) goto out_dentry; + if (creat_flags == HUGETLB_SHMFS_INODE) + inode->i_flags |= S_PRIVATE; file = ERR_PTR(-ENOMEM); if (hugetlb_reserve_pages(inode, 0, diff --git a/fs/namei.c b/fs/namei.c index ae4e4c18b2ac..1c2105ed20c5 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -879,7 +879,7 @@ static inline int may_follow_link(struct nameidata *nd) return 0; /* Allowed if parent directory not sticky and world-writable. */ - parent = nd->path.dentry->d_inode; + parent = nd->inode; if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH)) return 0; @@ -1954,8 +1954,13 @@ OK: continue; } } - if (unlikely(!d_can_lookup(nd->path.dentry))) + if (unlikely(!d_can_lookup(nd->path.dentry))) { + if (nd->flags & LOOKUP_RCU) { + if (unlazy_walk(nd, NULL, 0)) + return -ECHILD; + } return -ENOTDIR; + } } } diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 6904213a4363..ebf90e487c75 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -212,6 +212,7 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate, BUG_ON(!ls->ls_file); if (nfsd4_layout_setlease(ls)) { + fput(ls->ls_file); put_nfs4_file(fp); kmem_cache_free(nfs4_layout_stateid_cache, ls); return NULL; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 61dfb33f0559..95202719a1fd 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -4396,9 +4396,9 @@ laundromat_main(struct work_struct *laundry) queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); } -static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp) +static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp) { - if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle)) + if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle)) return nfserr_bad_stateid; return nfs_ok; } @@ -4601,9 +4601,6 @@ nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags) { __be32 status; - status = nfs4_check_fh(fhp, ols); - if (status) - return status; status = nfsd4_check_openowner_confirmed(ols); if (status) return status; @@ -4690,6 +4687,9 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp, status = nfserr_bad_stateid; break; } + if (status) + goto out; + status = nfs4_check_fh(fhp, s); done: if (!status && filpp) @@ -4798,7 +4798,7 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_ status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); if (status) return status; - return 
nfs4_check_fh(current_fh, stp); + return nfs4_check_fh(current_fh, &stp->st_stid); } /* diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 54633858733a..75e0563c09d1 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2143,6 +2143,7 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp, #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \ FATTR4_WORD0_RDATTR_ERROR) #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID +#define WORD2_ABSENT_FS_ATTRS 0 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL static inline __be32 @@ -2171,7 +2172,7 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp, { return 0; } #endif -static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err) +static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err) { /* As per referral draft: */ if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS || @@ -2184,6 +2185,7 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err) } *bmval0 &= WORD0_ABSENT_FS_ATTRS; *bmval1 &= WORD1_ABSENT_FS_ATTRS; + *bmval2 &= WORD2_ABSENT_FS_ATTRS; return 0; } @@ -2246,8 +2248,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion)); if (exp->ex_fslocs.migrated) { - BUG_ON(bmval[2]); - status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err); + status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err); if (status) goto out; } @@ -2286,8 +2287,8 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, } #ifdef CONFIG_NFSD_V4_SECURITY_LABEL - if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) || - bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) { + if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) || + bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) { err = security_inode_getsecctx(d_inode(dentry), &context, &contextlen); contextsupport = (err == 0); diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 92e48c70f0f0..39ddcaf0918f 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -412,16 +412,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags) { struct fsnotify_mark *lmark, *mark; + LIST_HEAD(to_free); + /* + * We have to be really careful here. Anytime we drop mark_mutex, e.g. + * fsnotify_clear_marks_by_inode() can come and free marks, even ones + * already on our to_free list, so we have to hold mark_mutex even when + * accessing that list. And freeing a mark requires us to drop + * mark_mutex. So we can reliably free only the first mark in the list. + * That's why we first move the marks to be freed onto the to_free list + * in one go and then free the marks there one by one.
+ */ mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) { - if (mark->flags & flags) { - fsnotify_get_mark(mark); - fsnotify_destroy_mark_locked(mark, group); - fsnotify_put_mark(mark); - } + if (mark->flags & flags) + list_move(&mark->g_list, &to_free); } mutex_unlock(&group->mark_mutex); + + while (1) { + mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); + if (list_empty(&to_free)) { + mutex_unlock(&group->mark_mutex); + break; + } + mark = list_first_entry(&to_free, struct fsnotify_mark, g_list); + fsnotify_get_mark(mark); + fsnotify_destroy_mark_locked(mark, group); + mutex_unlock(&group->mark_mutex); + fsnotify_put_mark(mark); + } } /* diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 1a35c6139656..0f5fd9db8194 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -685,7 +685,7 @@ static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb, if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) { u64 s = i_size_read(inode); - sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) + + sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) + (do_div(s, osb->s_clustersize) >> 9); ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector, @@ -910,7 +910,7 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb, BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN)); ret = blkdev_issue_zeroout(osb->sb->s_bdev, - p_cpos << (osb->s_clustersize_bits - 9), + (u64)p_cpos << (osb->s_clustersize_bits - 9), zero_len_head >> 9, GFP_NOFS, false); if (ret < 0) mlog_errno(ret); diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 8b23aa2f52dd..23157e40dd74 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -4025,9 +4025,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb) osb->dc_work_sequence = osb->dc_wake_sequence; processed = osb->blocked_lock_count; - while (processed) { - BUG_ON(list_empty(&osb->blocked_lock_list)); - + /* + * blocked lock processing in this loop might call iput which can + * remove items off osb->blocked_lock_list. Downconvert up to + * 'processed' number of locks, but stop short if we had some + * removed in ocfs2_mark_lockres_freeing when downconverting. + */ + while (processed && !list_empty(&osb->blocked_lock_list)) { lockres = list_entry(osb->blocked_lock_list.next, struct ocfs2_lock_res, l_blocked_list); list_del_init(&lockres->l_blocked_list); diff --git a/fs/signalfd.c b/fs/signalfd.c index 7e412ad74836..270221fcef42 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo, * Other callers might not initialize the si_lsb field, * so check explicitly for the right codes here. 
*/ - if (kinfo->si_code == BUS_MCEERR_AR || - kinfo->si_code == BUS_MCEERR_AO) + if (kinfo->si_signo == SIGBUS && + (kinfo->si_code == BUS_MCEERR_AR || + kinfo->si_code == BUS_MCEERR_AO)) err |= __put_user((short) kinfo->si_addr_lsb, &uinfo->ssi_addr_lsb); #endif diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 48db6a56975f..5aa519711e0b 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -691,7 +691,7 @@ struct drm_vblank_crtc { struct timer_list disable_timer; /* delayed disable timer */ /* vblank counter, protected by dev->vblank_time_lock for writes */ - unsigned long count; + u32 count; /* vblank timestamps, protected by dev->vblank_time_lock for writes */ struct timeval time[DRM_VBLANKTIME_RBSIZE]; diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index c8fc187061de..918aa68b5199 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -168,6 +168,7 @@ struct drm_encoder_helper_funcs { * @get_modes: get mode list for this connector * @mode_valid: is this mode valid on the given connector? (optional) * @best_encoder: return the preferred encoder for this connector + * @atomic_best_encoder: atomic version of @best_encoder * * The helper operations are called by the mid-layer CRTC helper. */ @@ -176,6 +177,8 @@ struct drm_connector_helper_funcs { enum drm_mode_status (*mode_valid)(struct drm_connector *connector, struct drm_display_mode *mode); struct drm_encoder *(*best_encoder)(struct drm_connector *connector); + struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, + struct drm_connector_state *connector_state); }; extern void drm_helper_disable_unused_functions(struct drm_device *dev); diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 45c39a37f924..8bc073d297db 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -172,6 +172,7 @@ {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ diff --git a/include/linux/average.h b/include/linux/average.h index c6028fd742c1..d04aa58280de 100644 --- a/include/linux/average.h +++ b/include/linux/average.h @@ -3,28 +3,43 @@ /* Exponentially weighted moving average (EWMA) */ -/* For more documentation see lib/average.c */ - -struct ewma { - unsigned long internal; - unsigned long factor; - unsigned long weight; -}; - -extern void ewma_init(struct ewma *avg, unsigned long factor, - unsigned long weight); - -extern struct ewma *ewma_add(struct ewma *avg, unsigned long val); - -/** - * ewma_read() - Get average value - * @avg: Average structure - * - * Returns the average value held in @avg. 
- */ -static inline unsigned long ewma_read(const struct ewma *avg) -{ - return avg->internal >> avg->factor; -} +#define DECLARE_EWMA(name, _factor, _weight) \ + struct ewma_##name { \ + unsigned long internal; \ + }; \ + static inline void ewma_##name##_init(struct ewma_##name *e) \ + { \ + BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ + e->internal = 0; \ + } \ + static inline unsigned long \ + ewma_##name##_read(struct ewma_##name *e) \ + { \ + BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ + return e->internal >> ilog2(_factor); \ + } \ + static inline void ewma_##name##_add(struct ewma_##name *e, \ + unsigned long val) \ + { \ + unsigned long internal = ACCESS_ONCE(e->internal); \ + unsigned long weight = ilog2(_weight); \ + unsigned long factor = ilog2(_factor); \ + \ + BUILD_BUG_ON(!__builtin_constant_p(_factor)); \ + BUILD_BUG_ON(!__builtin_constant_p(_weight)); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_factor); \ + BUILD_BUG_ON_NOT_POWER_OF_2(_weight); \ + \ + ACCESS_ONCE(e->internal) = internal ? \ + (((internal << weight) - internal) + \ + (val << factor)) >> weight : \ + (val << factor); \ + } #endif /* _LINUX_AVERAGE_H */ diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index 6cceedf65ca2..cf038431a5cc 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -640,7 +640,6 @@ struct bcma_drv_cc { spinlock_t gpio_lock; #ifdef CONFIG_BCMA_DRIVER_GPIO struct gpio_chip gpio; - struct irq_domain *irq_domain; #endif }; diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 139d6d2e123f..f57d7fed9ec3 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -10,6 +10,7 @@ #include #include #include +#include struct bpf_map; @@ -24,6 +25,10 @@ struct bpf_map_ops { void *(*map_lookup_elem)(struct bpf_map *map, void *key); int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); int (*map_delete_elem)(struct bpf_map *map, void *key); + + /* funcs called by prog_array and perf_event_array map */ + void *(*map_fd_get_ptr) (struct bpf_map *map, int fd); + void (*map_fd_put_ptr) (void *ptr); }; struct bpf_map { @@ -142,13 +147,13 @@ struct bpf_array { bool owner_jited; union { char value[0] __aligned(8); - struct bpf_prog *prog[0] __aligned(8); + void *ptrs[0] __aligned(8); }; }; #define MAX_TAIL_CALL_CNT 32 u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); -void bpf_prog_array_map_clear(struct bpf_map *map); +void bpf_fd_array_map_clear(struct bpf_map *map); bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); const struct bpf_func_proto *bpf_get_trace_printk_proto(void); @@ -185,6 +190,7 @@ extern const struct bpf_func_proto bpf_map_lookup_elem_proto; extern const struct bpf_func_proto bpf_map_update_elem_proto; extern const struct bpf_func_proto bpf_map_delete_elem_proto; +extern const struct bpf_func_proto bpf_perf_event_read_proto; extern const struct bpf_func_proto bpf_get_prandom_u32_proto; extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; extern const struct bpf_func_proto bpf_tail_call_proto; diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 9012f8775208..eb049c622208 100644 --- 
a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -76,7 +76,7 @@ static inline bool is_link_local_ether_addr(const u8 *addr) #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) return (((*(const u32 *)addr) ^ (*(const u32 *)b)) | - ((a[2] ^ b[2]) & m)) == 0; + (__force int)((a[2] ^ b[2]) & m)) == 0; #else return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; #endif diff --git a/include/linux/fs.h b/include/linux/fs.h index cc008c338f5a..84b783f277f7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -55,7 +55,8 @@ struct vm_fault; extern void __init inode_init(void); extern void __init inode_init_early(void); -extern void __init files_init(unsigned long); +extern void __init files_init(void); +extern void __init files_maxfiles_init(void); extern struct files_stat_struct files_stat; extern unsigned long get_max_files(void); @@ -2245,7 +2246,7 @@ extern int ioctl_preallocate(struct file *filp, void __user *argp); /* fs/dcache.c */ extern void __init vfs_caches_init_early(void); -extern void __init vfs_caches_init(unsigned long); +extern void __init vfs_caches_init(void); extern struct kmem_cache *names_cachep; diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index b9c7897dc566..cfa906f28b7a 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2074,8 +2074,8 @@ enum ieee80211_tdls_actioncode { #define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6) #define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7) +#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(5) #define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) -#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7) /* TDLS specific payload type in the LLC/SNAP header */ #define WLAN_TDLS_SNAP_RFTYPE 0x2 diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index cb9dcad72372..f1f32af6d9b9 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -31,6 +31,7 @@ struct ipv6_devconf { __s32 accept_ra_defrtr; __s32 accept_ra_min_hop_limit; __s32 accept_ra_pinfo; + __s32 ignore_routes_with_linkdown; #ifdef CONFIG_IPV6_ROUTER_PREF __s32 accept_ra_rtr_pref; __s32 rtr_probe_interval; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 2039546b0ec6..8b6d6f2154a4 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -103,6 +103,7 @@ enum { MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, MLX5_REG_PAOS = 0x5006, + MLX5_REG_PFCC = 0x5007, MLX5_REG_PPCNT = 0x5008, MLX5_REG_PMAOS = 0x5012, MLX5_REG_PUDE = 0x5009, @@ -152,8 +153,8 @@ enum mlx5_dev_event { }; enum mlx5_port_status { - MLX5_PORT_UP = 1 << 1, - MLX5_PORT_DOWN = 1 << 2, + MLX5_PORT_UP = 1, + MLX5_PORT_DOWN = 2, }; struct mlx5_uuar_info { @@ -761,9 +762,10 @@ int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, u8 local_port); int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, int proto_mask); -int mlx5_set_port_status(struct mlx5_core_dev *dev, - enum mlx5_port_status status); -int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status); +int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, + enum mlx5_port_status status); +int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, + enum mlx5_port_status *status); int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port); void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port); @@ -773,6 +775,10 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, u8 *vl_hw_cap, u8 local_port); +int 
mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause); +int mlx5_query_port_pause(struct mlx5_core_dev *dev, + u32 *rx_pause, u32 *tx_pause); + int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 607b5f41f46f..4bd177faa90c 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1262,6 +1262,8 @@ struct net_device_ops { * @IFF_LIVE_ADDR_CHANGE: device supports hardware address * change when it's running * @IFF_MACVLAN: Macvlan device + * @IFF_VRF_MASTER: device is a VRF master + * @IFF_NO_QUEUE: device can run without qdisc attached */ enum netdev_priv_flags { IFF_802_1Q_VLAN = 1<<0, @@ -1289,6 +1291,8 @@ enum netdev_priv_flags { IFF_XMIT_DST_RELEASE_PERM = 1<<22, IFF_IPVLAN_MASTER = 1<<23, IFF_IPVLAN_SLAVE = 1<<24, + IFF_VRF_MASTER = 1<<25, + IFF_NO_QUEUE = 1<<26, }; #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN @@ -1316,6 +1320,8 @@ enum netdev_priv_flags { #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM #define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER #define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE +#define IFF_VRF_MASTER IFF_VRF_MASTER +#define IFF_NO_QUEUE IFF_NO_QUEUE /** * struct net_device - The DEVICE structure. @@ -1432,6 +1438,7 @@ enum netdev_priv_flags { * @dn_ptr: DECnet specific data * @ip6_ptr: IPv6 specific data * @ax25_ptr: AX.25 specific data + * @vrf_ptr: VRF specific data * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering * * @last_rx: Time of last Rx @@ -1650,6 +1657,7 @@ struct net_device { struct dn_dev __rcu *dn_ptr; struct inet6_dev __rcu *ip6_ptr; void *ax25_ptr; + struct net_vrf_dev __rcu *vrf_ptr; struct wireless_dev *ieee80211_ptr; struct wpan_dev *ieee802154_ptr; #if IS_ENABLED(CONFIG_MPLS_ROUTING) @@ -3808,6 +3816,32 @@ static inline bool netif_supports_nofcs(struct net_device *dev) return dev->priv_flags & IFF_SUPP_NOFCS; } +static inline bool netif_is_vrf(const struct net_device *dev) +{ + return dev->priv_flags & IFF_VRF_MASTER; +} + +static inline bool netif_index_is_vrf(struct net *net, int ifindex) +{ + bool rc = false; + +#if IS_ENABLED(CONFIG_NET_VRF) + struct net_device *dev; + + if (ifindex == 0) + return false; + + rcu_read_lock(); + + dev = dev_get_by_index_rcu(net, ifindex); + if (dev) + rc = netif_is_vrf(dev); + + rcu_read_unlock(); +#endif + return rc; +} + /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ static inline void netif_keep_dst(struct net_device *dev) { diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index f34e040b34e9..41c93844fb1d 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -631,15 +631,19 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) 1 << PG_private | 1 << PG_private_2 | \ 1 << PG_writeback | 1 << PG_reserved | \ 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ - 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \ + 1 << PG_unevictable | __PG_MLOCKED | \ __PG_COMPOUND_LOCK) /* * Flags checked when a page is prepped for return by the page allocator. - * Pages being prepped should not have any flags set. It they are set, + * Pages being prepped should not have these flags set. If they are set, * there has been a kernel bug or struct page corruption.
+ * + * __PG_HWPOISON is exceptional because it needs to be kept beyond the page's + * alloc-free cycle to prevent the page from being reused. */ -#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) +#define PAGE_FLAGS_CHECK_AT_PREP \ + (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) #define PAGE_FLAGS_PRIVATE \ (1 << PG_private | 1 << PG_private_2) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2027809433b3..092a0e8a479a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -641,6 +641,8 @@ extern int perf_event_init_task(struct task_struct *child); extern void perf_event_exit_task(struct task_struct *child); extern void perf_event_free_task(struct task_struct *task); extern void perf_event_delayed_put(struct task_struct *task); +extern struct perf_event *perf_event_get(unsigned int fd); +extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event); extern void perf_event_print_debug(void); extern void perf_pmu_disable(struct pmu *pmu); extern void perf_pmu_enable(struct pmu *pmu); @@ -659,6 +661,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, void *context); extern void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu); +extern u64 perf_event_read_local(struct perf_event *event); extern u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running); @@ -979,6 +982,12 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; } static inline void perf_event_exit_task(struct task_struct *child) { } static inline void perf_event_free_task(struct task_struct *task) { } static inline void perf_event_delayed_put(struct task_struct *task) { } +static inline struct perf_event *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); } +static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event) +{ + return ERR_PTR(-EINVAL); +} +static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; } static inline void perf_event_print_debug(void) { } static inline int perf_event_task_disable(void) { return -EINVAL; } static inline int perf_event_task_enable(void) { return -EINVAL; } @@ -1011,6 +1020,7 @@ static inline void perf_event_enable(struct perf_event *event) { } static inline void perf_event_disable(struct perf_event *event) { } static inline int __perf_event_disable(void *info) { return -1; } static inline void perf_event_task_tick(void) { } +static inline int perf_event_release_kernel(struct perf_event *event) { return 0; } #endif #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL) diff --git a/include/linux/property.h b/include/linux/property.h index 76ebde9c11d4..a59c6ee566c2 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -166,4 +166,8 @@ void device_add_property_set(struct device *dev, struct property_set *pset); bool device_dma_is_coherent(struct device *dev); +int device_get_phy_mode(struct device *dev); + +void *device_get_mac_address(struct device *dev, char *addr, int alen); + #endif /* _LINUX_PROPERTY_H_ */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index df9fdf557689..065e10b81a77 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2913,11 +2913,11 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) * * PHY drivers may accept clones of transmitted packets for * timestamping via their phy_driver.txtstamp method.
These drivers - * must call this function to return the skb back to the stack, with - * or without a timestamp. + * must call this function to return the skb back to the stack with a + * timestamp. * * @skb: clone of the the original outgoing packet - * @hwtstamps: hardware time stamps, may be NULL if not available + * @hwtstamps: hardware time stamps * */ void skb_complete_tx_timestamp(struct sk_buff *skb, diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h index dc03d77ad23b..a2f59ec98d24 100644 --- a/include/net/6lowpan.h +++ b/include/net/6lowpan.h @@ -197,6 +197,27 @@ #define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */ #define LOWPAN_NHC_UDP_CS_C 0x04 /* checksum elided */ +#define LOWPAN_PRIV_SIZE(llpriv_size) \ + (sizeof(struct lowpan_priv) + llpriv_size) + +enum lowpan_lltypes { + LOWPAN_LLTYPE_BTLE, + LOWPAN_LLTYPE_IEEE802154, +}; + +struct lowpan_priv { + enum lowpan_lltypes lltype; + + /* must be last */ + u8 priv[0] __aligned(sizeof(void *)); +}; + +static inline +struct lowpan_priv *lowpan_priv(const struct net_device *dev) +{ + return netdev_priv(dev); +} + #ifdef DEBUG /* print data in line */ static inline void raw_dump_inline(const char *caller, char *msg, @@ -372,6 +393,8 @@ lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset) return skb->len + uncomp_header - ret; } +void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype); + int lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev, const u8 *saddr, const u8 saddr_type, diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 2a6b0919e23f..9e1a59e01fa2 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -512,9 +512,11 @@ struct hci_conn_params { HCI_AUTO_CONN_DIRECT, HCI_AUTO_CONN_ALWAYS, HCI_AUTO_CONN_LINK_LOSS, + HCI_AUTO_CONN_EXPLICIT, } auto_connect; struct hci_conn *conn; + bool explicit_connect; }; extern struct list_head hci_dev_list; @@ -639,6 +641,7 @@ enum { HCI_CONN_DROP, HCI_CONN_PARAM_REMOVAL_PEND, HCI_CONN_NEW_LINK_KEY, + HCI_CONN_SCANNING, }; static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) @@ -808,6 +811,26 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, return NULL; } +static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type == LE_LINK && c->state == BT_CONNECT && + !test_bit(HCI_CONN_SCANNING, &c->flags)) { + rcu_read_unlock(); + return c; + } + } + + rcu_read_unlock(); + + return NULL; +} + int hci_disconnect(struct hci_conn *conn, __u8 reason); bool hci_setup_sync(struct hci_conn *conn, __u16 handle); void hci_sco_setup(struct hci_conn *conn, __u8 status); @@ -823,6 +846,9 @@ void hci_chan_del(struct hci_chan *chan); void hci_chan_list_flush(struct hci_conn *conn); struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle); +struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, + u8 dst_type, u8 sec_level, + u16 conn_timeout, u8 role); struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, u16 conn_timeout, u8 role); @@ -988,6 +1014,9 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev); struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, bdaddr_t *addr, u8 addr_type); +struct hci_conn_params *hci_explicit_connect_lookup(struct 
hci_dev *hdev, + bdaddr_t *addr, + u8 addr_type); void hci_uuids_clear(struct hci_dev *hdev); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 883fe1e7c5a1..f0889a247643 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -2369,8 +2369,7 @@ struct cfg80211_qos_map { * method returns 0.) * * @mgmt_frame_register: Notify driver that a management frame type was - * registered. Note that this callback may not sleep, and cannot run - * concurrently with itself. + * registered. The callback is allowed to sleep. * * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device. * Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index 382f94b59f2f..76b1ffaea863 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -63,6 +63,8 @@ struct cfg802154_ops { s8 max_frame_retries); int (*set_lbt_mode)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, bool mode); + int (*set_ackreq_default)(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, bool ackreq); }; static inline bool @@ -173,6 +175,9 @@ struct wpan_dev { struct list_head list; struct net_device *netdev; + /* lowpan interface, set when the wpan_dev belongs to one lowpan_dev */ + struct net_device *lowpan_dev; + u32 identifier; /* MAC PIB */ @@ -193,6 +198,9 @@ struct wpan_dev { bool lbt; bool promiscuous_mode; + + /* fallback for acknowledgment bit setting */ + bool ackreq; }; #define to_phy(_dev) container_of(_dev, struct wpan_phy, dev) diff --git a/include/net/checksum.h b/include/net/checksum.h index 2d1d73cb773e..9fcaedf994ee 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -140,14 +140,16 @@ static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new) struct sk_buff; void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, - __be32 from, __be32 to, int pseudohdr); + __be32 from, __be32 to, bool pseudohdr); void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, const __be32 *from, const __be32 *to, - int pseudohdr); + bool pseudohdr); +void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb, + __wsum diff, bool pseudohdr); static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, __be16 from, __be16 to, - int pseudohdr) + bool pseudohdr) { inet_proto_csum_replace4(sum, skb, (__force __be32)from, (__force __be32)to, pseudohdr); diff --git a/include/net/dsa.h b/include/net/dsa.h index fbca63ba8f73..b34d812bc5d0 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -171,6 +171,11 @@ static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p) return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port); } +static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p) +{ + return !!((ds->dsa_port_mask) & (1 << p)); +} + static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p) { return ds->phys_port_mask & (1 << p) && ds->ports[p]; @@ -296,12 +301,28 @@ struct dsa_switch_driver { u32 br_port_mask); int (*port_stp_update)(struct dsa_switch *ds, int port, u8 state); - int (*fdb_add)(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid); - int (*fdb_del)(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid); - int (*fdb_getnext)(struct dsa_switch *ds, int port, - unsigned char *addr, bool *is_static); + + /* + * VLAN support + */ + int (*port_pvid_get)(struct dsa_switch *ds, int port, u16 *pvid); + int (*port_pvid_set)(struct dsa_switch *ds, int 
port, u16 pvid); + int (*port_vlan_add)(struct dsa_switch *ds, int port, u16 vid, + bool untagged); + int (*port_vlan_del)(struct dsa_switch *ds, int port, u16 vid); + int (*vlan_getnext)(struct dsa_switch *ds, u16 *vid, + unsigned long *ports, unsigned long *untagged); + + /* + * Forwarding database + */ + int (*port_fdb_add)(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid); + int (*port_fdb_del)(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid); + int (*port_fdb_getnext)(struct dsa_switch *ds, int port, + unsigned char *addr, u16 *vid, + bool *is_static); }; void register_switch_driver(struct dsa_switch_driver *type); diff --git a/include/net/flow.h b/include/net/flow.h index 3098ae33a178..f305588fc162 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -33,6 +33,7 @@ struct flowi_common { __u8 flowic_flags; #define FLOWI_FLAG_ANYSRC 0x01 #define FLOWI_FLAG_KNOWN_NH 0x02 +#define FLOWI_FLAG_VRFSRC 0x04 __u32 flowic_secid; struct flowi_tunnel flowic_tun_key; }; diff --git a/include/net/gre.h b/include/net/gre.h index b53182018743..97eafdc47eea 100644 --- a/include/net/gre.h +++ b/include/net/gre.h @@ -4,6 +4,12 @@ #include #include +struct gre_base_hdr { + __be16 flags; + __be16 protocol; +}; +#define GRE_HEADER_SECTION 4 + #define GREPROTO_CISCO 0 #define GREPROTO_PPTP 1 #define GREPROTO_MAX 2 @@ -14,91 +20,9 @@ struct gre_protocol { void (*err_handler)(struct sk_buff *skb, u32 info); }; -struct gre_base_hdr { - __be16 flags; - __be16 protocol; -}; -#define GRE_HEADER_SECTION 4 - int gre_add_protocol(const struct gre_protocol *proto, u8 version); int gre_del_protocol(const struct gre_protocol *proto, u8 version); -struct gre_cisco_protocol { - int (*handler)(struct sk_buff *skb, const struct tnl_ptk_info *tpi); - int (*err_handler)(struct sk_buff *skb, u32 info, - const struct tnl_ptk_info *tpi); - u8 priority; -}; - -int gre_cisco_register(struct gre_cisco_protocol *proto); -int gre_cisco_unregister(struct gre_cisco_protocol *proto); - -void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi, - int hdr_len); - -static inline struct sk_buff *gre_handle_offloads(struct sk_buff *skb, - bool csum) -{ - return iptunnel_handle_offloads(skb, csum, - csum ? 
SKB_GSO_GRE_CSUM : SKB_GSO_GRE); -} - - -static inline int ip_gre_calc_hlen(__be16 o_flags) -{ - int addend = 4; - - if (o_flags&TUNNEL_CSUM) - addend += 4; - if (o_flags&TUNNEL_KEY) - addend += 4; - if (o_flags&TUNNEL_SEQ) - addend += 4; - return addend; -} - -static inline __be16 gre_flags_to_tnl_flags(__be16 flags) -{ - __be16 tflags = 0; - - if (flags & GRE_CSUM) - tflags |= TUNNEL_CSUM; - if (flags & GRE_ROUTING) - tflags |= TUNNEL_ROUTING; - if (flags & GRE_KEY) - tflags |= TUNNEL_KEY; - if (flags & GRE_SEQ) - tflags |= TUNNEL_SEQ; - if (flags & GRE_STRICT) - tflags |= TUNNEL_STRICT; - if (flags & GRE_REC) - tflags |= TUNNEL_REC; - if (flags & GRE_VERSION) - tflags |= TUNNEL_VERSION; - - return tflags; -} - -static inline __be16 tnl_flags_to_gre_flags(__be16 tflags) -{ - __be16 flags = 0; - - if (tflags & TUNNEL_CSUM) - flags |= GRE_CSUM; - if (tflags & TUNNEL_ROUTING) - flags |= GRE_ROUTING; - if (tflags & TUNNEL_KEY) - flags |= GRE_KEY; - if (tflags & TUNNEL_SEQ) - flags |= GRE_SEQ; - if (tflags & TUNNEL_STRICT) - flags |= GRE_STRICT; - if (tflags & TUNNEL_REC) - flags |= GRE_REC; - if (tflags & TUNNEL_VERSION) - flags |= GRE_VERSION; - - return flags; -} - +struct net_device *gretap_fb_dev_create(struct net *net, const char *name, + u8 name_assign_type); #endif diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 47984415f5d1..984dbfa15e13 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -82,6 +82,8 @@ struct ip_tunnel_dst { __be32 saddr; }; +struct metadata_dst; + struct ip_tunnel { struct ip_tunnel __rcu *next; struct hlist_node hash_node; @@ -115,6 +117,7 @@ struct ip_tunnel { unsigned int prl_count; /* # of entries in PRL */ int ip_tnl_net_id; struct gro_cells gro_cells; + bool collect_md; }; #define TUNNEL_CSUM __cpu_to_be16(0x01) @@ -149,6 +152,7 @@ struct tnl_ptk_info { struct ip_tunnel_net { struct net_device *fb_tunnel_dev; struct hlist_head tunnels[IP_TNL_HASH_SIZE]; + struct ip_tunnel __rcu *collect_md_tun; }; struct ip_tunnel_encap_ops { @@ -235,7 +239,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, __be32 key); int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, - const struct tnl_ptk_info *tpi, bool log_ecn_error); + const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst, + bool log_ecn_error); int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], struct ip_tunnel_parm *p); int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index 33bd30963a95..cfee53916ba5 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -11,12 +11,15 @@ #define LWTUNNEL_HASH_SIZE (1 << LWTUNNEL_HASH_BITS) /* lw tunnel state flags */ -#define LWTUNNEL_STATE_OUTPUT_REDIRECT 0x1 +#define LWTUNNEL_STATE_OUTPUT_REDIRECT BIT(0) +#define LWTUNNEL_STATE_INPUT_REDIRECT BIT(1) struct lwtunnel_state { __u16 type; __u16 flags; atomic_t refcnt; + int (*orig_output)(struct sock *sk, struct sk_buff *skb); + int (*orig_input)(struct sk_buff *); int len; __u8 data[0]; }; @@ -25,6 +28,7 @@ struct lwtunnel_encap_ops { int (*build_state)(struct net_device *dev, struct nlattr *encap, struct lwtunnel_state **ts); int (*output)(struct sock *sk, struct sk_buff *skb); + int (*input)(struct sk_buff *skb); int (*fill_encap)(struct sk_buff *skb, struct lwtunnel_state *lwtstate); int (*get_encap_size)(struct lwtunnel_state *lwtstate); @@ -32,6 +36,11 @@ struct lwtunnel_encap_ops { }; #ifdef CONFIG_LWTUNNEL +static inline void 
lwtstate_free(struct lwtunnel_state *lws) +{ + kfree(lws); +} + static inline struct lwtunnel_state * lwtstate_get(struct lwtunnel_state *lws) { @@ -47,7 +56,7 @@ static inline void lwtstate_put(struct lwtunnel_state *lws) return; if (atomic_dec_and_test(&lws->refcnt)) - kfree(lws); + lwtstate_free(lws); } static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate) @@ -58,6 +67,13 @@ static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate) return false; } +static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate) +{ + if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_INPUT_REDIRECT)) + return true; + + return false; +} int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op, unsigned int num); int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op, @@ -72,9 +88,15 @@ struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len); int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b); int lwtunnel_output(struct sock *sk, struct sk_buff *skb); int lwtunnel_output6(struct sock *sk, struct sk_buff *skb); +int lwtunnel_input(struct sk_buff *skb); +int lwtunnel_input6(struct sk_buff *skb); #else +static inline void lwtstate_free(struct lwtunnel_state *lws) +{ +} + static inline struct lwtunnel_state * lwtstate_get(struct lwtunnel_state *lws) { @@ -90,6 +112,11 @@ static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate) return false; } +static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate) +{ + return false; +} + static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op, unsigned int num) { @@ -142,6 +169,16 @@ static inline int lwtunnel_output6(struct sock *sk, struct sk_buff *skb) return -EOPNOTSUPP; } +static inline int lwtunnel_input(struct sk_buff *skb) +{ + return -EOPNOTSUPP; +} + +static inline int lwtunnel_input6(struct sk_buff *skb) +{ + return -EOPNOTSUPP; +} + #endif #endif /* __NET_LWTUNNEL_H */ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 6b1077c2a63f..e3314e516681 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -973,6 +973,10 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame. * If this flag is set, the stack cannot do any replay detection * hence the driver or hardware will have to do that. + * @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this + * flag indicates that the PN was verified for replay protection. + * Note that this flag is also currently only supported when a frame + * is also decrypted (ie. @RX_FLAG_DECRYPTED must be set) * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on * the frame. 
* @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on @@ -997,9 +1001,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference * number (@ampdu_reference) must be populated and be a distinct number for * each A-MPDU - * @RX_FLAG_AMPDU_REPORT_ZEROLEN: driver reports 0-length subframes - * @RX_FLAG_AMPDU_IS_ZEROLEN: This is a zero-length subframe, for - * monitoring purposes only * @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all * subframes of a single A-MPDU * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU @@ -1039,8 +1040,8 @@ enum mac80211_rx_flags { RX_FLAG_NO_SIGNAL_VAL = BIT(12), RX_FLAG_HT_GF = BIT(13), RX_FLAG_AMPDU_DETAILS = BIT(14), - RX_FLAG_AMPDU_REPORT_ZEROLEN = BIT(15), - RX_FLAG_AMPDU_IS_ZEROLEN = BIT(16), + RX_FLAG_PN_VALIDATED = BIT(15), + /* bit 16 free */ RX_FLAG_AMPDU_LAST_KNOWN = BIT(17), RX_FLAG_AMPDU_IS_LAST = BIT(18), RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(19), @@ -1491,8 +1492,10 @@ enum ieee80211_key_flags { * - Temporal Authenticator Rx MIC Key (64 bits) * @icv_len: The ICV length for this key type * @iv_len: The IV length for this key type + * @drv_priv: pointer for driver use */ struct ieee80211_key_conf { + void *drv_priv; atomic64_t tx_pn; u32 cipher; u8 icv_len; @@ -1675,7 +1678,6 @@ struct ieee80211_sta_rates { * @tdls: indicates whether the STA is a TDLS peer * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only * valid if the STA is a TDLS peer in the first place. - * @mfp: indicates whether the STA uses management frame protection or not. * @txq: per-TID data TX queues (if driver uses the TXQ abstraction) */ struct ieee80211_sta { @@ -1693,7 +1695,6 @@ struct ieee80211_sta { struct ieee80211_sta_rates __rcu *rates; bool tdls; bool tdls_initiator; - bool mfp; struct ieee80211_txq *txq[IEEE80211_NUM_TIDS]; @@ -1888,6 +1889,9 @@ struct ieee80211_txq { * @IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands * in one command, mac80211 doesn't have to run separate scans per band. * + * @IEEE80211_HW_TDLS_WIDER_BW: The device/driver supports wider bandwidth + * than the BSS bandwidth for a TDLS link on the base channel. + * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -1920,6 +1924,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_CHANCTX_STA_CSA, IEEE80211_HW_SUPPORTS_CLONED_SKBS, IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS, + IEEE80211_HW_TDLS_WIDER_BW, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS @@ -3696,20 +3701,28 @@ void ieee80211_free_hw(struct ieee80211_hw *hw); void ieee80211_restart_hw(struct ieee80211_hw *hw); /** - * ieee80211_napi_add - initialize mac80211 NAPI context - * @hw: the hardware to initialize the NAPI context on - * @napi: the NAPI context to initialize - * @napi_dev: dummy NAPI netdevice, here to not waste the space if the - * driver doesn't use NAPI - * @poll: poll function - * @weight: default weight + * ieee80211_rx_napi - receive frame from NAPI context * - * See also netif_napi_add(). + * Use this function to hand received frames to mac80211. The receive + * buffer in @skb must start with an IEEE 802.11 header. If a + * paged @skb is used, the driver is recommended to put the ieee80211 + * header of the frame on the linear part of the @skb to avoid memory + * allocation and/or memcpy by the stack. + * + * This function may not be called in IRQ context.
Calls to this function + * for a single hardware must be synchronized against each other. Calls to + * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be + * mixed for a single hardware. Must not run concurrently with + * ieee80211_tx_status() or ieee80211_tx_status_ni(). + * + * This function must be called with BHs disabled. + * + * @hw: the hardware this frame came in on + * @skb: the buffer to receive, owned by mac80211 after this call + * @napi: the NAPI context */ -void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi, - struct net_device *napi_dev, - int (*poll)(struct napi_struct *, int), - int weight); +void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb, + struct napi_struct *napi); /** * ieee80211_rx - receive frame @@ -3731,7 +3744,10 @@ void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi, * @hw: the hardware this frame came in on * @skb: the buffer to receive, owned by mac80211 after this call */ -void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb); +static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + ieee80211_rx_napi(hw, skb, NULL); +} /** * ieee80211_rx_irqsafe - receive frame @@ -4314,19 +4330,6 @@ void ieee80211_get_tkip_rx_p1k(struct ieee80211_key_conf *keyconf, void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf, struct sk_buff *skb, u8 *p2k); -/** - * ieee80211_aes_cmac_calculate_k1_k2 - calculate the AES-CMAC sub keys - * - * This function computes the two AES-CMAC sub-keys, based on the - * previously installed master key. - * - * @keyconf: the parameter passed with the set key - * @k1: a buffer to be filled with the 1st sub-key - * @k2: a buffer to be filled with the 2nd sub-key - */ -void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf, - u8 *k1, u8 *k2); - /** * ieee80211_get_key_tx_seq - get key TX sequence counter * diff --git a/include/net/neighbour.h b/include/net/neighbour.h index bd33e66f49aa..8b683841e574 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -125,6 +125,7 @@ struct neigh_statistics { unsigned long forced_gc_runs; /* number of forced GC runs */ unsigned long unres_discards; /* number of unresolved drops */ + unsigned long table_fulls; /* times even gc couldn't help */ }; #define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field) diff --git a/include/net/nl802154.h b/include/net/nl802154.h index b0ab530d28cd..cf2713d8b975 100644 --- a/include/net/nl802154.h +++ b/include/net/nl802154.h @@ -52,6 +52,8 @@ enum nl802154_commands { NL802154_CMD_SET_LBT_MODE, + NL802154_CMD_SET_ACKREQ_DEFAULT, + /* add new commands above here */ /* used to define NL802154_CMD_MAX below */ @@ -104,6 +106,8 @@ enum nl802154_attrs { NL802154_ATTR_SUPPORTED_COMMANDS, + NL802154_ATTR_ACKREQ_DEFAULT, + /* add attributes here, update the policy in nl802154.c */ __NL802154_ATTR_AFTER_LAST, diff --git a/include/net/route.h b/include/net/route.h index 2d45f419477f..6dda2c1bf8c6 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -189,8 +189,12 @@ void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk); void ip_rt_send_redirect(struct sk_buff *skb); unsigned int inet_addr_type(struct net *net, __be32 addr); +unsigned int inet_addr_type_table(struct net *net, __be32 addr, int tb_id); unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr); +unsigned int inet_addr_type_dev_table(struct net *net, + const struct net_device *dev, + __be32 addr); 
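/*
 * Sketch (not part of the patch): how a caller might use the table-aware
 * address classification declared just above. The helper name is invented
 * for illustration; inet_addr_type_dev_table() resolves the FIB table from
 * the device's VRF binding before classifying the address.
 */
static inline bool addr_is_local_on_dev(struct net *net,
					const struct net_device *dev,
					__be32 addr)
{
	/* RTN_LOCAL means the address is local within that table's scope */
	return inet_addr_type_dev_table(net, dev, addr) == RTN_LOCAL;
}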
void ip_rt_multicast_event(struct in_device *); int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg); void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); @@ -251,6 +255,9 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32 if (inet_sk(sk)->transparent) flow_flags |= FLOWI_FLAG_ANYSRC; + if (netif_index_is_vrf(sock_net(sk), oif)) + flow_flags |= FLOWI_FLAG_VRFSRC; + flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, protocol, flow_flags, dst, src, dport, sport); } diff --git a/include/net/switchdev.h b/include/net/switchdev.h index 89da8934519b..319baab3b48e 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -72,6 +72,7 @@ struct switchdev_obj { struct switchdev_obj_fdb { /* PORT_FDB */ const unsigned char *addr; u16 vid; + u16 ndm_state; } fdb; } u; }; diff --git a/include/net/vrf.h b/include/net/vrf.h new file mode 100644 index 000000000000..5bfb16237fd7 --- /dev/null +++ b/include/net/vrf.h @@ -0,0 +1,178 @@ +/* + * include/net/net_vrf.h - adds vrf dev structure definitions + * Copyright (c) 2015 Cumulus Networks + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_NET_VRF_H +#define __LINUX_NET_VRF_H + +struct net_vrf_dev { + struct rcu_head rcu; + int ifindex; /* ifindex of master dev */ + u32 tb_id; /* table id for VRF */ +}; + +struct slave { + struct list_head list; + struct net_device *dev; +}; + +struct slave_queue { + struct list_head all_slaves; +}; + +struct net_vrf { + struct slave_queue queue; + struct rtable *rth; + u32 tb_id; +}; + + +#if IS_ENABLED(CONFIG_NET_VRF) +/* called with rcu_read_lock() */ +static inline int vrf_master_ifindex_rcu(const struct net_device *dev) +{ + struct net_vrf_dev *vrf_ptr; + int ifindex = 0; + + if (!dev) + return 0; + + if (netif_is_vrf(dev)) { + ifindex = dev->ifindex; + } else { + vrf_ptr = rcu_dereference(dev->vrf_ptr); + if (vrf_ptr) + ifindex = vrf_ptr->ifindex; + } + + return ifindex; +} + +static inline int vrf_master_ifindex(const struct net_device *dev) +{ + int ifindex; + + rcu_read_lock(); + ifindex = vrf_master_ifindex_rcu(dev); + rcu_read_unlock(); + + return ifindex; +} + +/* called with rcu_read_lock */ +static inline int vrf_dev_table_rcu(const struct net_device *dev) +{ + int tb_id = 0; + + if (dev) { + struct net_vrf_dev *vrf_ptr; + + vrf_ptr = rcu_dereference(dev->vrf_ptr); + if (vrf_ptr) + tb_id = vrf_ptr->tb_id; + } + return tb_id; +} + +static inline int vrf_dev_table(const struct net_device *dev) +{ + int tb_id; + + rcu_read_lock(); + tb_id = vrf_dev_table_rcu(dev); + rcu_read_unlock(); + + return tb_id; +} + +static inline int vrf_dev_table_ifindex(struct net *net, int ifindex) +{ + struct net_device *dev; + int tb_id = 0; + + if (!ifindex) + return 0; + + rcu_read_lock(); + + dev = dev_get_by_index_rcu(net, ifindex); + if (dev) + tb_id = vrf_dev_table_rcu(dev); + + rcu_read_unlock(); + + return tb_id; +} + +/* called with rtnl */ +static inline int vrf_dev_table_rtnl(const struct net_device *dev) +{ + int tb_id = 0; + + if (dev) { + struct net_vrf_dev *vrf_ptr; + + vrf_ptr = rtnl_dereference(dev->vrf_ptr); + if (vrf_ptr) + tb_id = vrf_ptr->tb_id; + } + return tb_id; +} + +/* caller has already checked netif_is_vrf(dev) */ +static inline struct rtable *vrf_dev_get_rth(const struct net_device 
*dev) +{ + struct rtable *rth = ERR_PTR(-ENETUNREACH); + struct net_vrf *vrf = netdev_priv(dev); + + if (vrf) { + rth = vrf->rth; + atomic_inc(&rth->dst.__refcnt); + } + return rth; +} + +#else +static inline int vrf_master_ifindex_rcu(const struct net_device *dev) +{ + return 0; +} + +static inline int vrf_master_ifindex(const struct net_device *dev) +{ + return 0; +} + +static inline int vrf_dev_table_rcu(const struct net_device *dev) +{ + return 0; +} + +static inline int vrf_dev_table(const struct net_device *dev) +{ + return 0; +} + +static inline int vrf_dev_table_ifindex(struct net *net, int ifindex) +{ + return 0; +} + +static inline int vrf_dev_table_rtnl(const struct net_device *dev) +{ + return 0; +} + +static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev) +{ + return ERR_PTR(-ENETUNREACH); +} +#endif + +#endif /* __LINUX_NET_VRF_H */ diff --git a/include/net/vxlan.h b/include/net/vxlan.h index eb8d721cdb67..e4534f1b2d8c 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -181,7 +181,6 @@ struct vxlan_dev { #define VXLAN_F_GBP 0x800 #define VXLAN_F_REMCSUM_NOPARTIAL 0x1000 #define VXLAN_F_COLLECT_METADATA 0x2000 -#define VXLAN_F_FLOW_BASED 0x4000 /* Flags that are used in the receive path. These flags must match in * order for a socket to be shareable @@ -190,8 +189,7 @@ struct vxlan_dev { VXLAN_F_UDP_ZERO_CSUM6_RX | \ VXLAN_F_REMCSUM_RX | \ VXLAN_F_REMCSUM_NOPARTIAL | \ - VXLAN_F_COLLECT_METADATA | \ - VXLAN_F_FLOW_BASED) + VXLAN_F_COLLECT_METADATA) struct net_device *vxlan_dev_create(struct net *net, const char *name, u8 name_assign_type, struct vxlan_config *conf); diff --git a/include/net/xfrm.h b/include/net/xfrm.h index f0ee97eec24d..312e3fee9ccf 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -285,10 +285,13 @@ struct xfrm_policy_afinfo { unsigned short family; struct dst_ops *dst_ops; void (*garbage_collect)(struct net *net); - struct dst_entry *(*dst_lookup)(struct net *net, int tos, + struct dst_entry *(*dst_lookup)(struct net *net, + int tos, int oif, const xfrm_address_t *saddr, const xfrm_address_t *daddr); - int (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr); + int (*get_saddr)(struct net *net, int oif, + xfrm_address_t *saddr, + xfrm_address_t *daddr); void (*decode_session)(struct sk_buff *skb, struct flowi *fl, int reverse); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 2ce13c109b00..92a48e2d5461 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -114,6 +114,7 @@ enum bpf_map_type { BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PROG_ARRAY, + BPF_MAP_TYPE_PERF_EVENT_ARRAY, }; enum bpf_prog_type { @@ -270,6 +271,7 @@ enum bpf_func_id { */ BPF_FUNC_skb_get_tunnel_key, BPF_FUNC_skb_set_tunnel_key, + BPF_FUNC_perf_event_read, /* u64 bpf_perf_event_read(&map, index) */ __BPF_FUNC_MAX_ID, }; diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index ea047480a1f0..313c305fd1ad 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -230,6 +230,7 @@ enum { IFLA_BR_AGEING_TIME, IFLA_BR_STP_STATE, IFLA_BR_PRIORITY, + IFLA_BR_VLAN_FILTERING, __IFLA_BR_MAX, }; @@ -340,6 +341,15 @@ enum macvlan_macaddr_mode { #define MACVLAN_FLAG_NOPROMISC 1 +/* VRF section */ +enum { + IFLA_VRF_UNSPEC, + IFLA_VRF_TABLE, + __IFLA_VRF_MAX +}; + +#define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1) + /* IPVLAN section */ enum { IFLA_IPVLAN_UNSPEC, @@ -382,7 +392,6 @@ enum { IFLA_VXLAN_REMCSUM_RX, IFLA_VXLAN_GBP, 
IFLA_VXLAN_REMCSUM_NOPARTIAL, - IFLA_VXLAN_FLOWBASED, IFLA_VXLAN_COLLECT_METADATA, __IFLA_VXLAN_MAX }; diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h index d3d715f8c88f..9e7edfd8141e 100644 --- a/include/uapi/linux/if_packet.h +++ b/include/uapi/linux/if_packet.h @@ -55,6 +55,7 @@ struct sockaddr_ll { #define PACKET_TX_HAS_OFF 19 #define PACKET_QDISC_BYPASS 20 #define PACKET_ROLLOVER_STATS 21 +#define PACKET_FANOUT_DATA 22 #define PACKET_FANOUT_HASH 0 #define PACKET_FANOUT_LB 1 @@ -62,6 +63,8 @@ struct sockaddr_ll { #define PACKET_FANOUT_ROLLOVER 3 #define PACKET_FANOUT_RND 4 #define PACKET_FANOUT_QM 5 +#define PACKET_FANOUT_CBPF 6 +#define PACKET_FANOUT_EBPF 7 #define PACKET_FANOUT_FLAG_ROLLOVER 0x1000 #define PACKET_FANOUT_FLAG_DEFRAG 0x8000 diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h index bd3cc11a431f..af4de90ba27d 100644 --- a/include/uapi/linux/if_tunnel.h +++ b/include/uapi/linux/if_tunnel.h @@ -112,6 +112,7 @@ enum { IFLA_GRE_ENCAP_FLAGS, IFLA_GRE_ENCAP_SPORT, IFLA_GRE_ENCAP_DPORT, + IFLA_GRE_COLLECT_METADATA, __IFLA_GRE_MAX, }; diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h new file mode 100644 index 000000000000..7ed9e670814e --- /dev/null +++ b/include/uapi/linux/ila.h @@ -0,0 +1,15 @@ +/* ila.h - ILA Interface */ + +#ifndef _UAPI_LINUX_ILA_H +#define _UAPI_LINUX_ILA_H + +enum { + ILA_ATTR_UNSPEC, + ILA_ATTR_LOCATOR, /* u64 */ + + __ILA_ATTR_MAX, +}; + +#define ILA_ATTR_MAX (__ILA_ATTR_MAX - 1) + +#endif /* _UAPI_LINUX_ILA_H */ diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h index 80f3b74446a1..38b4fef20219 100644 --- a/include/uapi/linux/ipv6.h +++ b/include/uapi/linux/ipv6.h @@ -173,6 +173,7 @@ enum { DEVCONF_STABLE_SECRET, DEVCONF_USE_OIF_ADDRS_ONLY, DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT, + DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN, DEVCONF_MAX }; diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h index 31377bbea3f8..aa84ca396bcb 100644 --- a/include/uapi/linux/lwtunnel.h +++ b/include/uapi/linux/lwtunnel.h @@ -7,10 +7,25 @@ enum lwtunnel_encap_types { LWTUNNEL_ENCAP_NONE, LWTUNNEL_ENCAP_MPLS, LWTUNNEL_ENCAP_IP, + LWTUNNEL_ENCAP_ILA, __LWTUNNEL_ENCAP_MAX, }; #define LWTUNNEL_ENCAP_MAX (__LWTUNNEL_ENCAP_MAX - 1) +enum lwtunnel_ip_t { + LWTUNNEL_IP_UNSPEC, + LWTUNNEL_IP_ID, + LWTUNNEL_IP_DST, + LWTUNNEL_IP_SRC, + LWTUNNEL_IP_TTL, + LWTUNNEL_IP_TOS, + LWTUNNEL_IP_SPORT, + LWTUNNEL_IP_DPORT, + LWTUNNEL_IP_FLAGS, + __LWTUNNEL_IP_MAX, +}; + +#define LWTUNNEL_IP_MAX (__LWTUNNEL_IP_MAX - 1) #endif /* _UAPI_LWTUNNEL_H_ */ diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 2e35c61bbdd1..788655bfa0f3 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -106,6 +106,7 @@ struct ndt_stats { __u64 ndts_rcv_probes_ucast; __u64 ndts_periodic_gc_runs; __u64 ndts_forced_gc_runs; + __u64 ndts_table_fulls; }; enum { diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index efe3443572ba..413417f3707b 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -319,6 +319,7 @@ #define PCI_MSIX_PBA 8 /* Pending Bit Array offset */ #define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */ #define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */ +#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */ #define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */ /* MSI-X Table entry format */ diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h 
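
The two new fanout modes above let a classic or extended BPF program pick the receiving socket of a fanout group; the program itself is installed through the new PACKET_FANOUT_DATA option. A hypothetical userspace sketch for the classic-BPF case (group id 42 and the trivial filter are illustrative only):

	#include <linux/filter.h>
	#include <linux/if_packet.h>
	#include <sys/socket.h>

	static int setup_cbpf_fanout(int fd)
	{
		struct sock_filter code[] = {
			{ BPF_RET | BPF_K, 0, 0, 0 },	/* always pick socket 0 */
		};
		struct sock_fprog prog = { .len = 1, .filter = code };
		int fanout = 42 | (PACKET_FANOUT_CBPF << 16);

		if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &fanout, sizeof(fanout)))
			return -1;
		return setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &prog, sizeof(prog));
	}

The PACKET_FANOUT_EBPF variant passes an eBPF program fd through PACKET_FANOUT_DATA instead of a sock_fprog.
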
index 47d24cb3fbc1..0d3d3cc43356 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -286,21 +286,6 @@ enum rt_class_t { /* Routing message attributes */ -enum ip_tunnel_t { - IP_TUN_UNSPEC, - IP_TUN_ID, - IP_TUN_DST, - IP_TUN_SRC, - IP_TUN_TTL, - IP_TUN_TOS, - IP_TUN_SPORT, - IP_TUN_DPORT, - IP_TUN_FLAGS, - __IP_TUN_MAX, -}; - -#define IP_TUN_MAX (__IP_TUN_MAX - 1) - enum rtattr_type_t { RTA_UNSPEC, RTA_DST, diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h index 785c5ca0994b..51b8066a223b 100644 --- a/include/uapi/sound/asoc.h +++ b/include/uapi/sound/asoc.h @@ -77,7 +77,7 @@ #define SND_SOC_TPLG_NUM_TEXTS 16 /* ABI version */ -#define SND_SOC_TPLG_ABI_VERSION 0x2 +#define SND_SOC_TPLG_ABI_VERSION 0x3 /* Max size of TLV data */ #define SND_SOC_TPLG_TLV_SIZE 32 @@ -97,7 +97,8 @@ #define SND_SOC_TPLG_TYPE_PCM 7 #define SND_SOC_TPLG_TYPE_MANIFEST 8 #define SND_SOC_TPLG_TYPE_CODEC_LINK 9 -#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_CODEC_LINK +#define SND_SOC_TPLG_TYPE_PDATA 10 +#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_PDATA /* vendor block IDs - please add new vendor types to end */ #define SND_SOC_TPLG_TYPE_VENDOR_FW 1000 @@ -137,11 +138,19 @@ struct snd_soc_tplg_private { /* * Kcontrol TLV data. */ +struct snd_soc_tplg_tlv_dbscale { + __le32 min; + __le32 step; + __le32 mute; +} __attribute__((packed)); + struct snd_soc_tplg_ctl_tlv { - __le32 size; /* in bytes aligned to 4 */ - __le32 numid; /* control element numeric identification */ - __le32 count; /* number of elem in data array */ - __le32 data[SND_SOC_TPLG_TLV_SIZE]; + __le32 size; /* in bytes of this structure */ + __le32 type; /* SNDRV_CTL_TLVT_*, type of TLV */ + union { + __le32 data[SND_SOC_TPLG_TLV_SIZE]; + struct snd_soc_tplg_tlv_dbscale scale; + }; } __attribute__((packed)); /* @@ -155,9 +164,11 @@ struct snd_soc_tplg_channel { } __attribute__((packed)); /* - * Kcontrol Operations IDs + * Genericl Operations IDs, for binding Kcontrol or Bytes ext ops + * Kcontrol ops need get/put/info. + * Bytes ext ops need get/put. 
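
With topology ABI v3 the TLV block moves out of struct snd_soc_tplg_mixer_control into the common control header and becomes self-describing: a size, a SNDRV_CTL_TLVT_* type, and either raw words or the new dB-scale payload. A hypothetical dB-scale initialisation under the new layout (values illustrative; kernel-style endian helpers used for brevity):

	struct snd_soc_tplg_ctl_tlv tlv = {
		.size = cpu_to_le32(sizeof(tlv)),
		.type = cpu_to_le32(SNDRV_CTL_TLVT_DB_SCALE),
		.scale = {
			.min  = cpu_to_le32(-9600),	/* -96.00 dB */
			.step = cpu_to_le32(50),	/* 0.50 dB steps */
			.mute = cpu_to_le32(1),
		},
	};
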
*/ -struct snd_soc_tplg_kcontrol_ops_id { +struct snd_soc_tplg_io_ops { __le32 get; __le32 put; __le32 info; @@ -171,8 +182,8 @@ struct snd_soc_tplg_ctl_hdr { __le32 type; char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; __le32 access; - struct snd_soc_tplg_kcontrol_ops_id ops; - __le32 tlv_size; /* non zero means control has TLV data */ + struct snd_soc_tplg_io_ops ops; + struct snd_soc_tplg_ctl_tlv tlv; } __attribute__((packed)); /* @@ -238,6 +249,7 @@ struct snd_soc_tplg_manifest { __le32 graph_elems; /* number of graph elements */ __le32 dai_elems; /* number of DAI elements */ __le32 dai_link_elems; /* number of DAI link elements */ + struct snd_soc_tplg_private priv; } __attribute__((packed)); /* @@ -259,7 +271,6 @@ struct snd_soc_tplg_mixer_control { __le32 invert; __le32 num_channels; struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN]; - struct snd_soc_tplg_ctl_tlv tlv; struct snd_soc_tplg_private priv; } __attribute__((packed)); @@ -303,6 +314,7 @@ struct snd_soc_tplg_bytes_control { __le32 mask; __le32 base; __le32 num_regs; + struct snd_soc_tplg_io_ops ext_ops; struct snd_soc_tplg_private priv; } __attribute__((packed)); @@ -347,6 +359,7 @@ struct snd_soc_tplg_dapm_widget { __le32 reg; /* negative reg = no direct dapm */ __le32 shift; /* bits to shift */ __le32 mask; /* non-shifted mask */ + __le32 subseq; /* sort within widget type */ __u32 invert; /* invert the power bit */ __u32 ignore_suspend; /* kept enabled over suspend */ __u16 event_flags; diff --git a/init/main.c b/init/main.c index c5d5626289ce..56506553d4d8 100644 --- a/init/main.c +++ b/init/main.c @@ -656,7 +656,7 @@ asmlinkage __visible void __init start_kernel(void) key_init(); security_init(); dbg_late_init(); - vfs_caches_init(totalram_pages); + vfs_caches_init(); signals_init(); /* rootfs populating might need page-writeback */ page_writeback_init(); diff --git a/ipc/mqueue.c b/ipc/mqueue.c index a24ba9fe5bb8..161a1807e6ef 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -142,7 +142,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) if (!leaf) return -ENOMEM; INIT_LIST_HEAD(&leaf->msg_list); - info->qsize += sizeof(*leaf); } leaf->priority = msg->m_type; rb_link_node(&leaf->rb_node, parent, p); @@ -187,7 +186,6 @@ try_again: "lazy leaf delete!\n"); rb_erase(&leaf->rb_node, &info->msg_tree); if (info->node_cache) { - info->qsize -= sizeof(*leaf); kfree(leaf); } else { info->node_cache = leaf; @@ -200,7 +198,6 @@ try_again: if (list_empty(&leaf->msg_list)) { rb_erase(&leaf->rb_node, &info->msg_tree); if (info->node_cache) { - info->qsize -= sizeof(*leaf); kfree(leaf); } else { info->node_cache = leaf; @@ -1034,7 +1031,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, /* Save our speculative allocation into the cache */ INIT_LIST_HEAD(&new_leaf->msg_list); info->node_cache = new_leaf; - info->qsize += sizeof(*new_leaf); new_leaf = NULL; } else { kfree(new_leaf); @@ -1142,7 +1138,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, /* Save our speculative allocation into the cache */ INIT_LIST_HEAD(&new_leaf->msg_list); info->node_cache = new_leaf; - info->qsize += sizeof(*new_leaf); } else { kfree(new_leaf); } diff --git a/ipc/shm.c b/ipc/shm.c index 06e5cf2fe019..4aef24d91b63 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -545,7 +545,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) if ((shmflg & SHM_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) acctflag = VM_NORESERVE; - file = 
shmem_file_setup(name, size, acctflag); + file = shmem_kernel_file_setup(name, size, acctflag); } error = PTR_ERR(file); if (IS_ERR(file)) diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index cb31229a6fa4..29ace107f236 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -150,15 +150,15 @@ static int __init register_array_map(void) } late_initcall(register_array_map); -static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr) +static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr) { - /* only bpf_prog file descriptors can be stored in prog_array map */ + /* only file descriptors can be stored in this type of map */ if (attr->value_size != sizeof(u32)) return ERR_PTR(-EINVAL); return array_map_alloc(attr); } -static void prog_array_map_free(struct bpf_map *map) +static void fd_array_map_free(struct bpf_map *map) { struct bpf_array *array = container_of(map, struct bpf_array, map); int i; @@ -167,21 +167,21 @@ static void prog_array_map_free(struct bpf_map *map) /* make sure it's empty */ for (i = 0; i < array->map.max_entries; i++) - BUG_ON(array->prog[i] != NULL); + BUG_ON(array->ptrs[i] != NULL); kvfree(array); } -static void *prog_array_map_lookup_elem(struct bpf_map *map, void *key) +static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key) { return NULL; } /* only called from syscall */ -static int prog_array_map_update_elem(struct bpf_map *map, void *key, - void *value, u64 map_flags) +static int fd_array_map_update_elem(struct bpf_map *map, void *key, + void *value, u64 map_flags) { struct bpf_array *array = container_of(map, struct bpf_array, map); - struct bpf_prog *prog, *old_prog; + void *new_ptr, *old_ptr; u32 index = *(u32 *)key, ufd; if (map_flags != BPF_ANY) @@ -191,57 +191,75 @@ static int prog_array_map_update_elem(struct bpf_map *map, void *key, return -E2BIG; ufd = *(u32 *)value; - prog = bpf_prog_get(ufd); - if (IS_ERR(prog)) - return PTR_ERR(prog); + new_ptr = map->ops->map_fd_get_ptr(map, ufd); + if (IS_ERR(new_ptr)) + return PTR_ERR(new_ptr); - if (!bpf_prog_array_compatible(array, prog)) { - bpf_prog_put(prog); - return -EINVAL; - } - - old_prog = xchg(array->prog + index, prog); - if (old_prog) - bpf_prog_put_rcu(old_prog); + old_ptr = xchg(array->ptrs + index, new_ptr); + if (old_ptr) + map->ops->map_fd_put_ptr(old_ptr); return 0; } -static int prog_array_map_delete_elem(struct bpf_map *map, void *key) +static int fd_array_map_delete_elem(struct bpf_map *map, void *key) { struct bpf_array *array = container_of(map, struct bpf_array, map); - struct bpf_prog *old_prog; + void *old_ptr; u32 index = *(u32 *)key; if (index >= array->map.max_entries) return -E2BIG; - old_prog = xchg(array->prog + index, NULL); - if (old_prog) { - bpf_prog_put_rcu(old_prog); + old_ptr = xchg(array->ptrs + index, NULL); + if (old_ptr) { + map->ops->map_fd_put_ptr(old_ptr); return 0; } else { return -ENOENT; } } +static void *prog_fd_array_get_ptr(struct bpf_map *map, int fd) +{ + struct bpf_array *array = container_of(map, struct bpf_array, map); + struct bpf_prog *prog = bpf_prog_get(fd); + if (IS_ERR(prog)) + return prog; + + if (!bpf_prog_array_compatible(array, prog)) { + bpf_prog_put(prog); + return ERR_PTR(-EINVAL); + } + return prog; +} + +static void prog_fd_array_put_ptr(void *ptr) +{ + struct bpf_prog *prog = ptr; + + bpf_prog_put_rcu(prog); +} + /* decrement refcnt of all bpf_progs that are stored in this map */ -void bpf_prog_array_map_clear(struct bpf_map *map) +void bpf_fd_array_map_clear(struct bpf_map *map) { struct 
bpf_array *array = container_of(map, struct bpf_array, map); int i; for (i = 0; i < array->map.max_entries; i++) - prog_array_map_delete_elem(map, &i); + fd_array_map_delete_elem(map, &i); } static const struct bpf_map_ops prog_array_ops = { - .map_alloc = prog_array_map_alloc, - .map_free = prog_array_map_free, + .map_alloc = fd_array_map_alloc, + .map_free = fd_array_map_free, .map_get_next_key = array_map_get_next_key, - .map_lookup_elem = prog_array_map_lookup_elem, - .map_update_elem = prog_array_map_update_elem, - .map_delete_elem = prog_array_map_delete_elem, + .map_lookup_elem = fd_array_map_lookup_elem, + .map_update_elem = fd_array_map_update_elem, + .map_delete_elem = fd_array_map_delete_elem, + .map_fd_get_ptr = prog_fd_array_get_ptr, + .map_fd_put_ptr = prog_fd_array_put_ptr, }; static struct bpf_map_type_list prog_array_type __read_mostly = { @@ -255,3 +273,60 @@ static int __init register_prog_array_map(void) return 0; } late_initcall(register_prog_array_map); + +static void perf_event_array_map_free(struct bpf_map *map) +{ + bpf_fd_array_map_clear(map); + fd_array_map_free(map); +} + +static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd) +{ + struct perf_event *event; + const struct perf_event_attr *attr; + + event = perf_event_get(fd); + if (IS_ERR(event)) + return event; + + attr = perf_event_attrs(event); + if (IS_ERR(attr)) + return (void *)attr; + + if (attr->type != PERF_TYPE_RAW && + attr->type != PERF_TYPE_HARDWARE) { + perf_event_release_kernel(event); + return ERR_PTR(-EINVAL); + } + return event; +} + +static void perf_event_fd_array_put_ptr(void *ptr) +{ + struct perf_event *event = ptr; + + perf_event_release_kernel(event); +} + +static const struct bpf_map_ops perf_event_array_ops = { + .map_alloc = fd_array_map_alloc, + .map_free = perf_event_array_map_free, + .map_get_next_key = array_map_get_next_key, + .map_lookup_elem = fd_array_map_lookup_elem, + .map_update_elem = fd_array_map_update_elem, + .map_delete_elem = fd_array_map_delete_elem, + .map_fd_get_ptr = perf_event_fd_array_get_ptr, + .map_fd_put_ptr = perf_event_fd_array_put_ptr, +}; + +static struct bpf_map_type_list perf_event_array_type __read_mostly = { + .ops = &perf_event_array_ops, + .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, +}; + +static int __init register_perf_event_array_map(void) +{ + bpf_register_map_type(&perf_event_array_type); + return 0; +} +late_initcall(register_perf_event_array_map); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index fafa74161445..67c380cfa9ca 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -450,7 +450,7 @@ select_insn: tail_call_cnt++; - prog = READ_ONCE(array->prog[index]); + prog = READ_ONCE(array->ptrs[index]); if (unlikely(!prog)) goto out; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index a1b14d197a4f..dc9b464fefa9 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -72,7 +72,7 @@ static int bpf_map_release(struct inode *inode, struct file *filp) /* prog_array stores refcnt-ed bpf_prog pointers * release them all when user space closes prog_array_fd */ - bpf_prog_array_map_clear(map); + bpf_fd_array_map_clear(map); bpf_map_put(map); return 0; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index cd307df98cb3..ed12e385fb75 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -238,6 +238,14 @@ static const char * const reg_type_str[] = { [CONST_IMM] = "imm", }; +static const struct { + int map_type; + int func_id; +} func_limit[] = { + {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call}, + 
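
A perf event array is filled from userspace with perf fds, which fd_array_map_update_elem() above converts to struct perf_event pointers via map_fd_get_ptr(). A hypothetical sketch using a samples/bpf style bpf_update_elem() wrapper around the bpf(2) syscall (the wrapper name is an assumption, not defined in this patch):

	static int attach_counter(int map_fd, __u32 cpu, int perf_fd)
	{
		/* value is the perf fd; the kernel resolves and refcounts it */
		return bpf_update_elem(map_fd, &cpu, &perf_fd, BPF_ANY);
	}
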
{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read}, +}; + static void print_verifier_state(struct verifier_env *env) { enum bpf_reg_type t; @@ -837,6 +845,28 @@ static int check_func_arg(struct verifier_env *env, u32 regno, return err; } +static int check_map_func_compatibility(struct bpf_map *map, int func_id) +{ + bool bool_map, bool_func; + int i; + + if (!map) + return 0; + + for (i = 0; i < ARRAY_SIZE(func_limit); i++) { + bool_map = (map->map_type == func_limit[i].map_type); + bool_func = (func_id == func_limit[i].func_id); + /* only when map & func pair match it can continue. + * don't allow any other map type to be passed into + * the special func; + */ + if (bool_map != bool_func) + return -EINVAL; + } + + return 0; +} + static int check_call(struct verifier_env *env, int func_id) { struct verifier_state *state = &env->cur_state; @@ -912,21 +942,9 @@ static int check_call(struct verifier_env *env, int func_id) return -EINVAL; } - if (map && map->map_type == BPF_MAP_TYPE_PROG_ARRAY && - func_id != BPF_FUNC_tail_call) - /* prog_array map type needs extra care: - * only allow to pass it into bpf_tail_call() for now. - * bpf_map_delete_elem() can be allowed in the future, - * while bpf_map_update_elem() must only be done via syscall - */ - return -EINVAL; - - if (func_id == BPF_FUNC_tail_call && - map->map_type != BPF_MAP_TYPE_PROG_ARRAY) - /* don't allow any other map type to be passed into - * bpf_tail_call() - */ - return -EINVAL; + err = check_map_func_compatibility(map, func_id); + if (err) + return err; return 0; } diff --git a/kernel/events/core.c b/kernel/events/core.c index d3dae3419b99..e2c6a8886d4d 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3212,6 +3212,59 @@ static inline u64 perf_event_count(struct perf_event *event) return __perf_event_count(event); } +/* + * NMI-safe method to read a local event, that is an event that + * is: + * - either for the current task, or for this CPU + * - does not have inherit set, for inherited task events + * will not be local and we cannot read them atomically + * - must not have a pmu::count method + */ +u64 perf_event_read_local(struct perf_event *event) +{ + unsigned long flags; + u64 val; + + /* + * Disabling interrupts avoids all counter scheduling (context + * switches, timer based rotation and IPIs). + */ + local_irq_save(flags); + + /* If this is a per-task event, it must be for current */ + WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) && + event->hw.target != current); + + /* If this is a per-CPU event, it must be for this CPU */ + WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) && + event->cpu != smp_processor_id()); + + /* + * It must not be an event with inherit set, we cannot read + * all child counters from atomic context. + */ + WARN_ON_ONCE(event->attr.inherit); + + /* + * It must not have a pmu::count method, those are not + * NMI safe. + */ + WARN_ON_ONCE(event->pmu->count); + + /* + * If the event is currently on this CPU, its either a per-task event, + * or local to this CPU. Furthermore it means its ACTIVE (otherwise + * oncpu == -1). 
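
Worked example for the func_limit[] pairing table above, as evaluated by check_map_func_compatibility():

	/* prog_array + bpf_map_lookup_elem():
	 *   row 0: bool_map  = (PROG_ARRAY == PROG_ARRAY)     -> true
	 *          bool_func = (map_lookup_elem == tail_call) -> false
	 *   true != false, so the verifier returns -EINVAL;
	 * prog_array + tail_call gives true/true and is allowed.
	 */

The same reasoning confines perf event arrays to bpf_perf_event_read(), replacing the two hand-written checks removed later in this hunk.
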
+ */ + if (event->oncpu == smp_processor_id()) + event->pmu->read(event); + + val = local64_read(&event->count); + local_irq_restore(flags); + + return val; +} + static u64 perf_event_read(struct perf_event *event) { /* @@ -8574,6 +8627,31 @@ void perf_event_delayed_put(struct task_struct *task) WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); } +struct perf_event *perf_event_get(unsigned int fd) +{ + int err; + struct fd f; + struct perf_event *event; + + err = perf_fget_light(fd, &f); + if (err) + return ERR_PTR(err); + + event = f.file->private_data; + atomic_long_inc(&event->refcount); + fdput(f); + + return event; +} + +const struct perf_event_attr *perf_event_attrs(struct perf_event *event) +{ + if (!event) + return ERR_PTR(-EINVAL); + + return &event->attr; +} + /* * inherit a event from parent task to child task: */ diff --git a/kernel/kthread.c b/kernel/kthread.c index 10e489c448fe..fdea0bee7b5a 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -97,6 +97,7 @@ bool kthread_should_park(void) { return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags); } +EXPORT_SYMBOL_GPL(kthread_should_park); /** * kthread_freezable_should_stop - should this freezable kthread return now? @@ -171,6 +172,7 @@ void kthread_parkme(void) { __kthread_parkme(to_kthread(current)); } +EXPORT_SYMBOL_GPL(kthread_parkme); static int kthread(void *_create) { @@ -411,6 +413,7 @@ void kthread_unpark(struct task_struct *k) if (kthread) __kthread_unpark(k, kthread); } +EXPORT_SYMBOL_GPL(kthread_unpark); /** * kthread_park - park a thread created by kthread_create(). @@ -441,6 +444,7 @@ int kthread_park(struct task_struct *k) } return ret; } +EXPORT_SYMBOL_GPL(kthread_park); /** * kthread_stop - stop a thread created by kthread_create(). diff --git a/kernel/module.c b/kernel/module.c index 4d2b82e610e2..b86b7bf1be38 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -602,13 +602,16 @@ const struct kernel_symbol *find_symbol(const char *name, } EXPORT_SYMBOL_GPL(find_symbol); -/* Search for module by name: must hold module_mutex. */ +/* + * Search for module by name: must hold module_mutex (or preempt disabled + * for read-only access). + */ static struct module *find_module_all(const char *name, size_t len, bool even_unformed) { struct module *mod; - module_assert_mutex(); + module_assert_mutex_or_preempt(); list_for_each_entry(mod, &modules, list) { if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) @@ -621,6 +624,7 @@ static struct module *find_module_all(const char *name, size_t len, struct module *find_module(const char *name) { + module_assert_mutex(); return find_module_all(name, strlen(name), false); } EXPORT_SYMBOL_GPL(find_module); diff --git a/kernel/signal.c b/kernel/signal.c index 836df8dac6cc..0f6bbbe77b46 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2748,12 +2748,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from) * Other callers might not initialize the si_lsb field, * so check explicitly for the right codes here. 
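
The kthread park/unpark exports above make the parking API usable from modules. A hypothetical module-side sketch (my_reconfigure() and the worker pointer are illustrative):

	#include <linux/kthread.h>

	static void my_reconfigure(struct task_struct *worker)
	{
		if (kthread_park(worker))	/* returns 0 once parked */
			return;
		/* update state the worker must never observe half-written */
		kthread_unpark(worker);
	}
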
*/ - if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) + if (from->si_signo == SIGBUS && + (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)) err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); #endif #ifdef SEGV_BNDERR - err |= __put_user(from->si_lower, &to->si_lower); - err |= __put_user(from->si_upper, &to->si_upper); + if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) { + err |= __put_user(from->si_lower, &to->si_lower); + err |= __put_user(from->si_upper, &to->si_upper); + } #endif break; case __SI_CHLD: @@ -3017,7 +3020,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, int, sig, struct compat_siginfo __user *, uinfo) { - siginfo_t info; + siginfo_t info = {}; int ret = copy_siginfo_from_user32(&info, uinfo); if (unlikely(ret)) return ret; @@ -3061,7 +3064,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, int, sig, struct compat_siginfo __user *, uinfo) { - siginfo_t info; + siginfo_t info = {}; if (copy_siginfo_from_user32(&info, uinfo)) return -EFAULT; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 88a041adee90..ef9936df1b04 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -158,6 +158,35 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void) return &bpf_trace_printk_proto; } +static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5) +{ + struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; + struct bpf_array *array = container_of(map, struct bpf_array, map); + struct perf_event *event; + + if (unlikely(index >= array->map.max_entries)) + return -E2BIG; + + event = (struct perf_event *)array->ptrs[index]; + if (!event) + return -ENOENT; + + /* + * we don't know if the function is run successfully by the + * return value. It can be judged in other places, such as + * eBPF programs. + */ + return perf_event_read_local(event); +} + +const struct bpf_func_proto bpf_perf_event_read_proto = { + .func = bpf_perf_event_read, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_ANYTHING, +}; + static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id) { switch (func_id) { @@ -183,6 +212,8 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func return bpf_get_trace_printk_proto(); case BPF_FUNC_get_smp_processor_id: return &bpf_get_smp_processor_id_proto; + case BPF_FUNC_perf_event_read: + return &bpf_perf_event_read_proto; default: return NULL; } diff --git a/lib/Kconfig b/lib/Kconfig index 3a2ef67db6c7..278890dd1049 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -460,16 +460,6 @@ config ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE config LRU_CACHE tristate -config AVERAGE - bool "Averaging functions" - help - This option is provided for the case where no in-kernel-tree - modules require averaging functions, but a module built outside - the kernel tree does. Such modules that use library averaging - functions require Y here. - - If unsure, say N. - config CLZ_TAB bool diff --git a/lib/average.c b/lib/average.c deleted file mode 100644 index 114d1beae0c7..000000000000 --- a/lib/average.c +++ /dev/null @@ -1,64 +0,0 @@ -/* - * lib/average.c - * - * This source code is licensed under the GNU General Public License, - * Version 2. See the file COPYING for more details. 
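
The helper above is wired into kprobe programs, so an eBPF program can sample a perf counter entirely in-kernel. A hypothetical samples/bpf style program (SEC() and struct bpf_map_def come from the samples headers; names and sizes are illustrative):

	struct bpf_map_def SEC("maps") counters = {
		.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
		.key_size = sizeof(int),
		.value_size = sizeof(u32),	/* slots hold perf fds */
		.max_entries = 64,
	};

	SEC("kprobe/sys_write")
	int count_on_write(struct pt_regs *ctx)
	{
		u32 cpu = bpf_get_smp_processor_id();
		u64 count = bpf_perf_event_read(&counters, cpu);

		/* a real program would aggregate count; negative values
		 * (as s64) encode -E2BIG/-ENOENT from the helper
		 */
		return count != 0;
	}
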
- */ - -#include -#include -#include -#include -#include - -/** - * DOC: Exponentially Weighted Moving Average (EWMA) - * - * These are generic functions for calculating Exponentially Weighted Moving - * Averages (EWMA). We keep a structure with the EWMA parameters and a scaled - * up internal representation of the average value to prevent rounding errors. - * The factor for scaling up and the exponential weight (or decay rate) have to - * be specified thru the init fuction. The structure should not be accessed - * directly but only thru the helper functions. - */ - -/** - * ewma_init() - Initialize EWMA parameters - * @avg: Average structure - * @factor: Factor to use for the scaled up internal value. The maximum value - * of averages can be ULONG_MAX/(factor*weight). For performance reasons - * factor has to be a power of 2. - * @weight: Exponential weight, or decay rate. This defines how fast the - * influence of older values decreases. For performance reasons weight has - * to be a power of 2. - * - * Initialize the EWMA parameters for a given struct ewma @avg. - */ -void ewma_init(struct ewma *avg, unsigned long factor, unsigned long weight) -{ - WARN_ON(!is_power_of_2(weight) || !is_power_of_2(factor)); - - avg->weight = ilog2(weight); - avg->factor = ilog2(factor); - avg->internal = 0; -} -EXPORT_SYMBOL(ewma_init); - -/** - * ewma_add() - Exponentially weighted moving average (EWMA) - * @avg: Average structure - * @val: Current value - * - * Add a sample to the average. - */ -struct ewma *ewma_add(struct ewma *avg, unsigned long val) -{ - unsigned long internal = ACCESS_ONCE(avg->internal); - - ACCESS_ONCE(avg->internal) = internal ? - (((internal << avg->weight) - internal) + - (val << avg->factor)) >> avg->weight : - (val << avg->factor); - return avg; -} -EXPORT_SYMBOL(ewma_add); diff --git a/lib/iommu-common.c b/lib/iommu-common.c index df30632f0bef..ff19f66d3f7f 100644 --- a/lib/iommu-common.c +++ b/lib/iommu-common.c @@ -119,7 +119,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev, unsigned long align_mask = 0; if (align_order > 0) - align_mask = 0xffffffffffffffffl >> (64 - align_order); + align_mask = ~0ul >> (BITS_PER_LONG - align_order); /* Sanity check */ if (unlikely(npages == 0)) { diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 9af7cefb195d..8c1ad1ced72c 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -16,11 +16,14 @@ #include #include #include +#include #include #include #include +#include #include #include +#include #define MAX_ENTRIES 1000000 #define TEST_INSERT_FAIL INT_MAX @@ -45,11 +48,21 @@ static int size = 8; module_param(size, int, 0); MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)"); +static int tcount = 10; +module_param(tcount, int, 0); +MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)"); + struct test_obj { int value; struct rhash_head node; }; +struct thread_data { + int id; + struct task_struct *task; + struct test_obj *objs; +}; + static struct test_obj array[MAX_ENTRIES]; static struct rhashtable_params test_rht_params = { @@ -60,6 +73,9 @@ static struct rhashtable_params test_rht_params = { .nulls_base = (3U << RHT_BASE_SHIFT), }; +static struct semaphore prestart_sem; +static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0); + static int __init test_rht_lookup(struct rhashtable *ht) { unsigned int i; @@ -200,10 +216,97 @@ static s64 __init test_rhashtable(struct rhashtable *ht) static struct rhashtable ht; +static int thread_lookup_test(struct 
thread_data *tdata) +{ + int i, err = 0; + + for (i = 0; i < entries; i++) { + struct test_obj *obj; + int key = (tdata->id << 16) | i; + + obj = rhashtable_lookup_fast(&ht, &key, test_rht_params); + if (obj && (tdata->objs[i].value == TEST_INSERT_FAIL)) { + pr_err(" found unexpected object %d\n", key); + err++; + } else if (!obj && (tdata->objs[i].value != TEST_INSERT_FAIL)) { + pr_err(" object %d not found!\n", key); + err++; + } else if (obj && (obj->value != key)) { + pr_err(" wrong object returned (got %d, expected %d)\n", + obj->value, key); + err++; + } + } + return err; +} + +static int threadfunc(void *data) +{ + int i, step, err = 0, insert_fails = 0; + struct thread_data *tdata = data; + + up(&prestart_sem); + if (down_interruptible(&startup_sem)) + pr_err(" thread[%d]: down_interruptible failed\n", tdata->id); + + for (i = 0; i < entries; i++) { + tdata->objs[i].value = (tdata->id << 16) | i; + err = rhashtable_insert_fast(&ht, &tdata->objs[i].node, + test_rht_params); + if (err == -ENOMEM || err == -EBUSY) { + tdata->objs[i].value = TEST_INSERT_FAIL; + insert_fails++; + } else if (err) { + pr_err(" thread[%d]: rhashtable_insert_fast failed\n", + tdata->id); + goto out; + } + } + if (insert_fails) + pr_info(" thread[%d]: %d insert failures\n", + tdata->id, insert_fails); + + err = thread_lookup_test(tdata); + if (err) { + pr_err(" thread[%d]: rhashtable_lookup_test failed\n", + tdata->id); + goto out; + } + + for (step = 10; step > 0; step--) { + for (i = 0; i < entries; i += step) { + if (tdata->objs[i].value == TEST_INSERT_FAIL) + continue; + err = rhashtable_remove_fast(&ht, &tdata->objs[i].node, + test_rht_params); + if (err) { + pr_err(" thread[%d]: rhashtable_remove_fast failed\n", + tdata->id); + goto out; + } + tdata->objs[i].value = TEST_INSERT_FAIL; + } + err = thread_lookup_test(tdata); + if (err) { + pr_err(" thread[%d]: rhashtable_lookup_test (2) failed\n", + tdata->id); + goto out; + } + } +out: + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + } + return err; +} + static int __init test_rht_init(void) { - int i, err; + int i, err, started_threads = 0, failed_threads = 0; u64 total_time = 0; + struct thread_data *tdata; + struct test_obj *objs; entries = min(entries, MAX_ENTRIES); @@ -239,6 +342,57 @@ static int __init test_rht_init(void) do_div(total_time, runs); pr_info("Average test time: %llu\n", total_time); + if (!tcount) + return 0; + + pr_info("Testing concurrent rhashtable access from %d threads\n", + tcount); + sema_init(&prestart_sem, 1 - tcount); + tdata = vzalloc(tcount * sizeof(struct thread_data)); + if (!tdata) + return -ENOMEM; + objs = vzalloc(tcount * entries * sizeof(struct test_obj)); + if (!objs) { + vfree(tdata); + return -ENOMEM; + } + + err = rhashtable_init(&ht, &test_rht_params); + if (err < 0) { + pr_warn("Test failed: Unable to initialize hashtable: %d\n", + err); + vfree(tdata); + vfree(objs); + return -EINVAL; + } + for (i = 0; i < tcount; i++) { + tdata[i].id = i; + tdata[i].objs = objs + i * entries; + tdata[i].task = kthread_run(threadfunc, &tdata[i], + "rhashtable_thrad[%d]", i); + if (IS_ERR(tdata[i].task)) + pr_err(" kthread_run failed for thread %d\n", i); + else + started_threads++; + } + if (down_interruptible(&prestart_sem)) + pr_err(" down interruptible failed\n"); + for (i = 0; i < tcount; i++) + up(&startup_sem); + for (i = 0; i < tcount; i++) { + if (IS_ERR(tdata[i].task)) + continue; + if ((err = kthread_stop(tdata[i].task))) { + pr_warn("Test failed: thread %d returned: %d\n", 
+ i, err); + failed_threads++; + } + } + pr_info("Started %d threads, %d failed\n", + started_threads, failed_threads); + rhashtable_destroy(&ht); + vfree(tdata); + vfree(objs); return 0; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c107094f79ba..097c7a4bfbd9 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1676,12 +1676,7 @@ static void __split_huge_page_refcount(struct page *page, /* after clearing PageTail the gup refcount can be released */ smp_mb__after_atomic(); - /* - * retain hwpoison flag of the poisoned tail page: - * fix for the unsuitable process killed on Guest Machine(KVM) - * by the memory-failure. - */ - page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON; + page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; page_tail->flags |= (page->flags & ((1L << PG_referenced) | (1L << PG_swapbacked) | diff --git a/mm/memory-failure.c b/mm/memory-failure.c index c53543d89282..ea5a93659488 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -909,6 +909,18 @@ int get_hwpoison_page(struct page *page) * directly for tail pages. */ if (PageTransHuge(head)) { + /* + * Non anonymous thp exists only in allocation/free time. We + * can't handle such a case correctly, so let's give it up. + * This should be better than triggering BUG_ON when kernel + * tries to touch the "partially handled" page. + */ + if (!PageAnon(head)) { + pr_err("MCE: %#lx: non anonymous thp\n", + page_to_pfn(page)); + return 0; + } + if (get_page_unless_zero(head)) { if (PageTail(page)) get_page(page); @@ -1134,15 +1146,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags) } if (!PageHuge(p) && PageTransHuge(hpage)) { - if (!PageAnon(hpage)) { - pr_err("MCE: %#lx: non anonymous thp\n", pfn); - if (TestClearPageHWPoison(p)) - atomic_long_sub(nr_pages, &num_poisoned_pages); - put_page(p); - if (p != hpage) - put_page(hpage); - return -EBUSY; - } if (unlikely(split_huge_page(hpage))) { pr_err("MCE: %#lx: thp split failed\n", pfn); if (TestClearPageHWPoison(p)) @@ -1209,9 +1212,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags) if (!PageHWPoison(p)) { printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); atomic_long_sub(nr_pages, &num_poisoned_pages); + unlock_page(hpage); put_page(hpage); - res = 0; - goto out; + return 0; } if (hwpoison_filter(p)) { if (TestClearPageHWPoison(p)) @@ -1656,6 +1659,8 @@ static int __soft_offline_page(struct page *page, int flags) inc_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); list_add(&page->lru, &pagelist); + if (!TestSetPageHWPoison(page)) + atomic_long_inc(&num_poisoned_pages); ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, MIGRATE_SYNC, MR_MEMORY_FAILURE); if (ret) { @@ -1670,9 +1675,8 @@ static int __soft_offline_page(struct page *page, int flags) pfn, ret, page->flags); if (ret > 0) ret = -EIO; - } else { - SetPageHWPoison(page); - atomic_long_inc(&num_poisoned_pages); + if (TestClearPageHWPoison(page)) + atomic_long_dec(&num_poisoned_pages); } } else { pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 26fbba7d888f..003dbe4b060d 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -446,7 +446,7 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn) int nr_pages = PAGES_PER_SECTION; int nid = pgdat->node_id; int zone_type; - unsigned long flags; + unsigned long flags, pfn; int ret; zone_type = zone - pgdat->node_zones; @@ -461,6 +461,14 @@ static int __meminit 
__add_zone(struct zone *zone, unsigned long phys_start_pfn) pgdat_resize_unlock(zone->zone_pgdat, &flags); memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn, MEMMAP_HOTPLUG); + + /* online_page_range is called later and expects pages reserved */ + for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) { + if (!pfn_valid(pfn)) + continue; + + SetPageReserved(pfn_to_page(pfn)); + } return 0; } diff --git a/mm/migrate.c b/mm/migrate.c index ee401e4e5ef1..eb4267107d1f 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -880,7 +880,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage, /* Establish migration ptes or remove ptes */ if (page_mapped(page)) { try_to_unmap(page, - TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); + TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS| + TTU_IGNORE_HWPOISON); page_was_mapped = 1; } @@ -950,7 +951,10 @@ out: list_del(&page->lru); dec_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); - if (reason != MR_MEMORY_FAILURE) + /* Soft-offlined page shouldn't go through lru cache list */ + if (reason == MR_MEMORY_FAILURE) + put_page(page); + else putback_lru_page(page); } diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 22cddd3e5de8..5cccc127ef81 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2063,10 +2063,10 @@ static struct notifier_block ratelimit_nb = { */ void __init page_writeback_init(void) { + BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL)); + writeback_set_ratelimit(); register_cpu_notifier(&ratelimit_nb); - - BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL)); } /** diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ef19f22b2b7d..beda41710802 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -981,21 +980,21 @@ static void __init __free_pages_boot_core(struct page *page, #if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \ defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) -/* Only safe to use early in boot when initialisation is single-threaded */ + static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; int __meminit early_pfn_to_nid(unsigned long pfn) { + static DEFINE_SPINLOCK(early_pfn_lock); int nid; - /* The system will behave unpredictably otherwise */ - BUG_ON(system_state != SYSTEM_BOOTING); - + spin_lock(&early_pfn_lock); nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); - if (nid >= 0) - return nid; - /* just returns 0 */ - return 0; + if (nid < 0) + nid = 0; + spin_unlock(&early_pfn_lock); + + return nid; } #endif @@ -1060,7 +1059,15 @@ static void __init deferred_free_range(struct page *page, __free_pages_boot_core(page, pfn, 0); } -static __initdata DECLARE_RWSEM(pgdat_init_rwsem); +/* Completion tracking for deferred_init_memmap() threads */ +static atomic_t pgdat_init_n_undone __initdata; +static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp); + +static inline void __init pgdat_init_report_one_done(void) +{ + if (atomic_dec_and_test(&pgdat_init_n_undone)) + complete(&pgdat_init_all_done_comp); +} /* Initialise remaining memory on a node */ static int __init deferred_init_memmap(void *data) @@ -1077,7 +1084,7 @@ static int __init deferred_init_memmap(void *data) const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); if (first_init_pfn == ULONG_MAX) { - up_read(&pgdat_init_rwsem); + pgdat_init_report_one_done(); return 0; } @@ -1177,7 +1184,8 @@ free_range: pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages, 
jiffies_to_msecs(jiffies - start)); - up_read(&pgdat_init_rwsem); + + pgdat_init_report_one_done(); return 0; } @@ -1185,14 +1193,17 @@ void __init page_alloc_init_late(void) { int nid; + /* There will be num_node_state(N_MEMORY) threads */ + atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); for_each_node_state(nid, N_MEMORY) { - down_read(&pgdat_init_rwsem); kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); } /* Block until all are initialised */ - down_write(&pgdat_init_rwsem); - up_write(&pgdat_init_rwsem); + wait_for_completion(&pgdat_init_all_done_comp); + + /* Reinit limits that are based on free pages after the kernel is up */ + files_maxfiles_init(); } #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ @@ -1285,6 +1296,10 @@ static inline int check_new_page(struct page *page) bad_reason = "non-NULL mapping"; if (unlikely(atomic_read(&page->_count) != 0)) bad_reason = "nonzero _count"; + if (unlikely(page->flags & __PG_HWPOISON)) { + bad_reason = "HWPoisoned (hardware-corrupted)"; + bad_flags = __PG_HWPOISON; + } if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) { bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set"; bad_flags = PAGE_FLAGS_CHECK_AT_PREP; diff --git a/mm/shmem.c b/mm/shmem.c index 4caf8ed24d65..dbe0c1e8349c 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3363,8 +3363,8 @@ put_path: * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be * kernel internal. There will be NO LSM permission checks against the * underlying inode. So users of this interface must do LSM checks at a - * higher layer. The one user is the big_key implementation. LSM checks - * are provided at the key level rather than the inode level. + * higher layer. The users are the big_key and shm implementations. LSM + * checks are provided at the key or shm level rather than the inode. * @name: name for dentry (to be seen in /proc//maps * @size: size to be set for the file * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size diff --git a/mm/slab_common.c b/mm/slab_common.c index 3e5f8f29c286..86831105a09f 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -37,8 +37,7 @@ struct kmem_cache *kmem_cache; SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \ SLAB_FAILSLAB) -#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ - SLAB_CACHE_DMA | SLAB_NOTRACK) +#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK) /* * Merge control. If this is set then no merging of slab caches will occur. diff --git a/mm/vmscan.c b/mm/vmscan.c index e61445dce04e..8286938c70de 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -973,22 +973,18 @@ static unsigned long shrink_page_list(struct list_head *page_list, * caller can stall after page list has been processed. * * 2) Global or new memcg reclaim encounters a page that is - * not marked for immediate reclaim or the caller does not - * have __GFP_IO. In this case mark the page for immediate + * not marked for immediate reclaim, or the caller does not + * have __GFP_FS (or __GFP_IO if it's simply going to swap, + * not to fs). In this case mark the page for immediate * reclaim and continue scanning. * - * __GFP_IO is checked because a loop driver thread might + * Require may_enter_fs because we would wait on fs, which + * may not have submitted IO yet. 
And the loop driver might * enter reclaim, and deadlock if it waits on a page for * which it is needed to do the write (loop masks off * __GFP_IO|__GFP_FS for this reason); but more thought * would probably show more reasons. * - * Don't require __GFP_FS, since we're not going into the - * FS, just waiting on its writeback completion. Worryingly, - * ext4 gfs2 and xfs allocate pages with - * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing - * may_enter_fs here is liable to OOM on them. - * * 3) Legacy memcg encounters a page that is not already marked * PageReclaim. memcg does not have any dirty pages * throttling so we could easily OOM just because too many @@ -1005,7 +1001,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, /* Case 2 above */ } else if (sane_reclaim(sc) || - !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) { + !PageReclaim(page) || !may_enter_fs) { /* * This is slightly racy - end_page_writeback() * might have just cleared PageReclaim, then diff --git a/net/6lowpan/Makefile b/net/6lowpan/Makefile index eb8baa72adc8..c6ffc55ee0d7 100644 --- a/net/6lowpan/Makefile +++ b/net/6lowpan/Makefile @@ -1,6 +1,6 @@ obj-$(CONFIG_6LOWPAN) += 6lowpan.o -6lowpan-y := iphc.o nhc.o +6lowpan-y := core.o iphc.o nhc.o #rfc6282 nhcs obj-$(CONFIG_6LOWPAN_NHC_DEST) += nhc_dest.o diff --git a/net/6lowpan/core.c b/net/6lowpan/core.c new file mode 100644 index 000000000000..ae1896fa45e2 --- /dev/null +++ b/net/6lowpan/core.c @@ -0,0 +1,40 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
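
The new net/6lowpan/core.c introduced here gives every 6LoWPAN link layer one entry point for tagging its net_device. A hypothetical driver-side sketch (LOWPAN_LLTYPE_BTLE stands in for whichever enum lowpan_lltypes value the link layer uses):

	static void my_lowpan_setup(struct net_device *dev)
	{
		lowpan_netdev_setup(dev, LOWPAN_LLTYPE_BTLE);	/* record lltype */
	}
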
+ * + * Authors: + * (C) 2015 Pengutronix, Alexander Aring + */ + +#include + +#include + +void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype) +{ + lowpan_priv(dev)->lltype = lltype; +} +EXPORT_SYMBOL(lowpan_netdev_setup); + +static int __init lowpan_module_init(void) +{ + request_module_nowait("ipv6"); + + request_module_nowait("nhc_dest"); + request_module_nowait("nhc_fragment"); + request_module_nowait("nhc_hop"); + request_module_nowait("nhc_ipv6"); + request_module_nowait("nhc_mobility"); + request_module_nowait("nhc_routing"); + request_module_nowait("nhc_udp"); + + return 0; +} +module_init(lowpan_module_init); + +MODULE_LICENSE("GPL"); diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c index 9055d7b9d112..1e0071fdcf72 100644 --- a/net/6lowpan/iphc.c +++ b/net/6lowpan/iphc.c @@ -48,7 +48,6 @@ #include #include -#include #include #include #include @@ -284,7 +283,7 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev, if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp))) return -EINVAL; - hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30); + hdr.flow_lbl[0] = (tmp & 0x0F) | ((tmp >> 2) & 0x30); memcpy(&hdr.flow_lbl[1], &skb->data[0], 2); skb_pull(skb, 2); break; @@ -610,21 +609,3 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev, return 0; } EXPORT_SYMBOL_GPL(lowpan_header_compress); - -static int __init lowpan_module_init(void) -{ - request_module_nowait("ipv6"); - - request_module_nowait("nhc_dest"); - request_module_nowait("nhc_fragment"); - request_module_nowait("nhc_hop"); - request_module_nowait("nhc_ipv6"); - request_module_nowait("nhc_mobility"); - request_module_nowait("nhc_routing"); - request_module_nowait("nhc_udp"); - - return 0; -} -module_init(lowpan_module_init); - -MODULE_LICENSE("GPL"); diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 01d7ba840df8..fded86508117 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -791,10 +791,9 @@ void vlan_setup(struct net_device *dev) { ether_setup(dev); - dev->priv_flags |= IFF_802_1Q_VLAN; + dev->priv_flags |= IFF_802_1Q_VLAN | IFF_NO_QUEUE; dev->priv_flags &= ~IFF_TX_SKB_SHARING; netif_keep_dst(dev); - dev->tx_queue_len = 0; dev->netdev_ops = &vlan_netdev_ops; dev->destructor = vlan_dev_free; diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index fb54e6aed096..cc7d87d64987 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -19,6 +19,7 @@ #include "main.h" #include +#include #include #include #include @@ -453,7 +454,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res, int j; /* check if orig node candidate is running DAT */ - if (!(candidate->capabilities & BATADV_ORIG_CAPA_HAS_DAT)) + if (!test_bit(BATADV_ORIG_CAPA_HAS_DAT, &candidate->capabilities)) goto out; /* Check if this node has already been selected... 
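
A recurring conversion in these batman-adv hunks: orig->capabilities changes from a mask manipulated with |= and &= ~ to an unsigned long driven by atomic bitops, so concurrent TVLV handlers cannot lose each other's updates. The pattern, with BATADV_ORIG_CAPA_HAS_DAT now a bit number rather than a mask:

	set_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);	/* was: |=   */
	clear_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);	/* was: &= ~ */
	if (test_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities))
		handle_dat_capable(orig);	/* handle_dat_capable() is illustrative */
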
*/ @@ -713,9 +714,9 @@ static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, uint16_t tvlv_value_len) { if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) - orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_DAT; + clear_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities); else - orig->capabilities |= BATADV_ORIG_CAPA_HAS_DAT; + set_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities); } /** @@ -1138,6 +1139,9 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv, * @bat_priv: the bat priv with all the soft interface information * @skb: packet to check * @hdr_size: size of the encapsulation header + * + * Returns true if the packet was snooped and consumed by DAT. False if the + * packet has to be delivered to the interface */ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, struct sk_buff *skb, int hdr_size) @@ -1145,7 +1149,7 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, uint16_t type; __be32 ip_src, ip_dst; uint8_t *hw_src, *hw_dst; - bool ret = false; + bool dropped = false; unsigned short vid; if (!atomic_read(&bat_priv->distributed_arp_table)) @@ -1174,12 +1178,17 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, /* if this REPLY is directed to a client of mine, let's deliver the * packet to the interface */ - ret = !batadv_is_my_client(bat_priv, hw_dst, vid); + dropped = !batadv_is_my_client(bat_priv, hw_dst, vid); + + /* if this REPLY is sent on behalf of a client of mine, let's drop the + * packet because the client will reply by itself + */ + dropped |= batadv_is_my_client(bat_priv, hw_src, vid); out: - if (ret) + if (dropped) kfree_skb(skb); - /* if ret == false -> packet has to be delivered to the interface */ - return ret; + /* if dropped == false -> deliver to the interface */ + return dropped; } /** diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index bb0158620628..6012e2b4af4f 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -153,15 +153,11 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv) struct batadv_neigh_node *router; struct batadv_neigh_ifinfo *router_ifinfo; struct batadv_gw_node *gw_node, *curr_gw = NULL; - uint32_t max_gw_factor = 0, tmp_gw_factor = 0; - uint32_t gw_divisor; + uint64_t max_gw_factor = 0, tmp_gw_factor = 0; uint8_t max_tq = 0; uint8_t tq_avg; struct batadv_orig_node *orig_node; - gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE; - gw_divisor *= 64; - rcu_read_lock(); hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) { if (gw_node->deleted) @@ -187,7 +183,7 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv) tmp_gw_factor = tq_avg * tq_avg; tmp_gw_factor *= gw_node->bandwidth_down; tmp_gw_factor *= 100 * 100; - tmp_gw_factor /= gw_divisor; + tmp_gw_factor >>= 18; if ((tmp_gw_factor > max_gw_factor) || ((tmp_gw_factor == max_gw_factor) && @@ -439,6 +435,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv, INIT_HLIST_NODE(&gw_node->list); gw_node->orig_node = orig_node; + gw_node->bandwidth_down = ntohl(gateway->bandwidth_down); + gw_node->bandwidth_up = ntohl(gateway->bandwidth_up); atomic_set(&gw_node->refcount, 1); spin_lock_bh(&bat_priv->gw.list_lock); diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index 7aa480b7edd0..68a9554961eb 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -19,6 +19,8 @@ #include "main.h" #include +#include +#include #include #include #include @@ 
-588,19 +590,26 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb, * * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator, * orig, has toggled then this method updates counter and list accordingly. + * + * Caller needs to hold orig->mcast_handler_lock. */ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv, struct batadv_orig_node *orig, uint8_t mcast_flags) { + struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node; + struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list; + /* switched from flag unset to set */ if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES && !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) { atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables); spin_lock_bh(&bat_priv->mcast.want_lists_lock); - hlist_add_head_rcu(&orig->mcast_want_all_unsnoopables_node, - &bat_priv->mcast.want_all_unsnoopables_list); + /* flag checks above + mcast_handler_lock prevents this */ + WARN_ON(!hlist_unhashed(node)); + + hlist_add_head_rcu(node, head); spin_unlock_bh(&bat_priv->mcast.want_lists_lock); /* switched from flag set to unset */ } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) && @@ -608,7 +617,10 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv, atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables); spin_lock_bh(&bat_priv->mcast.want_lists_lock); - hlist_del_rcu(&orig->mcast_want_all_unsnoopables_node); + /* flag checks above + mcast_handler_lock prevents this */ + WARN_ON(hlist_unhashed(node)); + + hlist_del_init_rcu(node); spin_unlock_bh(&bat_priv->mcast.want_lists_lock); } } @@ -621,19 +633,26 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv, * * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has * toggled then this method updates counter and list accordingly. + * + * Caller needs to hold orig->mcast_handler_lock. */ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv, struct batadv_orig_node *orig, uint8_t mcast_flags) { + struct hlist_node *node = &orig->mcast_want_all_ipv4_node; + struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list; + /* switched from flag unset to set */ if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 && !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) { atomic_inc(&bat_priv->mcast.num_want_all_ipv4); spin_lock_bh(&bat_priv->mcast.want_lists_lock); - hlist_add_head_rcu(&orig->mcast_want_all_ipv4_node, - &bat_priv->mcast.want_all_ipv4_list); + /* flag checks above + mcast_handler_lock prevents this */ + WARN_ON(!hlist_unhashed(node)); + + hlist_add_head_rcu(node, head); spin_unlock_bh(&bat_priv->mcast.want_lists_lock); /* switched from flag set to unset */ } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) && @@ -641,7 +660,10 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv, atomic_dec(&bat_priv->mcast.num_want_all_ipv4); spin_lock_bh(&bat_priv->mcast.want_lists_lock); - hlist_del_rcu(&orig->mcast_want_all_ipv4_node); + /* flag checks above + mcast_handler_lock prevents this */ + WARN_ON(hlist_unhashed(node)); + + hlist_del_init_rcu(node); spin_unlock_bh(&bat_priv->mcast.want_lists_lock); } } @@ -654,19 +676,26 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv, * * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has * toggled then this method updates counter and list accordingly. + * + * Caller needs to hold orig->mcast_handler_lock. 
*/ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv, struct batadv_orig_node *orig, uint8_t mcast_flags) { + struct hlist_node *node = &orig->mcast_want_all_ipv6_node; + struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list; + /* switched from flag unset to set */ if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 && !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) { atomic_inc(&bat_priv->mcast.num_want_all_ipv6); spin_lock_bh(&bat_priv->mcast.want_lists_lock); - hlist_add_head_rcu(&orig->mcast_want_all_ipv6_node, - &bat_priv->mcast.want_all_ipv6_list); + /* flag checks above + mcast_handler_lock prevents this */ + WARN_ON(!hlist_unhashed(node)); + + hlist_add_head_rcu(node, head); spin_unlock_bh(&bat_priv->mcast.want_lists_lock); /* switched from flag set to unset */ } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) && @@ -674,7 +703,10 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv, atomic_dec(&bat_priv->mcast.num_want_all_ipv6); spin_lock_bh(&bat_priv->mcast.want_lists_lock); - hlist_del_rcu(&orig->mcast_want_all_ipv6_node); + /* flag checks above + mcast_handler_lock prevents this */ + WARN_ON(hlist_unhashed(node)); + + hlist_del_init_rcu(node); spin_unlock_bh(&bat_priv->mcast.want_lists_lock); } } @@ -697,39 +729,42 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, uint8_t mcast_flags = BATADV_NO_FLAGS; bool orig_initialized; - orig_initialized = orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST; + if (orig_mcast_enabled && tvlv_value && + (tvlv_value_len >= sizeof(mcast_flags))) + mcast_flags = *(uint8_t *)tvlv_value; + + spin_lock_bh(&orig->mcast_handler_lock); + orig_initialized = test_bit(BATADV_ORIG_CAPA_HAS_MCAST, + &orig->capa_initialized); /* If mcast support is turned on decrease the disabled mcast node * counter only if we had increased it for this node before. If this * is a completely new orig_node no need to decrease the counter. */ if (orig_mcast_enabled && - !(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST)) { + !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) { if (orig_initialized) atomic_dec(&bat_priv->mcast.num_disabled); - orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST; + set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities); /* If mcast support is being switched off or if this is an initial * OGM without mcast support then increase the disabled mcast * node counter. 
*/ } else if (!orig_mcast_enabled && - (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST || + (test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) || !orig_initialized)) { atomic_inc(&bat_priv->mcast.num_disabled); - orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST; + clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities); } - orig->capa_initialized |= BATADV_ORIG_CAPA_HAS_MCAST; - - if (orig_mcast_enabled && tvlv_value && - (tvlv_value_len >= sizeof(mcast_flags))) - mcast_flags = *(uint8_t *)tvlv_value; + set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized); batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags); batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags); batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags); orig->mcast_flags = mcast_flags; + spin_unlock_bh(&orig->mcast_handler_lock); } /** @@ -763,11 +798,15 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig) { struct batadv_priv *bat_priv = orig->bat_priv; - if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) && - orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST) + spin_lock_bh(&orig->mcast_handler_lock); + + if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) && + test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized)) atomic_dec(&bat_priv->mcast.num_disabled); batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS); batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS); batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS); + + spin_unlock_bh(&orig->mcast_handler_lock); } diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index f0a50f31d822..46604010dcd4 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -19,6 +19,7 @@ #include "main.h" #include +#include #include #include #include @@ -134,9 +135,9 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, uint16_t tvlv_value_len) { if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) - orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_NC; + clear_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities); else - orig->capabilities |= BATADV_ORIG_CAPA_HAS_NC; + set_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities); } /** @@ -894,7 +895,7 @@ void batadv_nc_update_nc_node(struct batadv_priv *bat_priv, goto out; /* check if orig node is network coding enabled */ - if (!(orig_node->capabilities & BATADV_ORIG_CAPA_HAS_NC)) + if (!test_bit(BATADV_ORIG_CAPA_HAS_NC, &orig_node->capabilities)) goto out; /* accept ogms from 'good' neighbors and single hop neighbors */ diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 018b7495ad84..32a0fcfab36d 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -696,8 +696,13 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv, orig_node->last_seen = jiffies; reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS); orig_node->bcast_seqno_reset = reset_time; + #ifdef CONFIG_BATMAN_ADV_MCAST orig_node->mcast_flags = BATADV_NO_FLAGS; + INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node); + INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node); + INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node); + spin_lock_init(&orig_node->mcast_handler_lock); #endif /* create a vlan object for the "untagged" LAN */ diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 0a01992e65ab..191076ef1eca 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -616,7 +616,8 @@ 
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, * we delete only packets belonging to the given interface */ if ((hard_iface) && - (forw_packet->if_incoming != hard_iface)) + (forw_packet->if_incoming != hard_iface) && + (forw_packet->if_outgoing != hard_iface)) continue; spin_unlock_bh(&bat_priv->forw_bcast_list_lock); diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index c002961da75d..49d3d3aa59cb 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -202,6 +202,7 @@ static int batadv_interface_tx(struct sk_buff *skb, int gw_mode; enum batadv_forw_mode forw_mode; struct batadv_orig_node *mcast_single_orig = NULL; + int network_offset = ETH_HLEN; if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) goto dropped; @@ -214,14 +215,18 @@ static int batadv_interface_tx(struct sk_buff *skb, case ETH_P_8021Q: vhdr = vlan_eth_hdr(skb); - if (vhdr->h_vlan_encapsulated_proto != ethertype) + if (vhdr->h_vlan_encapsulated_proto != ethertype) { + network_offset += VLAN_HLEN; break; + } /* fall through */ case ETH_P_BATMAN: goto dropped; } + skb_set_network_header(skb, network_offset); + if (batadv_bla_tx(bat_priv, skb, vid)) goto dropped; @@ -479,6 +484,9 @@ out: */ void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan) { + if (!vlan) + return; + if (atomic_dec_and_test(&vlan->refcount)) { spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock); hlist_del_rcu(&vlan->list); @@ -933,7 +941,7 @@ static void batadv_softif_init_early(struct net_device *dev) dev->netdev_ops = &batadv_netdev_ops; dev->destructor = batadv_softif_free; dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; /* can't call min_mtu, because the needed variables * have not been initialized yet diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index b4824951010b..db06de20d996 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -19,6 +19,7 @@ #include "main.h" #include +#include #include #include #include @@ -594,6 +595,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, /* increase the refcounter of the related vlan */ vlan = batadv_softif_vlan_get(bat_priv, vid); + if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d", + addr, BATADV_PRINT_VID(vid))) + goto out; batadv_dbg(BATADV_DBG_TT, bat_priv, "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n", @@ -1034,6 +1038,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv, struct batadv_tt_local_entry *tt_local_entry; uint16_t flags, curr_flags = BATADV_NO_FLAGS; struct batadv_softif_vlan *vlan; + void *tt_entry_exists; tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); if (!tt_local_entry) @@ -1061,11 +1066,22 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv, * immediately purge it */ batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL); - hlist_del_rcu(&tt_local_entry->common.hash_entry); + + tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash, + batadv_compare_tt, + batadv_choose_tt, + &tt_local_entry->common); + if (!tt_entry_exists) + goto out; + + /* extra call to free the local tt entry */ batadv_tt_local_entry_free_ref(tt_local_entry); /* decrease the reference held for this vlan */ vlan = batadv_softif_vlan_get(bat_priv, vid); + if (!vlan) + goto out; + batadv_softif_vlan_free_ref(vlan); batadv_softif_vlan_free_ref(vlan); @@ -1166,8 +1182,10 @@ 
static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) /* decrease the reference held for this vlan */ vlan = batadv_softif_vlan_get(bat_priv, tt_common_entry->vid); - batadv_softif_vlan_free_ref(vlan); - batadv_softif_vlan_free_ref(vlan); + if (vlan) { + batadv_softif_vlan_free_ref(vlan); + batadv_softif_vlan_free_ref(vlan); + } batadv_tt_local_entry_free_ref(tt_local); } @@ -1862,7 +1880,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, } spin_unlock_bh(list_lock); } - orig_node->capa_initialized &= ~BATADV_ORIG_CAPA_HAS_TT; + clear_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized); } static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global, @@ -2195,7 +2213,7 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv) spin_lock_bh(&bat_priv->tt.req_list_lock); list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) { - list_del(&node->list); + list_del_init(&node->list); kfree(node); } @@ -2231,7 +2249,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv) list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) { if (batadv_has_timed_out(node->issued_at, BATADV_TT_REQUEST_TIMEOUT)) { - list_del(&node->list); + list_del_init(&node->list); kfree(node); } } @@ -2513,7 +2531,8 @@ out: batadv_hardif_free_ref(primary_if); if (ret && tt_req_node) { spin_lock_bh(&bat_priv->tt.req_list_lock); - list_del(&tt_req_node->list); + /* list_del_init() verifies tt_req_node still is in the list */ + list_del_init(&tt_req_node->list); spin_unlock_bh(&bat_priv->tt.req_list_lock); kfree(tt_req_node); } @@ -2821,7 +2840,7 @@ static void _batadv_tt_update_changes(struct batadv_priv *bat_priv, return; } } - orig_node->capa_initialized |= BATADV_ORIG_CAPA_HAS_TT; + set_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized); } static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv, @@ -2950,7 +2969,7 @@ static void batadv_handle_tt_response(struct batadv_priv *bat_priv, list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) { if (!batadv_compare_eth(node->addr, resp_src)) continue; - list_del(&node->list); + list_del_init(&node->list); kfree(node); } @@ -3207,8 +3226,10 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) /* decrease the reference held for this vlan */ vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid); - batadv_softif_vlan_free_ref(vlan); - batadv_softif_vlan_free_ref(vlan); + if (vlan) { + batadv_softif_vlan_free_ref(vlan); + batadv_softif_vlan_free_ref(vlan); + } batadv_tt_local_entry_free_ref(tt_local); } @@ -3321,7 +3342,8 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv, bool has_tt_init; tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff; - has_tt_init = orig_node->capa_initialized & BATADV_ORIG_CAPA_HAS_TT; + has_tt_init = test_bit(BATADV_ORIG_CAPA_HAS_TT, + &orig_node->capa_initialized); /* orig table not initialised AND first diff is in the OGM OR the ttvn * increased by one -> we can apply the attached changes diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 67d63483618e..55610a805b53 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -221,6 +221,7 @@ struct batadv_orig_bat_iv { * @batadv_dat_addr_t: address of the orig node in the distributed hash * @last_seen: time when last packet from this node was received * @bcast_seqno_reset: time when the broadcast seqno window was reset + * @mcast_handler_lock: synchronizes mcast-capability and -flag changes * @mcast_flags: 
multicast flags announced by the orig node * @mcast_want_all_unsnoop_node: a list node for the * mcast.want_all_unsnoopables list @@ -268,13 +269,15 @@ struct batadv_orig_node { unsigned long last_seen; unsigned long bcast_seqno_reset; #ifdef CONFIG_BATMAN_ADV_MCAST + /* synchronizes mcast tvlv specific orig changes */ + spinlock_t mcast_handler_lock; uint8_t mcast_flags; struct hlist_node mcast_want_all_unsnoopables_node; struct hlist_node mcast_want_all_ipv4_node; struct hlist_node mcast_want_all_ipv6_node; #endif - uint8_t capabilities; - uint8_t capa_initialized; + unsigned long capabilities; + unsigned long capa_initialized; atomic_t last_ttvn; unsigned char *tt_buff; int16_t tt_buff_len; @@ -313,10 +316,10 @@ struct batadv_orig_node { * (= orig node announces a tvlv of type BATADV_TVLV_MCAST) */ enum batadv_orig_capabilities { - BATADV_ORIG_CAPA_HAS_DAT = BIT(0), - BATADV_ORIG_CAPA_HAS_NC = BIT(1), - BATADV_ORIG_CAPA_HAS_TT = BIT(2), - BATADV_ORIG_CAPA_HAS_MCAST = BIT(3), + BATADV_ORIG_CAPA_HAS_DAT, + BATADV_ORIG_CAPA_HAS_NC, + BATADV_ORIG_CAPA_HAS_TT, + BATADV_ORIG_CAPA_HAS_MCAST, }; /** diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 0ffe2e24020a..131e79cde350 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -85,7 +85,7 @@ struct lowpan_dev { static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev) { - return netdev_priv(netdev); + return (struct lowpan_dev *)lowpan_priv(netdev)->priv; } static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer) @@ -848,8 +848,9 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev) struct net_device *netdev; int err = 0; - netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE, - NET_NAME_UNKNOWN, netdev_setup); + netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev)), + IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN, + netdev_setup); if (!netdev) return -ENOMEM; @@ -859,7 +860,7 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev) SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev); SET_NETDEV_DEVTYPE(netdev, &bt_type); - *dev = netdev_priv(netdev); + *dev = lowpan_dev(netdev); (*dev)->netdev = netdev; (*dev)->hdev = chan->conn->hcon->hdev; INIT_LIST_HEAD(&(*dev)->peers); @@ -869,6 +870,8 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev) list_add_rcu(&(*dev)->list, &bt_6lowpan_devices); spin_unlock(&devices_lock); + lowpan_netdev_setup(netdev, LOWPAN_LLTYPE_BTLE); + err = register_netdev(netdev); if (err < 0) { BT_INFO("register_netdev failed %d", err); diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c index 238ddd3cf95f..e32f34189007 100644 --- a/net/bluetooth/amp.c +++ b/net/bluetooth/amp.c @@ -379,7 +379,7 @@ static bool amp_write_rem_assoc_frag(struct hci_dev *hdev, amp_ctrl_put(ctrl); hci_req_init(&req, hdev); - hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, sizeof(cp), &cp); + hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp); hci_req_run_skb(&req, write_remote_amp_assoc_complete); kfree(cp); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 2c48bf0b5afb..b4548c739a64 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -64,6 +64,48 @@ static void hci_le_create_connection_cancel(struct hci_conn *conn) hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL); } +/* This function requires the caller holds hdev->lock */ +static void hci_connect_le_scan_cleanup(struct hci_conn *conn) +{ + struct hci_conn_params 
*params; + struct smp_irk *irk; + bdaddr_t *bdaddr; + u8 bdaddr_type; + + bdaddr = &conn->dst; + bdaddr_type = conn->dst_type; + + /* Check if we need to convert to identity address */ + irk = hci_get_irk(conn->hdev, bdaddr, bdaddr_type); + if (irk) { + bdaddr = &irk->bdaddr; + bdaddr_type = irk->addr_type; + } + + params = hci_explicit_connect_lookup(conn->hdev, bdaddr, bdaddr_type); + if (!params) + return; + + /* The connection attempt was scanning for a new RPA, and is + * in the scan phase. If params are not associated with any other + * autoconnect action, remove them completely. If they are, just unmark + * them as waiting for connection, by clearing the explicit_connect field. + */ + if (params->auto_connect == HCI_AUTO_CONN_EXPLICIT) + hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type); + else + params->explicit_connect = false; +} + +/* This function requires the caller holds hdev->lock */ +static void hci_connect_le_scan_remove(struct hci_conn *conn) +{ + hci_connect_le_scan_cleanup(conn); + + hci_conn_hash_del(conn->hdev, conn); + hci_update_background_scan(conn->hdev); +} + static void hci_acl_create_connection(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; @@ -340,8 +382,12 @@ static void hci_conn_timeout(struct work_struct *work) if (conn->out) { if (conn->type == ACL_LINK) hci_acl_create_connection_cancel(conn); - else if (conn->type == LE_LINK) - hci_le_create_connection_cancel(conn); + else if (conn->type == LE_LINK) { + if (test_bit(HCI_CONN_SCANNING, &conn->flags)) + hci_connect_le_scan_remove(conn); + else + hci_le_create_connection_cancel(conn); + } } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) { hci_reject_sco(conn); } @@ -637,15 +683,18 @@ static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode) { struct hci_conn *conn; - if (status == 0) - return; + hci_dev_lock(hdev); + + conn = hci_lookup_le_connect(hdev); + + if (!status) { + hci_connect_le_scan_cleanup(conn); + goto done; + } BT_ERR("HCI request failed to create LE connection: status 0x%2.2x", status); - hci_dev_lock(hdev); - - conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); if (!conn) goto done; @@ -685,6 +734,7 @@ static void hci_req_add_le_create_conn(struct hci_request *req, hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); conn->state = BT_CONNECT; + clear_bit(HCI_CONN_SCANNING, &conn->flags); } static void hci_req_directed_advertising(struct hci_request *req, @@ -728,7 +778,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, u8 role) { struct hci_conn_params *params; - struct hci_conn *conn; + struct hci_conn *conn, *conn_unfinished; struct smp_irk *irk; struct hci_request req; int err; @@ -751,26 +801,29 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, * and return the object found. */ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); + conn_unfinished = NULL; if (conn) { - conn->pending_sec_level = sec_level; - goto done; + if (conn->state == BT_CONNECT && + test_bit(HCI_CONN_SCANNING, &conn->flags)) { + BT_DBG("will continue unfinished conn %pMR", dst); + conn_unfinished = conn; + } else { + if (conn->pending_sec_level < sec_level) + conn->pending_sec_level = sec_level; + goto done; + } } /* Since the controller supports only one LE connection attempt at a * time, we return -EBUSY if there is any connection attempt running.
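 *
 * (hci_lookup_le_connect(), used below, is a helper added elsewhere in
 * this series; its header hunk is not shown in this excerpt. It is assumed
 * to return the LE link in BT_CONNECT state that is not flagged
 * HCI_CONN_SCANNING.)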
*/ - conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); - if (conn) + if (hci_lookup_le_connect(hdev)) return ERR_PTR(-EBUSY); /* When given an identity address with existing identity * resolving key, the connection needs to be established * to a resolvable random address. * - * This uses the cached random resolvable address from - * a previous scan. When no cached address is available, - * try connecting to the identity address instead. - * * Storing the resolvable random address is required here * to handle connection failures. The address will later * be resolved back into the original identity address @@ -782,15 +835,23 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, dst_type = ADDR_LE_DEV_RANDOM; } - conn = hci_conn_add(hdev, LE_LINK, dst, role); + if (conn_unfinished) { + conn = conn_unfinished; + bacpy(&conn->dst, dst); + } else { + conn = hci_conn_add(hdev, LE_LINK, dst, role); + } + if (!conn) return ERR_PTR(-ENOMEM); conn->dst_type = dst_type; conn->sec_level = BT_SECURITY_LOW; - conn->pending_sec_level = sec_level; conn->conn_timeout = conn_timeout; + if (!conn_unfinished) + conn->pending_sec_level = sec_level; + hci_req_init(&req, hdev); /* Disable advertising if we're active. For master role @@ -854,6 +915,144 @@ create_conn: return ERR_PTR(err); } +done: + /* If this is a continuation of a connect started by hci_connect_le_scan, + * it already called hci_conn_hold and calling it again would mess up the + * counter. + */ + if (!conn_unfinished) + hci_conn_hold(conn); + + return conn; +} + +static void hci_connect_le_scan_complete(struct hci_dev *hdev, u8 status, + u16 opcode) +{ + struct hci_conn *conn; + + if (!status) + return; + + BT_ERR("Failed to add device to auto conn whitelist: status 0x%2.2x", + status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); + if (conn) + hci_le_conn_failed(conn, status); + + hci_dev_unlock(hdev); +} + +static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type) +{ + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr); + if (!conn) + return false; + + if (conn->dst_type != type) + return false; + + if (conn->state != BT_CONNECTED) + return false; + + return true; +} + +/* This function requires the caller holds hdev->lock */ +static int hci_explicit_conn_params_set(struct hci_request *req, + bdaddr_t *addr, u8 addr_type) +{ + struct hci_dev *hdev = req->hdev; + struct hci_conn_params *params; + + if (is_connected(hdev, addr, addr_type)) + return -EISCONN; + + params = hci_conn_params_add(hdev, addr, addr_type); + if (!params) + return -EIO; + + /* If we created new params, or existing params were marked as disabled, + * mark them to be used just once to connect.
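+ * (Newly added params are assumed to start out as HCI_AUTO_CONN_DISABLED
+ * in hci_conn_params_add(), so the single check below covers both cases.)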
+ */ + if (params->auto_connect == HCI_AUTO_CONN_DISABLED) { + params->auto_connect = HCI_AUTO_CONN_EXPLICIT; + list_del_init(&params->action); + list_add(&params->action, &hdev->pend_le_conns); + } + + params->explicit_connect = true; + __hci_update_background_scan(req); + + BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type, + params->auto_connect); + + return 0; +} + +/* This function requires the caller holds hdev->lock */ +struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, + u8 dst_type, u8 sec_level, + u16 conn_timeout, u8 role) +{ + struct hci_conn *conn; + struct hci_request req; + int err; + + /* Let's make sure that LE is enabled. */ + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { + if (lmp_le_capable(hdev)) + return ERR_PTR(-ECONNREFUSED); + + return ERR_PTR(-EOPNOTSUPP); + } + + /* Some devices send ATT messages as soon as the physical link is + * established. To be able to handle these ATT messages, the user- + * space first establishes the connection and then starts the pairing + * process. + * + * So if a hci_conn object already exists for the following connection + * attempt, we simply update pending_sec_level and auth_type fields + * and return the object found. + */ + conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); + if (conn) { + if (conn->pending_sec_level < sec_level) + conn->pending_sec_level = sec_level; + goto done; + } + + BT_DBG("requesting refresh of dst_addr"); + + conn = hci_conn_add(hdev, LE_LINK, dst, role); + if (!conn) + return ERR_PTR(-ENOMEM); + + hci_req_init(&req, hdev); + + if (hci_explicit_conn_params_set(&req, dst, dst_type) < 0) + return ERR_PTR(-EBUSY); + + conn->state = BT_CONNECT; + set_bit(HCI_CONN_SCANNING, &conn->flags); + + err = hci_req_run(&req, hci_connect_le_scan_complete); + if (err && err != -ENODATA) { + hci_conn_del(conn); + return ERR_PTR(err); + } + + conn->dst_type = dst_type; + conn->sec_level = BT_SECURITY_LOW; + conn->pending_sec_level = sec_level; + conn->conn_timeout = conn_timeout; + done: hci_conn_hold(conn); return conn; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index bc43b6490555..adcbc74c2432 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2847,6 +2847,30 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, return NULL; } +/* This function requires the caller holds hdev->lock */ +struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev, + bdaddr_t *addr, + u8 addr_type) +{ + struct hci_conn_params *param; + + list_for_each_entry(param, &hdev->pend_le_conns, action) { + if (bacmp(&param->addr, addr) == 0 && + param->addr_type == addr_type && + param->explicit_connect) + return param; + } + + list_for_each_entry(param, &hdev->pend_le_reports, action) { + if (bacmp(&param->addr, addr) == 0 && + param->addr_type == addr_type && + param->explicit_connect) + return param; + } + + return NULL; +} + /* This function requires the caller holds hdev->lock */ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) @@ -2916,6 +2940,15 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev) list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { if (params->auto_connect != HCI_AUTO_CONN_DISABLED) continue; + + /* If trying to establish a one-time connection to a disabled + * device, leave the params, but mark them to be used just once.
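+ *
+ * Such params then survive hci_conn_params_clear_disabled() with
+ * auto_connect == HCI_AUTO_CONN_EXPLICIT and are removed again once the
+ * explicit attempt finishes, via hci_connect_le_scan_cleanup() above.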
+ */ + if (params->explicit_connect) { + params->auto_connect = HCI_AUTO_CONN_EXPLICIT; + continue; + } + list_del(&params->list); kfree(params); } diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 218d7dfc342f..7ba35a9ba6b7 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1059,7 +1059,7 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_set_flag(hdev, HCI_LE_ADV); - conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); + conn = hci_lookup_le_connect(hdev); if (conn) queue_delayed_work(hdev->workqueue, &conn->le_conn_timeout, @@ -4447,7 +4447,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) */ hci_dev_clear_flag(hdev, HCI_LE_ADV); - conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); + conn = hci_lookup_le_connect(hdev); if (!conn) { conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role); if (!conn) { @@ -4640,42 +4640,49 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, /* If we're not connectable only connect devices that we have in * our pend_le_conns list. */ - params = hci_pend_le_action_lookup(&hdev->pend_le_conns, - addr, addr_type); + params = hci_explicit_connect_lookup(hdev, addr, addr_type); + if (!params) return NULL; - switch (params->auto_connect) { - case HCI_AUTO_CONN_DIRECT: - /* Only devices advertising with ADV_DIRECT_IND are - * triggering a connection attempt. This is allowing - * incoming connections from slave devices. - */ - if (adv_type != LE_ADV_DIRECT_IND) + if (!params->explicit_connect) { + switch (params->auto_connect) { + case HCI_AUTO_CONN_DIRECT: + /* Only devices advertising with ADV_DIRECT_IND are + * triggering a connection attempt. This is allowing + * incoming connections from slave devices. + */ + if (adv_type != LE_ADV_DIRECT_IND) + return NULL; + break; + case HCI_AUTO_CONN_ALWAYS: + /* Devices advertising with ADV_IND or ADV_DIRECT_IND + * are triggering a connection attempt. This means + * that incoming connections from slave devices are + * accepted and also outgoing connections to slave + * devices are established when found. + */ + break; + default: return NULL; - break; - case HCI_AUTO_CONN_ALWAYS: - /* Devices advertising with ADV_IND or ADV_DIRECT_IND - * are triggering a connection attempt. This means - * that incoming connectioms from slave device are - * accepted and also outgoing connections to slave - * devices are established when found. - */ - break; - default: - return NULL; + } } conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER); if (!IS_ERR(conn)) { - /* Store the pointer since we don't really have any + /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned + * by the higher layer that tried to connect; if not, then + * store the pointer since we don't really have any * other owner of the object besides the params that * triggered it. This way we can abort the connection if * the parameters get removed and keep the reference * count consistent once the connection is established. */ - params->conn = hci_conn_get(conn); + + if (!params->explicit_connect) + params->conn = hci_conn_get(conn); + return conn; } diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index d6025d6e6d59..b7369220c9ef 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -317,7 +317,7 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa) * address be updated at the next cycle.
*/ if (hci_dev_test_flag(hdev, HCI_LE_ADV) || - hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) { + hci_lookup_le_connect(hdev)) { BT_DBG("Deferring random address update"); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); return; @@ -479,7 +479,6 @@ void hci_update_page_scan(struct hci_dev *hdev) void __hci_update_background_scan(struct hci_request *req) { struct hci_dev *hdev = req->hdev; - struct hci_conn *conn; if (!test_bit(HCI_UP, &hdev->flags) || test_bit(HCI_INIT, &hdev->flags) || @@ -529,8 +528,7 @@ void __hci_update_background_scan(struct hci_request *req) * since some controllers are not able to scan and connect at * the same time. */ - conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); - if (conn) + if (hci_lookup_le_connect(hdev)) return; /* If controller is currently scanning, we stop it to ensure we diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 45fffa413642..7c65ee200c29 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -7113,8 +7113,10 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, else role = HCI_ROLE_MASTER; - hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level, - HCI_LE_CONN_TIMEOUT, role); + hcon = hci_connect_le_scan(hdev, dst, dst_type, + chan->sec_level, + HCI_LE_CONN_TIMEOUT, + role); } else { u8 auth_type = l2cap_get_auth_type(chan); hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 7ab191589541..ccaf5a436d8f 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -3564,9 +3564,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, */ hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type); - conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type, - sec_level, HCI_LE_CONN_TIMEOUT, - HCI_ROLE_MASTER); + conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, + addr_type, sec_level, + HCI_LE_CONN_TIMEOUT, + HCI_ROLE_MASTER); } if (IS_ERR(conn)) { @@ -4210,7 +4211,7 @@ static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status) /* Don't let discovery abort an outgoing connection attempt * that's using directed advertising. */ - if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) { + if (hci_lookup_le_connect(hdev)) { *status = MGMT_STATUS_REJECTED; return false; } @@ -6107,6 +6108,12 @@ static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr, switch (auto_connect) { case HCI_AUTO_CONN_DISABLED: case HCI_AUTO_CONN_LINK_LOSS: + /* If auto connect is being disabled when we're trying to + * connect to a device, keep connecting. + */ + if (params->explicit_connect) + list_add(&params->action, &hdev->pend_le_conns); + __hci_update_background_scan(req); break; case HCI_AUTO_CONN_REPORT: @@ -7843,7 +7850,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent) /* Make sure we copy only the significant bytes based on the * encryption key size, and set the rest of the value to zeroes.
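 *
 * (In the old code below, sizeof(key->enc_size) is the size of the length
 * field itself, not the key length, so only that many bytes were copied,
 * one for a u8; key->enc_size is the intended count of significant bytes.)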
*/ - memcpy(ev.key.val, key->val, sizeof(key->enc_size)); + memcpy(ev.key.val, key->val, key->enc_size); memset(ev.key.val + key->enc_size, 0, sizeof(ev.key.val) - key->enc_size); diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 0aa8f5cf46a1..6ed2feb51e3c 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -365,8 +365,7 @@ void br_dev_setup(struct net_device *dev) dev->destructor = br_dev_free; dev->ethtool_ops = &br_ethtool_ops; SET_NETDEV_DEVTYPE(dev, &br_type); - dev->tx_queue_len = 0; - dev->priv_flags = IFF_EBRIDGE; + dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE; dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 91a2e08c2bb8..dbcb1949ea58 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void) + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ + nla_total_size(1) /* IFLA_BRPORT_LEARNING */ + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */ + + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */ + + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */ + 0; } @@ -504,6 +506,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 }, [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, + [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 }, + [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 }, }; /* Change the state of the port and notify spanning tree */ @@ -724,6 +728,7 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = { [IFLA_BR_AGEING_TIME] = { .type = NLA_U32 }, [IFLA_BR_STP_STATE] = { .type = NLA_U32 }, [IFLA_BR_PRIORITY] = { .type = NLA_U16 }, + [IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 }, }; static int br_changelink(struct net_device *brdev, struct nlattr *tb[], @@ -771,6 +776,14 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[], br_stp_set_bridge_priority(br, priority); } + if (data[IFLA_BR_VLAN_FILTERING]) { + u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]); + + err = __br_vlan_filter_toggle(br, vlan_filter); + if (err) + return err; + } + return 0; } @@ -782,6 +795,7 @@ static size_t br_get_size(const struct net_device *brdev) nla_total_size(sizeof(u32)) + /* IFLA_BR_AGEING_TIME */ nla_total_size(sizeof(u32)) + /* IFLA_BR_STP_STATE */ nla_total_size(sizeof(u16)) + /* IFLA_BR_PRIORITY */ + nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_FILTERING */ 0; } @@ -794,13 +808,15 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) u32 ageing_time = jiffies_to_clock_t(br->ageing_time); u32 stp_enabled = br->stp_enabled; u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]; + u8 vlan_enabled = br_vlan_enabled(br); if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) || nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) || nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) || nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) || nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) || - nla_put_u16(skb, IFLA_BR_PRIORITY, priority)) + nla_put_u16(skb, IFLA_BR_PRIORITY, priority) || + nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled)) return -EMSGSIZE; return 0; @@ -833,7 +849,7 @@ struct rtnl_link_ops br_link_ops __read_mostly = { .kind = "bridge", .priv_size = sizeof(struct net_bridge), .setup = br_dev_setup, - .maxtype = IFLA_BRPORT_MAX, + .maxtype = IFLA_BR_MAX, .policy = 
br_policy, .validate = br_validate, .newlink = br_dev_newlink, diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index e2cb359f9dd3..3d95647039d0 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -614,6 +614,7 @@ int br_vlan_delete(struct net_bridge *br, u16 vid); void br_vlan_flush(struct net_bridge *br); bool br_vlan_find(struct net_bridge *br, u16 vid); void br_recalculate_fwd_mask(struct net_bridge *br); +int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val); int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val); int br_vlan_set_proto(struct net_bridge *br, unsigned long val); int br_vlan_init(struct net_bridge *br); @@ -771,6 +772,12 @@ static inline int br_vlan_enabled(struct net_bridge *br) { return 0; } + +static inline int __br_vlan_filter_toggle(struct net_bridge *br, + unsigned long val) +{ + return -EOPNOTSUPP; +} #endif struct nf_br_ops { diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 0d41f81838ff..3cef6892c0bb 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -468,21 +468,27 @@ void br_recalculate_fwd_mask(struct net_bridge *br) ~(1u << br->group_addr[5]); } -int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val) +int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val) { - if (!rtnl_trylock()) - return restart_syscall(); - if (br->vlan_enabled == val) - goto unlock; + return 0; br->vlan_enabled = val; br_manage_promisc(br); recalculate_group_addr(br); br_recalculate_fwd_mask(br); -unlock: + return 0; +} + +int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val) +{ + if (!rtnl_trylock()) + return restart_syscall(); + + __br_vlan_filter_toggle(br, val); rtnl_unlock(); + return 0; } diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index edbca468fa73..d730a0f68f46 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -177,7 +177,7 @@ static int transmit(struct cflayer *layer, struct cfpkt *pkt) skb->protocol = htons(ETH_P_CAIF); /* Check if we need to handle xoff */ - if (likely(caifd->netdev->tx_queue_len == 0)) + if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE)) goto noxoff; if (unlikely(caifd->xoff)) diff --git a/net/core/datagram.c b/net/core/datagram.c index 4967262b2707..617088aee21d 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -131,12 +131,12 @@ out_noerr: goto out; } -static int skb_set_peeked(struct sk_buff *skb) +static struct sk_buff *skb_set_peeked(struct sk_buff *skb) { struct sk_buff *nskb; if (skb->peeked) - return 0; + return skb; /* We have to unshare an skb before modifying it. 
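 *
 * With the ERR_PTR-based return type introduced above, callers are
 * expected to follow this sketch (mirroring __skb_recv_datagram below):
 *
 *	skb = skb_set_peeked(skb);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);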
*/ if (!skb_shared(skb)) @@ -144,7 +144,7 @@ static int skb_set_peeked(struct sk_buff *skb) nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) - return -ENOMEM; + return ERR_PTR(-ENOMEM); skb->prev->next = nskb; skb->next->prev = nskb; @@ -157,7 +157,7 @@ static int skb_set_peeked(struct sk_buff *skb) done: skb->peeked = 1; - return 0; + return skb; } /** @@ -229,8 +229,9 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, continue; } - error = skb_set_peeked(skb); - if (error) + skb = skb_set_peeked(skb); + error = PTR_ERR(skb); + if (IS_ERR(skb)) goto unlock_err; atomic_inc(&skb->users); diff --git a/net/core/dev.c b/net/core/dev.c index 4870c3556a5a..b1f3f4844e60 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6997,6 +6997,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; setup(dev); + if (!dev->tx_queue_len) + printk(KERN_WARNING "%s uses DEPRECATED zero tx_queue_len - convert driver to use IFF_NO_QUEUE instead.\n", name); + dev->num_tx_queues = txqs; dev->real_num_tx_queues = txqs; if (netif_alloc_netdev_queues(dev)) diff --git a/net/core/filter.c b/net/core/filter.c index a50dbfa83ad9..83f08cefeab7 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1124,6 +1124,7 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, *pfp = fp; return 0; } +EXPORT_SYMBOL_GPL(bpf_prog_create_from_user); void bpf_prog_destroy(struct bpf_prog *fp) { @@ -1348,7 +1349,7 @@ const struct bpf_func_proto bpf_l3_csum_replace_proto = { static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) { struct sk_buff *skb = (struct sk_buff *) (long) r1; - u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags); + bool is_pseudo = !!BPF_IS_PSEUDO_HEADER(flags); int offset = (int) r2; __sum16 sum, *ptr; diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c index 5d6d8e3d450a..3331585174d9 100644 --- a/net/core/lwtunnel.c +++ b/net/core/lwtunnel.c @@ -241,3 +241,58 @@ int lwtunnel_output(struct sock *sk, struct sk_buff *skb) return __lwtunnel_output(sk, skb, lwtstate); } EXPORT_SYMBOL(lwtunnel_output); + +int __lwtunnel_input(struct sk_buff *skb, + struct lwtunnel_state *lwtstate) +{ + const struct lwtunnel_encap_ops *ops; + int ret = -EINVAL; + + if (!lwtstate) + goto drop; + + if (lwtstate->type == LWTUNNEL_ENCAP_NONE || + lwtstate->type > LWTUNNEL_ENCAP_MAX) + return 0; + + ret = -EOPNOTSUPP; + rcu_read_lock(); + ops = rcu_dereference(lwtun_encaps[lwtstate->type]); + if (likely(ops && ops->input)) + ret = ops->input(skb); + rcu_read_unlock(); + + if (ret == -EOPNOTSUPP) + goto drop; + + return ret; + +drop: + kfree_skb(skb); + + return ret; +} + +int lwtunnel_input6(struct sk_buff *skb) +{ + struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); + struct lwtunnel_state *lwtstate = NULL; + + if (rt) + lwtstate = rt->rt6i_lwtstate; + + return __lwtunnel_input(skb, lwtstate); +} +EXPORT_SYMBOL(lwtunnel_input6); + +int lwtunnel_input(struct sk_buff *skb) +{ + struct rtable *rt = (struct rtable *)skb_dst(skb); + struct lwtunnel_state *lwtstate = NULL; + + if (rt) + lwtstate = rt->rt_lwtstate; + + return __lwtunnel_input(skb, lwtstate); +} +EXPORT_SYMBOL(lwtunnel_input); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 84195dacb8b6..2b515ba7e94f 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -274,8 +274,12 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device (entries >= tbl->gc_thresh2 && time_after(now, 
tbl->last_flush + 5 * HZ))) { if (!neigh_forced_gc(tbl) && - entries >= tbl->gc_thresh3) + entries >= tbl->gc_thresh3) { + net_info_ratelimited("%s: neighbor table overflow!\n", + tbl->id); + NEIGH_CACHE_STAT_INC(tbl, table_fulls); goto out_entries; + } } n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC); @@ -1849,6 +1853,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast; ndst.ndts_periodic_gc_runs += st->periodic_gc_runs; ndst.ndts_forced_gc_runs += st->forced_gc_runs; + ndst.ndts_table_fulls += st->table_fulls; } if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst)) @@ -2717,12 +2722,12 @@ static int neigh_stat_seq_show(struct seq_file *seq, void *v) struct neigh_statistics *st = v; if (v == SEQ_START_TOKEN) { - seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n"); + seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n"); return 0; } seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx " - "%08lx %08lx %08lx %08lx %08lx\n", + "%08lx %08lx %08lx %08lx %08lx %08lx\n", atomic_read(&tbl->entries), st->allocs, @@ -2739,7 +2744,8 @@ static int neigh_stat_seq_show(struct seq_file *seq, void *v) st->periodic_gc_runs, st->forced_gc_runs, - st->unres_discards + st->unres_discards, + st->table_fulls ); return 0; diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 194c1d03b2b3..b279077c3089 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -689,7 +689,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue, struct rps_map *old_map, *map; cpumask_var_t mask; int err, cpu, i; - static DEFINE_SPINLOCK(rps_map_lock); + static DEFINE_MUTEX(rps_map_mutex); if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -722,18 +722,21 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue, map = NULL; } - spin_lock(&rps_map_lock); + mutex_lock(&rps_map_mutex); old_map = rcu_dereference_protected(queue->rps_map, - lockdep_is_held(&rps_map_lock)); + mutex_is_locked(&rps_map_mutex)); rcu_assign_pointer(queue->rps_map, map); - spin_unlock(&rps_map_lock); if (map) static_key_slow_inc(&rps_needed); - if (old_map) { - kfree_rcu(old_map, rcu); + if (old_map) static_key_slow_dec(&rps_needed); - } + + mutex_unlock(&rps_map_mutex); + + if (old_map) + kfree_rcu(old_map, rcu); + free_cpumask_var(mask); return len; } diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 0e0fb30cbc04..de8d5cc5eb24 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3513,8 +3513,6 @@ static int pktgen_thread_worker(void *arg) set_freezable(); - __set_current_state(TASK_RUNNING); - while (!kthread_should_stop()) { pkt_dev = next_to_run(t); @@ -3559,7 +3557,6 @@ static int pktgen_thread_worker(void *arg) try_to_freeze(); } - set_current_state(TASK_INTERRUPTIBLE); pr_debug("%s stopping all device\n", t->tsk->comm); pktgen_stop(t); diff --git a/net/core/request_sock.c b/net/core/request_sock.c index 87b22c0bc08c..b42f0e26f89e 100644 --- a/net/core/request_sock.c +++ b/net/core/request_sock.c @@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue) spin_lock_bh(&queue->syn_wait_lock); while ((req = lopt->syn_table[i]) != NULL) { lopt->syn_table[i] = req->dl_next; + /* Because of the following del_timer_sync(), + * we must release the spinlock here + * or risk a
deadlock. + */ + spin_unlock_bh(&queue->syn_wait_lock); atomic_inc(&lopt->qlen_dec); - if (del_timer(&req->rsk_timer)) + if (del_timer_sync(&req->rsk_timer)) reqsk_put(req); reqsk_put(req); + spin_lock_bh(&queue->syn_wait_lock); } spin_unlock_bh(&queue->syn_wait_lock); } diff --git a/net/core/utils.c b/net/core/utils.c index a7732a068043..3dffce953c39 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -301,7 +301,7 @@ out: EXPORT_SYMBOL(in6_pton); void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, - __be32 from, __be32 to, int pseudohdr) + __be32 from, __be32 to, bool pseudohdr) { if (skb->ip_summed != CHECKSUM_PARTIAL) { csum_replace4(sum, from, to); @@ -318,7 +318,7 @@ EXPORT_SYMBOL(inet_proto_csum_replace4); void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, const __be32 *from, const __be32 *to, - int pseudohdr) + bool pseudohdr) { __be32 diff[] = { ~from[0], ~from[1], ~from[2], ~from[3], @@ -336,6 +336,19 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, } EXPORT_SYMBOL(inet_proto_csum_replace16); +void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb, + __wsum diff, bool pseudohdr) +{ + if (skb->ip_summed != CHECKSUM_PARTIAL) { + *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); + if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr) + skb->csum = ~csum_add(diff, ~skb->csum); + } else if (pseudohdr) { + *sum = ~csum_fold(csum_add(diff, csum_unfold(*sum))); + } +} +EXPORT_SYMBOL(inet_proto_csum_replace_by_diff); + struct __net_random_once_work { struct work_struct work; struct static_key *key; diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index b445d492c115..053eb2b8e682 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -554,6 +554,31 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, return 0; } +static int dsa_of_probe_links(struct dsa_platform_data *pd, + struct dsa_chip_data *cd, + int chip_index, int port_index, + struct device_node *port, + const char *port_name) +{ + struct device_node *link; + int link_index; + int ret; + + for (link_index = 0;; link_index++) { + link = of_parse_phandle(port, "link", link_index); + if (!link) + break; + + if (!strcmp(port_name, "dsa") && pd->nr_chips > 1) { + ret = dsa_of_setup_routing_table(pd, cd, chip_index, + port_index, link); + if (ret) + return ret; + } + } + return 0; +} + static void dsa_of_free_platform_data(struct dsa_platform_data *pd) { int i; @@ -573,8 +598,8 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd) static int dsa_of_probe(struct device *dev) { struct device_node *np = dev->of_node; - struct device_node *child, *mdio, *ethernet, *port, *link; - struct mii_bus *mdio_bus; + struct device_node *child, *mdio, *ethernet, *port; + struct mii_bus *mdio_bus, *mdio_bus_switch; struct net_device *ethernet_dev; struct dsa_platform_data *pd; struct dsa_chip_data *cd; @@ -636,6 +661,16 @@ static int dsa_of_probe(struct device *dev) if (!of_property_read_u32(child, "eeprom-length", &eeprom_len)) cd->eeprom_len = eeprom_len; + mdio = of_parse_phandle(child, "mii-bus", 0); + if (mdio) { + mdio_bus_switch = of_mdio_find_bus(mdio); + if (!mdio_bus_switch) { + ret = -EPROBE_DEFER; + goto out_free_chip; + } + cd->host_dev = &mdio_bus_switch->dev; + } + for_each_available_child_of_node(child, port) { port_reg = of_get_property(port, "reg", NULL); if (!port_reg) @@ -658,15 +693,10 @@ static int dsa_of_probe(struct device *dev) goto out_free_chip; } - link = of_parse_phandle(port, "link", 0); - - if (!strcmp(port_name, "dsa") && link
&& - pd->nr_chips > 1) { - ret = dsa_of_setup_routing_table(pd, cd, - chip_index, port_index, link); - if (ret) - goto out_free_chip; - } + ret = dsa_of_probe_links(pd, cd, chip_index, + port_index, port, port_name); + if (ret) + goto out_free_chip; } } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 0010c690cc67..cce97385f743 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -200,103 +200,212 @@ out: return 0; } -static int dsa_slave_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, u16 vid, u16 nlm_flags) +static int dsa_bridge_check_vlan_range(struct dsa_switch *ds, + const struct net_device *bridge, + u16 vid_begin, u16 vid_end) { - struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->parent; - int ret = -EOPNOTSUPP; + struct dsa_slave_priv *p; + struct net_device *dev, *vlan_br; + DECLARE_BITMAP(members, DSA_MAX_PORTS); + DECLARE_BITMAP(untagged, DSA_MAX_PORTS); + u16 vid; + int member, err; - if (ds->drv->fdb_add) - ret = ds->drv->fdb_add(ds, p->port, addr, vid); + if (!ds->drv->vlan_getnext || !vid_begin) + return -EOPNOTSUPP; - return ret; + vid = vid_begin - 1; + + do { + err = ds->drv->vlan_getnext(ds, &vid, members, untagged); + if (err) + break; + + if (vid > vid_end) + break; + + member = find_first_bit(members, DSA_MAX_PORTS); + if (member == DSA_MAX_PORTS) + continue; + + dev = ds->ports[member]; + p = netdev_priv(dev); + vlan_br = p->bridge_dev; + if (vlan_br == bridge) + continue; + + netdev_dbg(vlan_br, "hardware VLAN %d already in use\n", vid); + return -EOPNOTSUPP; + } while (vid < vid_end); + + return err == -ENOENT ? 0 : err; } -static int dsa_slave_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, u16 vid) +static int dsa_slave_port_vlan_add(struct net_device *dev, + struct switchdev_obj *obj) { + struct switchdev_obj_vlan *vlan = &obj->u.vlan; struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; - int ret = -EOPNOTSUPP; + u16 vid; + int err; - if (ds->drv->fdb_del) - ret = ds->drv->fdb_del(ds, p->port, addr, vid); + switch (obj->trans) { + case SWITCHDEV_TRANS_PREPARE: + if (!ds->drv->port_vlan_add || !ds->drv->port_pvid_set) + return -EOPNOTSUPP; - return ret; -} + /* If the requested port doesn't belong to the same bridge as + * the VLAN members, fallback to software VLAN (hopefully). + */ + err = dsa_bridge_check_vlan_range(ds, p->bridge_dev, + vlan->vid_begin, + vlan->vid_end); + if (err) + return err; + break; + case SWITCHDEV_TRANS_COMMIT: + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + err = ds->drv->port_vlan_add(ds, p->port, vid, + vlan->flags & + BRIDGE_VLAN_INFO_UNTAGGED); + if (!err && vlan->flags & BRIDGE_VLAN_INFO_PVID) + err = ds->drv->port_pvid_set(ds, p->port, vid); + if (err) + return err; + } + break; + default: + return -EOPNOTSUPP; + } -static int dsa_slave_fill_info(struct net_device *dev, struct sk_buff *skb, - const unsigned char *addr, u16 vid, - bool is_static, - u32 portid, u32 seq, int type, - unsigned int flags) -{ - struct nlmsghdr *nlh; - struct ndmsg *ndm; - - nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags); - if (!nlh) - return -EMSGSIZE; - - ndm = nlmsg_data(nlh); - ndm->ndm_family = AF_BRIDGE; - ndm->ndm_pad1 = 0; - ndm->ndm_pad2 = 0; - ndm->ndm_flags = NTF_EXT_LEARNED; - ndm->ndm_type = 0; - ndm->ndm_ifindex = dev->ifindex; - ndm->ndm_state = is_static ? 
NUD_NOARP : NUD_REACHABLE; - - if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr)) - goto nla_put_failure; - - if (vid && nla_put_u16(skb, NDA_VLAN, vid)) - goto nla_put_failure; - - nlmsg_end(skb, nlh); return 0; - -nla_put_failure: - nlmsg_cancel(skb, nlh); - return -EMSGSIZE; } -/* Dump information about entries, in response to GETNEIGH */ -static int dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, - struct net_device *dev, - struct net_device *filter_dev, int idx) +static int dsa_slave_port_vlan_del(struct net_device *dev, + struct switchdev_obj *obj) +{ + struct switchdev_obj_vlan *vlan = &obj->u.vlan; + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + u16 vid; + int err; + + if (!ds->drv->port_vlan_del) + return -EOPNOTSUPP; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + err = ds->drv->port_vlan_del(ds, p->port, vid); + if (err) + return err; + } + + return 0; +} + +static int dsa_slave_port_vlan_dump(struct net_device *dev, + struct switchdev_obj *obj) +{ + struct switchdev_obj_vlan *vlan = &obj->u.vlan; + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + DECLARE_BITMAP(members, DSA_MAX_PORTS); + DECLARE_BITMAP(untagged, DSA_MAX_PORTS); + u16 pvid, vid = 0; + int err; + + if (!ds->drv->vlan_getnext || !ds->drv->port_pvid_get) + return -EOPNOTSUPP; + + err = ds->drv->port_pvid_get(ds, p->port, &pvid); + if (err) + return err; + + for (;;) { + err = ds->drv->vlan_getnext(ds, &vid, members, untagged); + if (err) + break; + + if (!test_bit(p->port, members)) + continue; + + memset(vlan, 0, sizeof(*vlan)); + vlan->vid_begin = vlan->vid_end = vid; + + if (vid == pvid) + vlan->flags |= BRIDGE_VLAN_INFO_PVID; + + if (test_bit(p->port, untagged)) + vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; + + err = obj->cb(dev, obj); + if (err) + break; + } + + return err == -ENOENT ? 0 : err; +} + +static int dsa_slave_port_fdb_add(struct net_device *dev, + struct switchdev_obj *obj) +{ + struct switchdev_obj_fdb *fdb = &obj->u.fdb; + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + int ret = -EOPNOTSUPP; + + if (obj->trans == SWITCHDEV_TRANS_PREPARE) + ret = ds->drv->port_fdb_add ? 0 : -EOPNOTSUPP; + else if (obj->trans == SWITCHDEV_TRANS_COMMIT) + ret = ds->drv->port_fdb_add(ds, p->port, fdb->addr, fdb->vid); + + return ret; +} + +static int dsa_slave_port_fdb_del(struct net_device *dev, + struct switchdev_obj *obj) +{ + struct switchdev_obj_fdb *fdb = &obj->u.fdb; + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + int ret = -EOPNOTSUPP; + + if (ds->drv->port_fdb_del) + ret = ds->drv->port_fdb_del(ds, p->port, fdb->addr, fdb->vid); + + return ret; +} + +static int dsa_slave_port_fdb_dump(struct net_device *dev, + struct switchdev_obj *obj) { struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; unsigned char addr[ETH_ALEN] = { 0 }; + u16 vid = 0; int ret; - if (!ds->drv->fdb_getnext) + if (!ds->drv->port_fdb_getnext) return -EOPNOTSUPP; - for (; ; idx++) { + for (;;) { bool is_static; - ret = ds->drv->fdb_getnext(ds, p->port, addr, &is_static); + ret = ds->drv->port_fdb_getnext(ds, p->port, addr, &vid, + &is_static); if (ret < 0) break; - if (idx < cb->args[0]) - continue; + obj->u.fdb.addr = addr; + obj->u.fdb.vid = vid; + obj->u.fdb.ndm_state = is_static ? 
NUD_NOARP : NUD_REACHABLE; - ret = dsa_slave_fill_info(dev, skb, addr, 0, - is_static, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - RTM_NEWNEIGH, NLM_F_MULTI); + ret = obj->cb(dev, obj); if (ret < 0) break; } - return idx; + return ret == -ENOENT ? 0 : ret; } static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) @@ -364,6 +473,71 @@ static int dsa_slave_port_attr_set(struct net_device *dev, return ret; } +static int dsa_slave_port_obj_add(struct net_device *dev, + struct switchdev_obj *obj) +{ + int err; + + /* For the prepare phase, ensure the full set of changes is feasible in + * one go in order to signal a failure properly. If an operation is not + * supported, return -EOPNOTSUPP. + */ + + switch (obj->id) { + case SWITCHDEV_OBJ_PORT_FDB: + err = dsa_slave_port_fdb_add(dev, obj); + break; + case SWITCHDEV_OBJ_PORT_VLAN: + err = dsa_slave_port_vlan_add(dev, obj); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int dsa_slave_port_obj_del(struct net_device *dev, + struct switchdev_obj *obj) +{ + int err; + + switch (obj->id) { + case SWITCHDEV_OBJ_PORT_FDB: + err = dsa_slave_port_fdb_del(dev, obj); + break; + case SWITCHDEV_OBJ_PORT_VLAN: + err = dsa_slave_port_vlan_del(dev, obj); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int dsa_slave_port_obj_dump(struct net_device *dev, + struct switchdev_obj *obj) +{ + int err; + + switch (obj->id) { + case SWITCHDEV_OBJ_PORT_FDB: + err = dsa_slave_port_fdb_dump(dev, obj); + break; + case SWITCHDEV_OBJ_PORT_VLAN: + err = dsa_slave_port_vlan_dump(dev, obj); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + static int dsa_slave_bridge_port_join(struct net_device *dev, struct net_device *br) { @@ -765,9 +939,9 @@ static const struct net_device_ops dsa_slave_netdev_ops = { .ndo_change_rx_flags = dsa_slave_change_rx_flags, .ndo_set_rx_mode = dsa_slave_set_rx_mode, .ndo_set_mac_address = dsa_slave_set_mac_address, - .ndo_fdb_add = dsa_slave_fdb_add, - .ndo_fdb_del = dsa_slave_fdb_del, - .ndo_fdb_dump = dsa_slave_fdb_dump, + .ndo_fdb_add = switchdev_port_fdb_add, + .ndo_fdb_del = switchdev_port_fdb_del, + .ndo_fdb_dump = switchdev_port_fdb_dump, .ndo_do_ioctl = dsa_slave_ioctl, .ndo_get_iflink = dsa_slave_get_iflink, #ifdef CONFIG_NET_POLL_CONTROLLER @@ -775,11 +949,17 @@ static const struct net_device_ops dsa_slave_netdev_ops = { .ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup, .ndo_poll_controller = dsa_slave_poll_controller, #endif + .ndo_bridge_getlink = switchdev_port_bridge_getlink, + .ndo_bridge_setlink = switchdev_port_bridge_setlink, + .ndo_bridge_dellink = switchdev_port_bridge_dellink, }; static const struct switchdev_ops dsa_slave_switchdev_ops = { .switchdev_port_attr_get = dsa_slave_port_attr_get, .switchdev_port_attr_set = dsa_slave_port_attr_set, + .switchdev_port_obj_add = dsa_slave_port_obj_add, + .switchdev_port_obj_del = dsa_slave_port_obj_del, + .switchdev_port_obj_dump = dsa_slave_port_obj_dump, }; static void dsa_slave_adjust_link(struct net_device *dev) @@ -834,7 +1014,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p, return -ENODEV; /* Use already configured phy mode */ - p->phy_interface = p->phy->interface; + if (p->phy_interface == PHY_INTERFACE_MODE_NA) + p->phy_interface = p->phy->interface; phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, p->phy_interface); @@ -966,7 +1147,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, slave_dev->features =
master->vlan_features; slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; eth_hw_addr_inherit(slave_dev, master); - slave_dev->tx_queue_len = 0; + slave_dev->priv_flags |= IFF_NO_QUEUE; slave_dev->netdev_ops = &dsa_slave_netdev_ops; slave_dev->switchdev_ops = &dsa_slave_switchdev_ops; diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 77e0f0e7a88e..217127c3a3ef 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -114,7 +114,7 @@ int eth_header(struct sk_buff *skb, struct net_device *dev, EXPORT_SYMBOL(eth_header); /** - * eth_get_headlen - determine the the length of header for an ethernet frame + * eth_get_headlen - determine the length of header for an ethernet frame * @data: pointer to start of frame * @len: total length of frame * diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 44d27469ae55..35a9788bb3ae 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -392,7 +392,7 @@ void hsr_dev_setup(struct net_device *dev) dev->header_ops = &hsr_header_ops; dev->netdev_ops = &hsr_device_ops; SET_NETDEV_DEVTYPE(dev, &hsr_type); - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; dev->destructor = hsr_dev_destroy; diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h index e50f69da78eb..ea339fa94c27 100644 --- a/net/ieee802154/6lowpan/6lowpan_i.h +++ b/net/ieee802154/6lowpan/6lowpan_i.h @@ -5,6 +5,7 @@ #include #include +#include struct lowpan_create_arg { u16 tag; @@ -37,26 +38,18 @@ static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a) } } -struct lowpan_dev_record { - struct net_device *ldev; - struct list_head list; -}; - /* private device info */ struct lowpan_dev_info { struct net_device *real_dev; /* real WPAN device ptr */ - struct mutex dev_list_mtx; /* mutex for list ops */ u16 fragment_tag; }; static inline struct lowpan_dev_info *lowpan_dev_info(const struct net_device *dev) { - return netdev_priv(dev); + return (struct lowpan_dev_info *)lowpan_priv(dev)->priv; } -extern struct list_head lowpan_devices; - int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type); void lowpan_net_frag_exit(void); int lowpan_net_frag_init(void); diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index f20a387a1011..953b1c49f5d1 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c @@ -52,8 +52,7 @@ #include "6lowpan_i.h" -LIST_HEAD(lowpan_devices); -static int lowpan_open_count; +static int open_count; static struct header_ops lowpan_header_ops = { .create = lowpan_header_create, @@ -91,7 +90,7 @@ static void lowpan_setup(struct net_device *dev) dev->hard_header_len = 2 + 1 + 20 + 14; dev->needed_tailroom = 2; /* FCS */ dev->mtu = IPV6_MIN_MTU; - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; dev->flags = IFF_BROADCAST | IFF_MULTICAST; dev->watchdog_timeo = 0; @@ -114,7 +113,6 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net_device *real_dev; - struct lowpan_dev_record *entry; int ret; ASSERT_RTNL(); @@ -133,67 +131,52 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev, return -EINVAL; } - lowpan_dev_info(dev)->real_dev = real_dev; - mutex_init(&lowpan_dev_info(dev)->dev_list_mtx); - - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) { + if (real_dev->ieee802154_ptr->lowpan_dev) { dev_put(real_dev); - lowpan_dev_info(dev)->real_dev = NULL; - return -ENOMEM; + return -EBUSY; } - entry->ldev = dev; - + lowpan_dev_info(dev)->real_dev = 
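/* Editorial sketch, not part of the patch: lowpan_dev_info() above shows
 * the generic-6lowpan accessor pattern. netdev_priv() of an
 * ARPHRD_6LOWPAN device starts with struct lowpan_priv (which records
 * the link-layer type), and the link layer's own data sits behind it at
 * ->priv. A hypothetical link layer "foo" would do the same:
 */
struct lowpan_foo_priv {
	int bar;			/* hypothetical per-device state */
};

static inline struct lowpan_foo_priv *
lowpan_foo_priv(const struct net_device *dev)
{
	return (struct lowpan_foo_priv *)lowpan_priv(dev)->priv;
}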
real_dev; /* Set the lowpan hardware address to the wpan hardware address. */ memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN); - mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); - INIT_LIST_HEAD(&entry->list); - list_add_tail(&entry->list, &lowpan_devices); - mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); + lowpan_netdev_setup(dev, LOWPAN_LLTYPE_IEEE802154); ret = register_netdevice(dev); - if (ret >= 0) { - if (!lowpan_open_count) - lowpan_rx_init(); - lowpan_open_count++; + if (ret < 0) { + dev_put(real_dev); + return ret; } - return ret; + real_dev->ieee802154_ptr->lowpan_dev = dev; + if (!open_count) + lowpan_rx_init(); + + open_count++; + + return 0; } static void lowpan_dellink(struct net_device *dev, struct list_head *head) { struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev); struct net_device *real_dev = lowpan_dev->real_dev; - struct lowpan_dev_record *entry, *tmp; ASSERT_RTNL(); - lowpan_open_count--; - if (!lowpan_open_count) + open_count--; + + if (!open_count) lowpan_rx_exit(); - mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); - list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) { - if (entry->ldev == dev) { - list_del(&entry->list); - kfree(entry); - } - } - mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); - - mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx); - - unregister_netdevice_queue(dev, head); - + real_dev->ieee802154_ptr->lowpan_dev = NULL; + unregister_netdevice(dev); dev_put(real_dev); } static struct rtnl_link_ops lowpan_link_ops __read_mostly = { .kind = "lowpan", - .priv_size = sizeof(struct lowpan_dev_info), + .priv_size = LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev_info)), .setup = lowpan_setup, .newlink = lowpan_newlink, .dellink = lowpan_dellink, @@ -214,19 +197,21 @@ static int lowpan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); - LIST_HEAD(del_list); - struct lowpan_dev_record *entry, *tmp; if (dev->type != ARPHRD_IEEE802154) goto out; - if (event == NETDEV_UNREGISTER) { - list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) { - if (lowpan_dev_info(entry->ldev)->real_dev == dev) - lowpan_dellink(entry->ldev, &del_list); - } - - unregister_netdevice_many(&del_list); + switch (event) { + case NETDEV_UNREGISTER: + /* If the wpan interface gets unregistered, also + * delete any lowpan interface which belongs + * to that wpan interface.
+ */ + if (dev->ieee802154_ptr && dev->ieee802154_ptr->lowpan_dev) + lowpan_dellink(dev->ieee802154_ptr->lowpan_dev, NULL); + break; + default: + break; } out: diff --git a/net/ieee802154/6lowpan/rx.c b/net/ieee802154/6lowpan/rx.c index 4be1d289ab2d..12e10201d263 100644 --- a/net/ieee802154/6lowpan/rx.c +++ b/net/ieee802154/6lowpan/rx.c @@ -15,36 +15,14 @@ #include "6lowpan_i.h" -static int lowpan_give_skb_to_devices(struct sk_buff *skb, - struct net_device *dev) +static int lowpan_give_skb_to_device(struct sk_buff *skb, + struct net_device *dev) { - struct lowpan_dev_record *entry; - struct sk_buff *skb_cp; - int stat = NET_RX_SUCCESS; - + skb->dev = dev->ieee802154_ptr->lowpan_dev; skb->protocol = htons(ETH_P_IPV6); skb->pkt_type = PACKET_HOST; - rcu_read_lock(); - list_for_each_entry_rcu(entry, &lowpan_devices, list) - if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) { - skb_cp = skb_copy(skb, GFP_ATOMIC); - if (!skb_cp) { - kfree_skb(skb); - rcu_read_unlock(); - return NET_RX_DROP; - } - - skb_cp->dev = entry->ldev; - stat = netif_rx(skb_cp); - if (stat == NET_RX_DROP) - break; - } - rcu_read_unlock(); - - consume_skb(skb); - - return stat; + return netif_rx(skb); } static int @@ -89,6 +67,10 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, struct ieee802154_hdr hdr; int ret; + if (dev->type != ARPHRD_IEEE802154 || + !dev->ieee802154_ptr->lowpan_dev) + goto drop; + skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) goto drop; @@ -99,9 +81,6 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, if (skb->pkt_type == PACKET_OTHERHOST) goto drop_skb; - if (dev->type != ARPHRD_IEEE802154) - goto drop_skb; - if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) goto drop_skb; @@ -109,7 +88,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, if (skb->data[0] == LOWPAN_DISPATCH_IPV6) { /* Pull off the 1-byte of 6lowpan header. 
*/ skb_pull(skb, 1); - return lowpan_give_skb_to_devices(skb, NULL); + return lowpan_give_skb_to_device(skb, dev); } else { switch (skb->data[0] & 0xe0) { case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */ @@ -117,7 +96,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, if (ret < 0) goto drop_skb; - return lowpan_give_skb_to_devices(skb, NULL); + return lowpan_give_skb_to_device(skb, dev); case LOWPAN_DISPATCH_FRAG1: /* first fragment header */ ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1); if (ret == 1) { @@ -125,7 +104,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, if (ret < 0) goto drop_skb; - return lowpan_give_skb_to_devices(skb, NULL); + return lowpan_give_skb_to_device(skb, dev); } else if (ret == -1) { return NET_RX_DROP; } else { @@ -138,7 +117,7 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, if (ret < 0) goto drop_skb; - return lowpan_give_skb_to_devices(skb, NULL); + return lowpan_give_skb_to_device(skb, dev); } else if (ret == -1) { return NET_RX_DROP; } else { diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c index 2597abbf7f4b..f6263fc12340 100644 --- a/net/ieee802154/6lowpan/tx.c +++ b/net/ieee802154/6lowpan/tx.c @@ -112,7 +112,7 @@ lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr, frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr); if (IS_ERR(frag)) - return -PTR_ERR(frag); + return PTR_ERR(frag); memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen); memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len); @@ -224,7 +224,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *dev) } else { da.mode = IEEE802154_ADDR_LONG; da.extended_addr = ieee802154_devaddr_from_raw(daddr); - cb->ackreq = wpan_dev->frame_retries >= 0; + cb->ackreq = wpan_dev->ackreq; } return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev, diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index 68f24016860c..1b00a14850cb 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -230,6 +230,8 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = { [NL802154_ATTR_WPAN_PHY_CAPS] = { .type = NLA_NESTED }, [NL802154_ATTR_SUPPORTED_COMMANDS] = { .type = NLA_NESTED }, + + [NL802154_ATTR_ACKREQ_DEFAULT] = { .type = NLA_U8 }, }; /* message building helper */ @@ -458,6 +460,7 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev, CMD(set_max_csma_backoffs, SET_MAX_CSMA_BACKOFFS); CMD(set_max_frame_retries, SET_MAX_FRAME_RETRIES); CMD(set_lbt_mode, SET_LBT_MODE); + CMD(set_ackreq_default, SET_ACKREQ_DEFAULT); if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER) CMD(set_tx_power, SET_TX_POWER); @@ -656,6 +659,10 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, if (nla_put_u8(msg, NL802154_ATTR_LBT_MODE, wpan_dev->lbt)) goto nla_put_failure; + /* ackreq default behaviour */ + if (nla_put_u8(msg, NL802154_ATTR_ACKREQ_DEFAULT, wpan_dev->ackreq)) + goto nla_put_failure; + genlmsg_end(msg, hdr); return 0; @@ -1042,6 +1049,24 @@ static int nl802154_set_lbt_mode(struct sk_buff *skb, struct genl_info *info) return rdev_set_lbt_mode(rdev, wpan_dev, mode); } +static int +nl802154_set_ackreq_default(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg802154_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; + bool ackreq; + + if (netif_running(dev)) + return -EBUSY; + + if 
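/* Editorial sketch, not part of the patch: the driver side of the new
 * NL802154_CMD_SET_ACKREQ_DEFAULT is small; it records the flag on the
 * wpan_dev, and the 6lowpan TX path above picks it up through
 * wpan_dev->ackreq. Function name hypothetical.
 */
static int example_set_ackreq_default(struct wpan_phy *wpan_phy,
				      struct wpan_dev *wpan_dev, bool ackreq)
{
	wpan_dev->ackreq = ackreq;	/* request ACKs for unicast frames */
	return 0;
}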
(!info->attrs[NL802154_ATTR_ACKREQ_DEFAULT]) + return -EINVAL; + + ackreq = !!nla_get_u8(info->attrs[NL802154_ATTR_ACKREQ_DEFAULT]); + return rdev_set_ackreq_default(rdev, wpan_dev, ackreq); +} + #define NL802154_FLAG_NEED_WPAN_PHY 0x01 #define NL802154_FLAG_NEED_NETDEV 0x02 #define NL802154_FLAG_NEED_RTNL 0x04 @@ -1248,6 +1273,14 @@ static const struct genl_ops nl802154_ops[] = { .internal_flags = NL802154_FLAG_NEED_NETDEV | NL802154_FLAG_NEED_RTNL, }, + { + .cmd = NL802154_CMD_SET_ACKREQ_DEFAULT, + .doit = nl802154_set_ackreq_default, + .policy = nl802154_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL802154_FLAG_NEED_NETDEV | + NL802154_FLAG_NEED_RTNL, + }, }; /* initialisation/exit functions */ diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h index 8d5960a37195..03b357501cc5 100644 --- a/net/ieee802154/rdev-ops.h +++ b/net/ieee802154/rdev-ops.h @@ -195,4 +195,17 @@ rdev_set_lbt_mode(struct cfg802154_registered_device *rdev, return ret; } +static inline int +rdev_set_ackreq_default(struct cfg802154_registered_device *rdev, + struct wpan_dev *wpan_dev, bool ackreq) +{ + int ret; + + trace_802154_rdev_set_ackreq_default(&rdev->wpan_phy, wpan_dev, + ackreq); + ret = rdev->ops->set_ackreq_default(&rdev->wpan_phy, wpan_dev, ackreq); + trace_802154_rdev_return_int(&rdev->wpan_phy, ret); + return ret; +} + #endif /* __CFG802154_RDEV_OPS */ diff --git a/net/ieee802154/trace.h b/net/ieee802154/trace.h index 4399b7fbaa31..9a471e41ec73 100644 --- a/net/ieee802154/trace.h +++ b/net/ieee802154/trace.h @@ -275,6 +275,25 @@ TRACE_EVENT(802154_rdev_set_lbt_mode, WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->mode)) ); +TRACE_EVENT(802154_rdev_set_ackreq_default, + TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, + bool ackreq), + TP_ARGS(wpan_phy, wpan_dev, ackreq), + TP_STRUCT__entry( + WPAN_PHY_ENTRY + WPAN_DEV_ENTRY + __field(bool, ackreq) + ), + TP_fast_assign( + WPAN_PHY_ASSIGN; + WPAN_DEV_ASSIGN; + __entry->ackreq = ackreq; + ), + TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT + ", ackreq default: %s", WPAN_PHY_PR_ARG, + WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->ackreq)) +); + TRACE_EVENT(802154_rdev_return_int, TP_PROTO(struct wpan_phy *wpan_phy, int ret), TP_ARGS(wpan_phy, ret), diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index cc4e498a0ccf..675e88cac2b4 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -119,6 +119,7 @@ #ifdef CONFIG_IP_MROUTE #include #endif +#include /* The inetsw table contains everything that inet_create needs to @@ -427,6 +428,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct net *net = sock_net(sk); unsigned short snum; int chk_addr_ret; + int tb_id = RT_TABLE_LOCAL; int err; /* If the socket has its own bind function then use it. (RAW) */ @@ -448,7 +450,8 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) goto out; } - chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr); + tb_id = vrf_dev_table_ifindex(net, sk->sk_bound_dev_if) ? : tb_id; + chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id); /* Not specified by any standard per-se, however it breaks too * many applications when removed. 
It is unfortunate since diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 34a308573f4b..30409b75e925 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -233,7 +233,7 @@ static int arp_constructor(struct neighbour *neigh) return -EINVAL; } - neigh->type = inet_addr_type(dev_net(dev), addr); + neigh->type = inet_addr_type_dev_table(dev_net(dev), dev, addr); parms = in_dev->arp_parms; __neigh_parms_put(neigh->parms); @@ -343,7 +343,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb) switch (IN_DEV_ARP_ANNOUNCE(in_dev)) { default: case 0: /* By default announce any local IP */ - if (skb && inet_addr_type(dev_net(dev), + if (skb && inet_addr_type_dev_table(dev_net(dev), dev, ip_hdr(skb)->saddr) == RTN_LOCAL) saddr = ip_hdr(skb)->saddr; break; @@ -351,7 +351,8 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb) if (!skb) break; saddr = ip_hdr(skb)->saddr; - if (inet_addr_type(dev_net(dev), saddr) == RTN_LOCAL) { + if (inet_addr_type_dev_table(dev_net(dev), dev, + saddr) == RTN_LOCAL) { /* saddr should be known to target */ if (inet_addr_onlink(in_dev, target, saddr)) break; @@ -751,7 +752,7 @@ static int arp_process(struct sock *sk, struct sk_buff *skb) /* Special case: IPv4 duplicate address detection packet (RFC2131) */ if (sip == 0) { if (arp->ar_op == htons(ARPOP_REQUEST) && - inet_addr_type(net, tip) == RTN_LOCAL && + inet_addr_type_dev_table(net, dev, tip) == RTN_LOCAL && !arp_ignore(in_dev, sip, tip)) arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha, dev->dev_addr, sha); @@ -811,16 +812,18 @@ static int arp_process(struct sock *sk, struct sk_buff *skb) n = __neigh_lookup(&arp_tbl, &sip, dev, 0); if (IN_DEV_ARP_ACCEPT(in_dev)) { + unsigned int addr_type = inet_addr_type_dev_table(net, dev, sip); + /* Unsolicited ARP is not accepted by default. 
It is possible, that this option should be enabled for some devices (strip is candidate) */ is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip && - inet_addr_type(net, sip) == RTN_UNICAST; + addr_type == RTN_UNICAST; if (!n && ((arp->ar_op == htons(ARPOP_REPLY) && - inet_addr_type(net, sip) == RTN_UNICAST) || is_garp)) + addr_type == RTN_UNICAST) || is_garp)) n = __neigh_lookup(&arp_tbl, &sip, dev, 1); } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 6b98de0d7949..7fa277176c33 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -45,6 +45,7 @@ #include #include #include +#include #ifndef CONFIG_IP_MULTIPLE_TABLES @@ -211,12 +212,12 @@ void fib_flush_external(struct net *net) */ static inline unsigned int __inet_dev_addr_type(struct net *net, const struct net_device *dev, - __be32 addr) + __be32 addr, int tb_id) { struct flowi4 fl4 = { .daddr = addr }; struct fib_result res; unsigned int ret = RTN_BROADCAST; - struct fib_table *local_table; + struct fib_table *table; if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr)) return RTN_BROADCAST; @@ -225,10 +226,10 @@ static inline unsigned int __inet_dev_addr_type(struct net *net, rcu_read_lock(); - local_table = fib_get_table(net, RT_TABLE_LOCAL); - if (local_table) { + table = fib_get_table(net, tb_id); + if (table) { ret = RTN_UNICAST; - if (!fib_table_lookup(local_table, &fl4, &res, FIB_LOOKUP_NOREF)) { + if (!fib_table_lookup(table, &fl4, &res, FIB_LOOKUP_NOREF)) { if (!dev || dev == res.fi->fib_dev) ret = res.type; } @@ -238,19 +239,40 @@ static inline unsigned int __inet_dev_addr_type(struct net *net, return ret; } +unsigned int inet_addr_type_table(struct net *net, __be32 addr, int tb_id) +{ + return __inet_dev_addr_type(net, NULL, addr, tb_id); +} +EXPORT_SYMBOL(inet_addr_type_table); + unsigned int inet_addr_type(struct net *net, __be32 addr) { - return __inet_dev_addr_type(net, NULL, addr); + return __inet_dev_addr_type(net, NULL, addr, RT_TABLE_LOCAL); } EXPORT_SYMBOL(inet_addr_type); unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr) { - return __inet_dev_addr_type(net, dev, addr); + int rt_table = vrf_dev_table(dev) ? : RT_TABLE_LOCAL; + + return __inet_dev_addr_type(net, dev, addr, rt_table); } EXPORT_SYMBOL(inet_dev_addr_type); +/* inet_addr_type with dev == NULL but using the table from a dev + * if one is associated + */ +unsigned int inet_addr_type_dev_table(struct net *net, + const struct net_device *dev, + __be32 addr) +{ + int rt_table = vrf_dev_table(dev) ? : RT_TABLE_LOCAL; + + return __inet_dev_addr_type(net, NULL, addr, rt_table); +} +EXPORT_SYMBOL(inet_addr_type_dev_table); + __be32 fib_compute_spec_dst(struct sk_buff *skb) { struct net_device *dev = skb->dev; @@ -309,7 +331,9 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, bool dev_match; fl4.flowi4_oif = 0; - fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX; + fl4.flowi4_iif = vrf_master_ifindex_rcu(dev); + if (!fl4.flowi4_iif) + fl4.flowi4_iif = oif ? 
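/* Editorial note, not part of the patch: the bare "a ? : b" used by the
 * VRF helpers throughout this series is GNU C's conditional with an
 * omitted middle operand: it yields a if a is non-zero, else b, and
 * evaluates a only once. Table and ifindex selection therefore degrade
 * gracefully to the non-VRF defaults. Spelled-out equivalent
 * (hypothetical helper):
 */
static inline int example_pick_table(const struct net_device *dev)
{
	int tb_id = vrf_dev_table(dev);	/* 0 when dev has no VRF master */

	return tb_id ? tb_id : RT_TABLE_LOCAL;
}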
: LOOPBACK_IFINDEX; fl4.daddr = src; fl4.saddr = dst; fl4.flowi4_tos = tos; @@ -339,6 +363,9 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, if (nh->nh_dev == dev) { dev_match = true; break; + } else if (vrf_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) { + dev_match = true; + break; } } #else @@ -496,9 +523,12 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt, addr = sk_extract_addr(&rt->rt_gateway); if (rt->rt_gateway.sa_family == AF_INET && addr) { + unsigned int addr_type; + cfg->fc_gw = addr; + addr_type = inet_addr_type_table(net, addr, cfg->fc_table); if (rt->rt_flags & RTF_GATEWAY && - inet_addr_type(net, addr) == RTN_UNICAST) + addr_type == RTN_UNICAST) cfg->fc_scope = RT_SCOPE_UNIVERSE; } @@ -770,6 +800,7 @@ out: static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa) { struct net *net = dev_net(ifa->ifa_dev->dev); + int tb_id = vrf_dev_table_rtnl(ifa->ifa_dev->dev); struct fib_table *tb; struct fib_config cfg = { .fc_protocol = RTPROT_KERNEL, @@ -784,11 +815,10 @@ static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifad }, }; - if (type == RTN_UNICAST) - tb = fib_new_table(net, RT_TABLE_MAIN); - else - tb = fib_new_table(net, RT_TABLE_LOCAL); + if (!tb_id) + tb_id = (type == RTN_UNICAST) ? RT_TABLE_MAIN : RT_TABLE_LOCAL; + tb = fib_new_table(net, tb_id); if (!tb) return; @@ -970,11 +1000,14 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim) fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim); } if (!(ok & LOCAL_OK)) { + unsigned int addr_type; + fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim); /* Check, that this local address finally disappeared. */ - if (gone && - inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) { + addr_type = inet_addr_type_dev_table(dev_net(dev), dev, + ifa->ifa_local); + if (gone && addr_type != RTN_LOCAL) { /* And the last, but not the least thing. * We must flush stray FIB entries. 
* diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 558e196bae0f..01f1c7dcd329 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -533,13 +533,13 @@ errout: #endif -int fib_encap_match(struct net *net, u16 encap_type, - struct nlattr *encap, - int oif, const struct fib_nh *nh) +static int fib_encap_match(struct net *net, u16 encap_type, + struct nlattr *encap, + int oif, const struct fib_nh *nh) { struct lwtunnel_state *lwtstate; struct net_device *dev = NULL; - int ret; + int ret, result = 0; if (encap_type == LWTUNNEL_ENCAP_NONE) return 0; @@ -548,10 +548,12 @@ int fib_encap_match(struct net *net, u16 encap_type, dev = __dev_get_by_index(net, oif); ret = lwtunnel_build_state(dev, encap_type, encap, &lwtstate); - if (!ret) - return lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate); + if (!ret) { + result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate); + lwtstate_free(lwtstate); + } - return 0; + return result; } int fib_nh_match(struct fib_config *cfg, struct fib_info *fi) @@ -670,16 +672,18 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi, struct fib_result res; if (nh->nh_flags & RTNH_F_ONLINK) { + unsigned int addr_type; if (cfg->fc_scope >= RT_SCOPE_LINK) return -EINVAL; - if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST) - return -EINVAL; dev = __dev_get_by_index(net, nh->nh_oif); if (!dev) return -ENODEV; if (!(dev->flags & IFF_UP)) return -ENETDOWN; + addr_type = inet_addr_type_dev_table(net, dev, nh->nh_gw); + if (addr_type != RTN_UNICAST) + return -EINVAL; if (!netif_carrier_ok(dev)) nh->nh_flags |= RTNH_F_LINKDOWN; nh->nh_dev = dev; @@ -689,6 +693,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi, } rcu_read_lock(); { + struct fib_table *tbl = NULL; struct flowi4 fl4 = { .daddr = nh->nh_gw, .flowi4_scope = cfg->fc_scope + 1, @@ -699,8 +704,24 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi, /* It is not necessary, but requires a bit of thinking */ if (fl4.flowi4_scope < RT_SCOPE_LINK) fl4.flowi4_scope = RT_SCOPE_LINK; - err = fib_lookup(net, &fl4, &res, - FIB_LOOKUP_IGNORE_LINKSTATE); + + if (cfg->fc_table) + tbl = fib_get_table(net, cfg->fc_table); + + if (tbl) + err = fib_table_lookup(tbl, &fl4, &res, + FIB_LOOKUP_IGNORE_LINKSTATE | + FIB_LOOKUP_NOREF); + + /* on error or if no table given do full lookup. 
This + * is needed for example when nexthops are in the local + * table rather than the given table + */ + if (!tbl || err) { + err = fib_lookup(net, &fl4, &res, + FIB_LOOKUP_IGNORE_LINKSTATE); + } + if (err) { rcu_read_unlock(); return err; @@ -836,6 +857,23 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh) return nh->nh_saddr; } +static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc) +{ + if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst || + fib_prefsrc != cfg->fc_dst) { + int tb_id = cfg->fc_table; + + if (tb_id == RT_TABLE_MAIN) + tb_id = RT_TABLE_LOCAL; + + if (inet_addr_type_table(cfg->fc_nlinfo.nl_net, + fib_prefsrc, tb_id) != RTN_LOCAL) { + return false; + } + } + return true; +} + struct fib_info *fib_create_info(struct fib_config *cfg) { int err; @@ -1031,12 +1069,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg) fi->fib_flags |= RTNH_F_LINKDOWN; } - if (fi->fib_prefsrc) { - if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst || - fi->fib_prefsrc != cfg->fc_dst) - if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL) - goto err_inval; - } + if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc)) + goto err_inval; change_nexthops(fi) { fib_info_update_nh_saddr(net, nexthop_nh); diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 37c4bb89a708..1243c79cb5b0 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1423,8 +1423,11 @@ found: nh->nh_flags & RTNH_F_LINKDOWN && !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE)) continue; - if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif) - continue; + if (!(flp->flowi4_flags & FLOWI_FLAG_VRFSRC)) { + if (flp->flowi4_oif && + flp->flowi4_oif != nh->nh_oif) + continue; + } if (!(fib_flags & FIB_LOOKUP_NOREF)) atomic_inc(&fi->fib_clntref); diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index 4a7b5b2a1ce3..d9c552a721fc 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c @@ -31,7 +31,6 @@ #include static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly; -static struct gre_cisco_protocol __rcu *gre_cisco_proto_list[GRE_IP_PROTO_MAX]; int gre_add_protocol(const struct gre_protocol *proto, u8 version) { @@ -61,197 +60,6 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version) } EXPORT_SYMBOL_GPL(gre_del_protocol); -void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi, - int hdr_len) -{ - struct gre_base_hdr *greh; - - skb_push(skb, hdr_len); - - skb_reset_transport_header(skb); - greh = (struct gre_base_hdr *)skb->data; - greh->flags = tnl_flags_to_gre_flags(tpi->flags); - greh->protocol = tpi->proto; - - if (tpi->flags&(TUNNEL_KEY|TUNNEL_CSUM|TUNNEL_SEQ)) { - __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4); - - if (tpi->flags&TUNNEL_SEQ) { - *ptr = tpi->seq; - ptr--; - } - if (tpi->flags&TUNNEL_KEY) { - *ptr = tpi->key; - ptr--; - } - if (tpi->flags&TUNNEL_CSUM && - !(skb_shinfo(skb)->gso_type & - (SKB_GSO_GRE|SKB_GSO_GRE_CSUM))) { - *ptr = 0; - *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0, - skb->len, 0)); - } - } -} -EXPORT_SYMBOL_GPL(gre_build_header); - -static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, - bool *csum_err) -{ - const struct gre_base_hdr *greh; - __be32 *options; - int hdr_len; - - if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr)))) - return -EINVAL; - - greh = (struct gre_base_hdr *)skb_transport_header(skb); - if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING))) - return -EINVAL; - - tpi->flags = gre_flags_to_tnl_flags(greh->flags); - 
hdr_len = ip_gre_calc_hlen(tpi->flags); - - if (!pskb_may_pull(skb, hdr_len)) - return -EINVAL; - - greh = (struct gre_base_hdr *)skb_transport_header(skb); - tpi->proto = greh->protocol; - - options = (__be32 *)(greh + 1); - if (greh->flags & GRE_CSUM) { - if (skb_checksum_simple_validate(skb)) { - *csum_err = true; - return -EINVAL; - } - - skb_checksum_try_convert(skb, IPPROTO_GRE, 0, - null_compute_pseudo); - - options++; - } - - if (greh->flags & GRE_KEY) { - tpi->key = *options; - options++; - } else - tpi->key = 0; - - if (unlikely(greh->flags & GRE_SEQ)) { - tpi->seq = *options; - options++; - } else - tpi->seq = 0; - - /* WCCP version 1 and 2 protocol decoding. - * - Change protocol to IP - * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header - */ - if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) { - tpi->proto = htons(ETH_P_IP); - if ((*(u8 *)options & 0xF0) != 0x40) { - hdr_len += 4; - if (!pskb_may_pull(skb, hdr_len)) - return -EINVAL; - } - } - - return iptunnel_pull_header(skb, hdr_len, tpi->proto); -} - -static int gre_cisco_rcv(struct sk_buff *skb) -{ - struct tnl_ptk_info tpi; - int i; - bool csum_err = false; - -#ifdef CONFIG_NET_IPGRE_BROADCAST - if (ipv4_is_multicast(ip_hdr(skb)->daddr)) { - /* Looped back packet, drop it! */ - if (rt_is_output_route(skb_rtable(skb))) - goto drop; - } -#endif - - if (parse_gre_header(skb, &tpi, &csum_err) < 0) - goto drop; - - rcu_read_lock(); - for (i = 0; i < GRE_IP_PROTO_MAX; i++) { - struct gre_cisco_protocol *proto; - int ret; - - proto = rcu_dereference(gre_cisco_proto_list[i]); - if (!proto) - continue; - ret = proto->handler(skb, &tpi); - if (ret == PACKET_RCVD) { - rcu_read_unlock(); - return 0; - } - } - rcu_read_unlock(); - - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); -drop: - kfree_skb(skb); - return 0; -} - -static void gre_cisco_err(struct sk_buff *skb, u32 info) -{ - /* All the routers (except for Linux) return only - * 8 bytes of packet payload. It means, that precise relaying of - * ICMP in the real Internet is absolutely infeasible. - * - * Moreover, Cisco "wise men" put GRE key to the third word - * in GRE header. It makes impossible maintaining even soft - * state for keyed - * GRE tunnels with enabled checksum. Tell them "thank you". - * - * Well, I wonder, rfc1812 was written by Cisco employee, - * what the hell these idiots break standards established - * by themselves??? - */ - - const int type = icmp_hdr(skb)->type; - const int code = icmp_hdr(skb)->code; - struct tnl_ptk_info tpi; - bool csum_err = false; - int i; - - if (parse_gre_header(skb, &tpi, &csum_err)) { - if (!csum_err) /* ignore csum errors. 
*/ - return; - } - - if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { - ipv4_update_pmtu(skb, dev_net(skb->dev), info, - skb->dev->ifindex, 0, IPPROTO_GRE, 0); - return; - } - if (type == ICMP_REDIRECT) { - ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0, - IPPROTO_GRE, 0); - return; - } - - rcu_read_lock(); - for (i = 0; i < GRE_IP_PROTO_MAX; i++) { - struct gre_cisco_protocol *proto; - - proto = rcu_dereference(gre_cisco_proto_list[i]); - if (!proto) - continue; - - if (proto->err_handler(skb, info, &tpi) == PACKET_RCVD) - goto out; - - } -out: - rcu_read_unlock(); -} - static int gre_rcv(struct sk_buff *skb) { const struct gre_protocol *proto; @@ -302,60 +110,19 @@ static const struct net_protocol net_gre_protocol = { .netns_ok = 1, }; -static const struct gre_protocol ipgre_protocol = { - .handler = gre_cisco_rcv, - .err_handler = gre_cisco_err, -}; - -int gre_cisco_register(struct gre_cisco_protocol *newp) -{ - struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **) - &gre_cisco_proto_list[newp->priority]; - - return (cmpxchg(proto, NULL, newp) == NULL) ? 0 : -EBUSY; -} -EXPORT_SYMBOL_GPL(gre_cisco_register); - -int gre_cisco_unregister(struct gre_cisco_protocol *del_proto) -{ - struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **) - &gre_cisco_proto_list[del_proto->priority]; - int ret; - - ret = (cmpxchg(proto, del_proto, NULL) == del_proto) ? 0 : -EINVAL; - - if (ret) - return ret; - - synchronize_net(); - return 0; -} -EXPORT_SYMBOL_GPL(gre_cisco_unregister); - static int __init gre_init(void) { pr_info("GRE over IPv4 demultiplexor driver\n"); if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) { pr_err("can't add protocol\n"); - goto err; + return -EAGAIN; } - - if (gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) { - pr_info("%s: can't add ipgre handler\n", __func__); - goto err_gre; - } - return 0; -err_gre: - inet_del_protocol(&net_gre_protocol, IPPROTO_GRE); -err: - return -EAGAIN; } static void __exit gre_exit(void) { - gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO); inet_del_protocol(&net_gre_protocol, IPPROTO_GRE); } diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index c0556f1e4bf0..f16488efa1c8 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -96,6 +96,7 @@ #include #include #include +#include /* * Build xmit assembly blocks @@ -425,6 +426,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) fl4.flowi4_mark = mark; fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); fl4.flowi4_proto = IPPROTO_ICMP; + fl4.flowi4_oif = vrf_master_ifindex(skb->dev) ? : skb->dev->ifindex; security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) @@ -458,6 +460,8 @@ static struct rtable *icmp_route_lookup(struct net *net, fl4->flowi4_proto = IPPROTO_ICMP; fl4->fl4_icmp_type = type; fl4->fl4_icmp_code = code; + fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev) ? 
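/* Editorial note, not part of the patch: both icmp_reply() and
 * icmp_route_lookup() now pin flowi4_oif, so the route lookup for the
 * ICMP reply or error happens in the VRF of the ingress device when one
 * exists, and on the device itself otherwise. A shared helper would look
 * like this (hypothetical):
 */
static inline int example_icmp_oif(const struct net_device *dev)
{
	/* vrf_master_ifindex() returns 0 outside any VRF */
	return vrf_master_ifindex(dev) ? : dev->ifindex;
}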
: skb_in->dev->ifindex; + security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); rt = __ip_route_output_key(net, fl4); if (IS_ERR(rt)) @@ -480,7 +484,8 @@ static struct rtable *icmp_route_lookup(struct net *net, if (err) goto relookup_failed; - if (inet_addr_type(net, fl4_dec.saddr) == RTN_LOCAL) { + if (inet_addr_type_dev_table(net, skb_in->dev, + fl4_dec.saddr) == RTN_LOCAL) { rt2 = __ip_route_output_key(net, &fl4_dec); if (IS_ERR(rt2)) err = PTR_ERR(rt2); @@ -829,7 +834,7 @@ static bool icmp_unreach(struct sk_buff *skb) */ if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses && - inet_addr_type(net, iph->daddr) == RTN_BROADCAST) { + inet_addr_type_dev_table(net, skb->dev, iph->daddr) == RTN_BROADCAST) { net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n", &ip_hdr(skb)->saddr, icmph->type, icmph->code, diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 60021d0d9326..05e3145f7dc3 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue, } spin_unlock(&queue->syn_wait_lock); - if (del_timer(&req->rsk_timer)) + if (del_timer_sync(&req->rsk_timer)) reqsk_put(req); return found; } diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index d96722ae8979..15762e758861 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -48,6 +48,7 @@ #include #include #include +#include /* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c @@ -77,6 +78,7 @@ struct ipq { u8 ecn; /* RFC3168 support */ u16 max_df_size; /* largest frag with DF set seen */ int iif; + int vif; /* VRF device index */ unsigned int rid; struct inet_peer *peer; }; @@ -99,6 +101,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, struct ip4_create_arg { struct iphdr *iph; u32 user; + int vif; }; static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot) @@ -127,7 +130,8 @@ static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a) qp->saddr == arg->iph->saddr && qp->daddr == arg->iph->daddr && qp->protocol == arg->iph->protocol && - qp->user == arg->user; + qp->user == arg->user && + qp->vif == arg->vif; } static void ip4_frag_init(struct inet_frag_queue *q, const void *a) @@ -144,6 +148,7 @@ static void ip4_frag_init(struct inet_frag_queue *q, const void *a) qp->ecn = ip4_frag_ecn(arg->iph->tos); qp->saddr = arg->iph->saddr; qp->daddr = arg->iph->daddr; + qp->vif = arg->vif; qp->user = arg->user; qp->peer = sysctl_ipfrag_max_dist ? inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL; @@ -244,7 +249,8 @@ out: /* Find the correct entry in the "incomplete datagrams" queue for * this IP datagram, and create new one, if nothing is found. */ -static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user) +static struct ipq *ip_find(struct net *net, struct iphdr *iph, + u32 user, int vif) { struct inet_frag_queue *q; struct ip4_create_arg arg; @@ -252,6 +258,7 @@ static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user) arg.iph = iph; arg.user = user; + arg.vif = vif; hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol); @@ -648,14 +655,15 @@ out_fail: /* Process an incoming IP datagram fragment. */ int ip_defrag(struct sk_buff *skb, u32 user) { + struct net_device *dev = skb->dev ? 
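/* Editorial note, not part of the patch: adding "vif" to ip4_frag_match()
 * means a reassembly queue is keyed on the tuple below, so fragments of
 * otherwise identical datagrams seen in different VRFs are never merged
 * into one datagram. Struct is illustrative only, mirroring
 * ip4_create_arg and struct ipq:
 */
struct example_ipfrag_key {
	__be16	id;		/* IP identification */
	__be32	saddr, daddr;
	u8	protocol;
	u32	user;		/* defrag user (IP_DEFRAG_*) */
	int	vif;		/* VRF master ifindex, 0 if none */
};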
: skb_dst(skb)->dev; + int vif = vrf_master_ifindex_rcu(dev); + struct net *net = dev_net(dev); struct ipq *qp; - struct net *net; - net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev); IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); /* Lookup (or create) queue header */ - qp = ip_find(net, ip_hdr(skb), user); + qp = ip_find(net, ip_hdr(skb), user, vif); if (qp) { int ret; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 5fd706473c73..fb44d693796e 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -47,6 +48,7 @@ #include #include #include +#include #if IS_ENABLED(CONFIG_IPV6) #include @@ -121,8 +123,127 @@ static int ipgre_tunnel_init(struct net_device *dev); static int ipgre_net_id __read_mostly; static int gre_tap_net_id __read_mostly; -static int ipgre_err(struct sk_buff *skb, u32 info, - const struct tnl_ptk_info *tpi) +static int ip_gre_calc_hlen(__be16 o_flags) +{ + int addend = 4; + + if (o_flags & TUNNEL_CSUM) + addend += 4; + if (o_flags & TUNNEL_KEY) + addend += 4; + if (o_flags & TUNNEL_SEQ) + addend += 4; + return addend; +} + +static __be16 gre_flags_to_tnl_flags(__be16 flags) +{ + __be16 tflags = 0; + + if (flags & GRE_CSUM) + tflags |= TUNNEL_CSUM; + if (flags & GRE_ROUTING) + tflags |= TUNNEL_ROUTING; + if (flags & GRE_KEY) + tflags |= TUNNEL_KEY; + if (flags & GRE_SEQ) + tflags |= TUNNEL_SEQ; + if (flags & GRE_STRICT) + tflags |= TUNNEL_STRICT; + if (flags & GRE_REC) + tflags |= TUNNEL_REC; + if (flags & GRE_VERSION) + tflags |= TUNNEL_VERSION; + + return tflags; +} + +static __be16 tnl_flags_to_gre_flags(__be16 tflags) +{ + __be16 flags = 0; + + if (tflags & TUNNEL_CSUM) + flags |= GRE_CSUM; + if (tflags & TUNNEL_ROUTING) + flags |= GRE_ROUTING; + if (tflags & TUNNEL_KEY) + flags |= GRE_KEY; + if (tflags & TUNNEL_SEQ) + flags |= GRE_SEQ; + if (tflags & TUNNEL_STRICT) + flags |= GRE_STRICT; + if (tflags & TUNNEL_REC) + flags |= GRE_REC; + if (tflags & TUNNEL_VERSION) + flags |= GRE_VERSION; + + return flags; +} + +static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, + bool *csum_err) +{ + const struct gre_base_hdr *greh; + __be32 *options; + int hdr_len; + + if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr)))) + return -EINVAL; + + greh = (struct gre_base_hdr *)skb_transport_header(skb); + if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING))) + return -EINVAL; + + tpi->flags = gre_flags_to_tnl_flags(greh->flags); + hdr_len = ip_gre_calc_hlen(tpi->flags); + + if (!pskb_may_pull(skb, hdr_len)) + return -EINVAL; + + greh = (struct gre_base_hdr *)skb_transport_header(skb); + tpi->proto = greh->protocol; + + options = (__be32 *)(greh + 1); + if (greh->flags & GRE_CSUM) { + if (skb_checksum_simple_validate(skb)) { + *csum_err = true; + return -EINVAL; + } + + skb_checksum_try_convert(skb, IPPROTO_GRE, 0, + null_compute_pseudo); + options++; + } + + if (greh->flags & GRE_KEY) { + tpi->key = *options; + options++; + } else { + tpi->key = 0; + } + if (unlikely(greh->flags & GRE_SEQ)) { + tpi->seq = *options; + options++; + } else { + tpi->seq = 0; + } + /* WCCP version 1 and 2 protocol decoding. 
+ * - Change protocol to IP + * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header + */ + if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) { + tpi->proto = htons(ETH_P_IP); + if ((*(u8 *)options & 0xF0) != 0x40) { + hdr_len += 4; + if (!pskb_may_pull(skb, hdr_len)) + return -EINVAL; + } + } + return iptunnel_pull_header(skb, hdr_len, tpi->proto); +} + +static void ipgre_err(struct sk_buff *skb, u32 info, + const struct tnl_ptk_info *tpi) { /* All the routers (except for Linux) return only @@ -148,14 +269,14 @@ static int ipgre_err(struct sk_buff *skb, u32 info, switch (type) { default: case ICMP_PARAMETERPROB: - return PACKET_RCVD; + return; case ICMP_DEST_UNREACH: switch (code) { case ICMP_SR_FAILED: case ICMP_PORT_UNREACH: /* Impossible event. */ - return PACKET_RCVD; + return; default: /* All others are translated to HOST_UNREACH. rfc2003 contains "deep thoughts" about NET_UNREACH, @@ -164,9 +285,10 @@ static int ipgre_err(struct sk_buff *skb, u32 info, break; } break; + case ICMP_TIME_EXCEEDED: if (code != ICMP_EXC_TTL) - return PACKET_RCVD; + return; break; case ICMP_REDIRECT: @@ -183,26 +305,85 @@ static int ipgre_err(struct sk_buff *skb, u32 info, iph->daddr, iph->saddr, tpi->key); if (!t) - return PACKET_REJECT; + return; if (t->parms.iph.daddr == 0 || ipv4_is_multicast(t->parms.iph.daddr)) - return PACKET_RCVD; + return; if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) - return PACKET_RCVD; + return; if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO)) t->err_count++; else t->err_count = 1; t->err_time = jiffies; - return PACKET_RCVD; +} + +static void gre_err(struct sk_buff *skb, u32 info) +{ + /* All the routers (except for Linux) return only + * 8 bytes of packet payload. It means, that precise relaying of + * ICMP in the real Internet is absolutely infeasible. + * + * Moreover, Cisco "wise men" put GRE key to the third word + * in GRE header. It makes impossible maintaining even soft + * state for keyed + * GRE tunnels with enabled checksum. Tell them "thank you". + * + * Well, I wonder, rfc1812 was written by Cisco employee, + * what the hell these idiots break standards established + * by themselves??? + */ + + const int type = icmp_hdr(skb)->type; + const int code = icmp_hdr(skb)->code; + struct tnl_ptk_info tpi; + bool csum_err = false; + + if (parse_gre_header(skb, &tpi, &csum_err)) { + if (!csum_err) /* ignore csum errors. */ + return; + } + + if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { + ipv4_update_pmtu(skb, dev_net(skb->dev), info, + skb->dev->ifindex, 0, IPPROTO_GRE, 0); + return; + } + if (type == ICMP_REDIRECT) { + ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0, + IPPROTO_GRE, 0); + return; + } + + ipgre_err(skb, info, &tpi); +} + +static __be64 key_to_tunnel_id(__be32 key) +{ +#ifdef __BIG_ENDIAN + return (__force __be64)((__force u32)key); +#else + return (__force __be64)((__force u64)key << 32); +#endif +} + +/* Returns the least-significant 32 bits of a __be64. 
*/ +static __be32 tunnel_id_to_key(__be64 x) +{ +#ifdef __BIG_ENDIAN + return (__force __be32)x; +#else + return (__force __be32)((__force u64)x >> 32); +#endif } static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) { struct net *net = dev_net(skb->dev); + struct metadata_dst *tun_dst = NULL; struct ip_tunnel_net *itn; const struct iphdr *iph; struct ip_tunnel *tunnel; @@ -218,40 +399,194 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi) if (tunnel) { skb_pop_mac_header(skb); - ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error); + if (tunnel->collect_md) { + struct ip_tunnel_info *info; + + tun_dst = metadata_dst_alloc(0, GFP_ATOMIC); + if (!tun_dst) + return PACKET_REJECT; + + info = &tun_dst->u.tun_info; + info->key.ipv4_src = iph->saddr; + info->key.ipv4_dst = iph->daddr; + info->key.ipv4_tos = iph->tos; + info->key.ipv4_ttl = iph->ttl; + + info->mode = IP_TUNNEL_INFO_RX; + info->key.tun_flags = tpi->flags & + (TUNNEL_CSUM | TUNNEL_KEY); + info->key.tun_id = key_to_tunnel_id(tpi->key); + + info->key.tp_src = 0; + info->key.tp_dst = 0; + } + + ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error); return PACKET_RCVD; } return PACKET_REJECT; } +static int gre_rcv(struct sk_buff *skb) +{ + struct tnl_ptk_info tpi; + bool csum_err = false; + +#ifdef CONFIG_NET_IPGRE_BROADCAST + if (ipv4_is_multicast(ip_hdr(skb)->daddr)) { + /* Looped back packet, drop it! */ + if (rt_is_output_route(skb_rtable(skb))) + goto drop; + } +#endif + + if (parse_gre_header(skb, &tpi, &csum_err) < 0) + goto drop; + + if (ipgre_rcv(skb, &tpi) == PACKET_RCVD) + return 0; + + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); +drop: + kfree_skb(skb); + return 0; +} + +static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags, + __be16 proto, __be32 key, __be32 seq) +{ + struct gre_base_hdr *greh; + + skb_push(skb, hdr_len); + + skb_reset_transport_header(skb); + greh = (struct gre_base_hdr *)skb->data; + greh->flags = tnl_flags_to_gre_flags(flags); + greh->protocol = proto; + + if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) { + __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4); + + if (flags & TUNNEL_SEQ) { + *ptr = seq; + ptr--; + } + if (flags & TUNNEL_KEY) { + *ptr = key; + ptr--; + } + if (flags & TUNNEL_CSUM && + !(skb_shinfo(skb)->gso_type & + (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) { + *ptr = 0; + *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0, + skb->len, 0)); + } + } +} + static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, const struct iphdr *tnl_params, __be16 proto) { struct ip_tunnel *tunnel = netdev_priv(dev); - struct tnl_ptk_info tpi; - tpi.flags = tunnel->parms.o_flags; - tpi.proto = proto; - tpi.key = tunnel->parms.o_key; if (tunnel->parms.o_flags & TUNNEL_SEQ) tunnel->o_seqno++; - tpi.seq = htonl(tunnel->o_seqno); /* Push GRE header. */ - gre_build_header(skb, &tpi, tunnel->tun_hlen); - - skb_set_inner_protocol(skb, tpi.proto); + build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags, + proto, tunnel->parms.o_key, htonl(tunnel->o_seqno)); + skb_set_inner_protocol(skb, proto); ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol); } +static struct sk_buff *gre_handle_offloads(struct sk_buff *skb, + bool csum) +{ + return iptunnel_handle_offloads(skb, csum, + csum ? 
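/* Editorial note, not part of the patch: key_to_tunnel_id() and
 * tunnel_id_to_key() above keep the 32-bit GRE key in the low-order 32
 * bits of the 64-bit tunnel id while both stay in network byte order;
 * that is why the little-endian variants shift by 32. The helpers are
 * exact inverses, which a quick self-check (sketch) would assert:
 */
static void example_key_roundtrip_check(void)
{
	__be32 key = htonl(0x0000002a);	/* arbitrary test key */

	WARN_ON(tunnel_id_to_key(key_to_tunnel_id(key)) != key);
}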
SKB_GSO_GRE_CSUM : SKB_GSO_GRE); +} + +static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ip_tunnel_info *tun_info; + struct net *net = dev_net(dev); + const struct ip_tunnel_key *key; + struct flowi4 fl; + struct rtable *rt; + int min_headroom; + int tunnel_hlen; + __be16 df, flags; + int err; + + tun_info = skb_tunnel_info(skb, AF_INET); + if (unlikely(!tun_info || tun_info->mode != IP_TUNNEL_INFO_TX)) + goto err_free_skb; + + key = &tun_info->key; + memset(&fl, 0, sizeof(fl)); + fl.daddr = key->ipv4_dst; + fl.saddr = key->ipv4_src; + fl.flowi4_tos = RT_TOS(key->ipv4_tos); + fl.flowi4_mark = skb->mark; + fl.flowi4_proto = IPPROTO_GRE; + + rt = ip_route_output_key(net, &fl); + if (IS_ERR(rt)) + goto err_free_skb; + + tunnel_hlen = ip_gre_calc_hlen(key->tun_flags); + + min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len + + tunnel_hlen + sizeof(struct iphdr); + if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) { + int head_delta = SKB_DATA_ALIGN(min_headroom - + skb_headroom(skb) + + 16); + err = pskb_expand_head(skb, max_t(int, head_delta, 0), + 0, GFP_ATOMIC); + if (unlikely(err)) + goto err_free_rt; + } + + /* Push Tunnel header. */ + skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)); + if (IS_ERR(skb)) { + skb = NULL; + goto err_free_rt; + } + + flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY); + build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB), + tunnel_id_to_key(tun_info->key.tun_id), 0); + + df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; + err = iptunnel_xmit(skb->sk, rt, skb, fl.saddr, + key->ipv4_dst, IPPROTO_GRE, + key->ipv4_tos, key->ipv4_ttl, df, false); + iptunnel_xmit_stats(err, &dev->stats, dev->tstats); + return; + +err_free_rt: + ip_rt_put(rt); +err_free_skb: + kfree_skb(skb); + dev->stats.tx_dropped++; +} + static netdev_tx_t ipgre_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); const struct iphdr *tnl_params; + if (tunnel->collect_md) { + gre_fb_xmit(skb, dev); + return NETDEV_TX_OK; + } + if (dev->header_ops) { /* Need space for new headers */ if (skb_cow_head(skb, dev->needed_headroom - @@ -277,7 +612,6 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb, goto out; __gre_xmit(skb, dev, tnl_params, skb->protocol); - return NETDEV_TX_OK; free_skb: @@ -292,6 +626,11 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb, { struct ip_tunnel *tunnel = netdev_priv(dev); + if (tunnel->collect_md) { + gre_fb_xmit(skb, dev); + return NETDEV_TX_OK; + } + skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM)); if (IS_ERR(skb)) goto out; @@ -300,7 +639,6 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb, goto free_skb; __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB)); - return NETDEV_TX_OK; free_skb: @@ -530,10 +868,9 @@ static int ipgre_tunnel_init(struct net_device *dev) return ip_tunnel_init(dev); } -static struct gre_cisco_protocol ipgre_protocol = { - .handler = ipgre_rcv, - .err_handler = ipgre_err, - .priority = 0, +static const struct gre_protocol ipgre_protocol = { + .handler = gre_rcv, + .err_handler = gre_err, }; static int __net_init ipgre_init_net(struct net *net) @@ -596,8 +933,10 @@ out: return ipgre_tunnel_validate(tb, data); } -static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[], - struct ip_tunnel_parm *parms) +static void ipgre_netlink_parms(struct net_device *dev, + struct nlattr *data[], + struct nlattr *tb[], + struct ip_tunnel_parm *parms) { 
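/* Editorial note, not part of the patch: ipgre_netlink_parms() now takes
 * the net_device so IFLA_GRE_COLLECT_METADATA can switch the tunnel into
 * flow-based mode. Such a device ignores the static parms filled in below
 * and transmits from per-skb metadata instead, roughly (sketch):
 *
 *	info = skb_tunnel_info(skb, AF_INET);	// TX ip_tunnel_info
 *	// gre_fb_xmit() routes on info->key.ipv4_dst and builds the GRE
 *	// header from info->key.tun_id and info->key.tun_flags
 */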
memset(parms, 0, sizeof(*parms)); @@ -635,6 +974,12 @@ static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[], if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) parms->iph.frag_off = htons(IP_DF); + + if (data[IFLA_GRE_COLLECT_METADATA]) { + struct ip_tunnel *t = netdev_priv(dev); + + t->collect_md = true; + } } /* This function returns true when ENCAP attributes are present in the nl msg */ @@ -712,7 +1057,7 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev, return err; } - ipgre_netlink_parms(data, tb, &p); + ipgre_netlink_parms(dev, data, tb, &p); return ip_tunnel_newlink(dev, tb, &p); } @@ -730,7 +1075,7 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[], return err; } - ipgre_netlink_parms(data, tb, &p); + ipgre_netlink_parms(dev, data, tb, &p); return ip_tunnel_changelink(dev, tb, &p); } @@ -765,6 +1110,8 @@ static size_t ipgre_get_size(const struct net_device *dev) nla_total_size(2) + /* IFLA_GRE_ENCAP_DPORT */ nla_total_size(2) + + /* IFLA_GRE_COLLECT_METADATA */ + nla_total_size(0) + 0; } @@ -796,6 +1143,11 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) t->encap.flags)) goto nla_put_failure; + if (t->collect_md) { + if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA)) + goto nla_put_failure; + } + return 0; nla_put_failure: @@ -817,6 +1169,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = { [IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 }, [IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 }, [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 }, + [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG }, }; static struct rtnl_link_ops ipgre_link_ops __read_mostly = { @@ -849,9 +1202,38 @@ static struct rtnl_link_ops ipgre_tap_ops __read_mostly = { .get_link_net = ip_tunnel_get_link_net, }; +struct net_device *gretap_fb_dev_create(struct net *net, const char *name, + u8 name_assign_type) +{ + struct nlattr *tb[IFLA_MAX + 1]; + struct net_device *dev; + struct ip_tunnel *t; + int err; + + memset(&tb, 0, sizeof(tb)); + + dev = rtnl_create_link(net, name, name_assign_type, + &ipgre_tap_ops, tb); + if (IS_ERR(dev)) + return dev; + + /* Configure flow based GRE device. 
*/ + t = netdev_priv(dev); + t->collect_md = true; + + err = ipgre_newlink(net, dev, tb, NULL); + if (err < 0) + goto out; + return dev; +out: + free_netdev(dev); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(gretap_fb_dev_create); + static int __net_init ipgre_tap_init_net(struct net *net) { - return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, NULL); + return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0"); } static void __net_exit ipgre_tap_exit_net(struct net *net) @@ -881,7 +1263,7 @@ static int __init ipgre_init(void) if (err < 0) goto pnet_tap_faied; - err = gre_cisco_register(&ipgre_protocol); + err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO); if (err < 0) { pr_info("%s: can't add protocol\n", __func__); goto add_proto_failed; @@ -900,7 +1282,7 @@ static int __init ipgre_init(void) tap_ops_failed: rtnl_link_unregister(&ipgre_link_ops); rtnl_link_failed: - gre_cisco_unregister(&ipgre_protocol); + gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO); add_proto_failed: unregister_pernet_device(&ipgre_tap_net_ops); pnet_tap_faied: @@ -912,7 +1294,7 @@ static void __exit ipgre_fini(void) { rtnl_link_unregister(&ipgre_tap_ops); rtnl_link_unregister(&ipgre_link_ops); - gre_cisco_unregister(&ipgre_protocol); + gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO); unregister_pernet_device(&ipgre_tap_net_ops); unregister_pernet_device(&ipgre_net_ops); } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 6bf89a6312bc..0138fada0951 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1542,6 +1542,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, struct net *net = sock_net(sk); struct sk_buff *nskb; int err; + int oif; if (__ip_options_echo(&replyopts.opt.opt, skb, sopt)) return; @@ -1559,7 +1560,11 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, daddr = replyopts.opt.opt.faddr; } - flowi4_init_output(&fl4, arg->bound_dev_if, + oif = arg->bound_dev_if; + if (!oif && netif_index_is_vrf(net, skb->skb_iif)) + oif = skb->skb_iif; + + flowi4_init_output(&fl4, oif, IP4_REPLY_MARK(net, skb->mark), RT_TOS(arg->tos), RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol, diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 626d9e56a6bd..cbb51f3fac06 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -230,10 +230,13 @@ skip_key_lookup: if (cand) return cand; + t = rcu_dereference(itn->collect_md_tun); + if (t) + return t; + if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP) return netdev_priv(itn->fb_tunnel_dev); - return NULL; } EXPORT_SYMBOL_GPL(ip_tunnel_lookup); @@ -261,11 +264,15 @@ static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t) { struct hlist_head *head = ip_bucket(itn, &t->parms); + if (t->collect_md) + rcu_assign_pointer(itn->collect_md_tun, t); hlist_add_head_rcu(&t->hash_node, head); } -static void ip_tunnel_del(struct ip_tunnel *t) +static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t) { + if (t->collect_md) + rcu_assign_pointer(itn->collect_md_tun, NULL); hlist_del_init_rcu(&t->hash_node); } @@ -419,7 +426,8 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net, } int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, - const struct tnl_ptk_info *tpi, bool log_ecn_error) + const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst, + bool log_ecn_error) { struct pcpu_sw_netstats *tstats; const struct iphdr *iph = ip_hdr(skb); @@ -478,6 +486,9 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, 
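/* Editorial sketch, not part of the patch: once ip_tunnel_rcv() attaches
 * the metadata_dst, any later hook can read the receive-side tunnel key
 * off the skb. Hypothetical consumer:
 */
static void example_peek_rx_tunnel(struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb, AF_INET);

	if (info && info->mode == IP_TUNNEL_INFO_RX)
		pr_debug("rx tun_id %llu\n",
			 (unsigned long long)be64_to_cpu(info->key.tun_id));
}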
skb->dev = tunnel->dev; } + if (tun_dst) + skb_dst_set(skb, (struct dst_entry *)tun_dst); + gro_cells_receive(&tunnel->gro_cells, skb); return 0; @@ -806,7 +817,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn, struct ip_tunnel_parm *p, bool set_mtu) { - ip_tunnel_del(t); + ip_tunnel_del(itn, t); t->parms.iph.saddr = p->iph.saddr; t->parms.iph.daddr = p->iph.daddr; t->parms.i_key = p->i_key; @@ -967,7 +978,7 @@ void ip_tunnel_dellink(struct net_device *dev, struct list_head *head) itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id); if (itn->fb_tunnel_dev != dev) { - ip_tunnel_del(netdev_priv(dev)); + ip_tunnel_del(itn, netdev_priv(dev)); unregister_netdevice_queue(dev, head); } } @@ -1072,8 +1083,13 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], nt = netdev_priv(dev); itn = net_generic(net, nt->ip_tnl_net_id); - if (ip_tunnel_find(itn, p, dev->type)) - return -EEXIST; + if (nt->collect_md) { + if (rtnl_dereference(itn->collect_md_tun)) + return -EEXIST; + } else { + if (ip_tunnel_find(itn, p, dev->type)) + return -EEXIST; + } nt->net = net; nt->parms = *p; @@ -1089,7 +1105,6 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], dev->mtu = mtu; ip_tunnel_add(itn, nt); - out: return err; } @@ -1163,6 +1178,10 @@ int ip_tunnel_init(struct net_device *dev) iph->version = 4; iph->ihl = 5; + if (tunnel->collect_md) { + dev->features |= NETIF_F_NETNS_LOCAL; + netif_keep_dst(dev); + } return 0; } EXPORT_SYMBOL_GPL(ip_tunnel_init); @@ -1176,7 +1195,7 @@ void ip_tunnel_uninit(struct net_device *dev) itn = net_generic(net, tunnel->ip_tnl_net_id); /* fb_tunnel_dev will be unregisted in net-exit call. */ if (itn->fb_tunnel_dev != dev) - ip_tunnel_del(netdev_priv(dev)); + ip_tunnel_del(itn, netdev_priv(dev)); ip_tunnel_dst_reset_all(tunnel); } diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 5512f4e4ec1b..1c2389d582a6 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -192,15 +192,15 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, } EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64); -static const struct nla_policy ip_tun_policy[IP_TUN_MAX + 1] = { - [IP_TUN_ID] = { .type = NLA_U64 }, - [IP_TUN_DST] = { .type = NLA_U32 }, - [IP_TUN_SRC] = { .type = NLA_U32 }, - [IP_TUN_TTL] = { .type = NLA_U8 }, - [IP_TUN_TOS] = { .type = NLA_U8 }, - [IP_TUN_SPORT] = { .type = NLA_U16 }, - [IP_TUN_DPORT] = { .type = NLA_U16 }, - [IP_TUN_FLAGS] = { .type = NLA_U16 }, +static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = { + [LWTUNNEL_IP_ID] = { .type = NLA_U64 }, + [LWTUNNEL_IP_DST] = { .type = NLA_U32 }, + [LWTUNNEL_IP_SRC] = { .type = NLA_U32 }, + [LWTUNNEL_IP_TTL] = { .type = NLA_U8 }, + [LWTUNNEL_IP_TOS] = { .type = NLA_U8 }, + [LWTUNNEL_IP_SPORT] = { .type = NLA_U16 }, + [LWTUNNEL_IP_DPORT] = { .type = NLA_U16 }, + [LWTUNNEL_IP_FLAGS] = { .type = NLA_U16 }, }; static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr, @@ -208,10 +208,10 @@ static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr, { struct ip_tunnel_info *tun_info; struct lwtunnel_state *new_state; - struct nlattr *tb[IP_TUN_MAX + 1]; + struct nlattr *tb[LWTUNNEL_IP_MAX + 1]; int err; - err = nla_parse_nested(tb, IP_TUN_MAX, attr, ip_tun_policy); + err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy); if (err < 0) return err; @@ -223,29 +223,29 @@ static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr, tun_info = lwt_tun_info(new_state); - if (tb[IP_TUN_ID]) 
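/* Editorial note, not part of the patch: the following hunk is a pure
 * UAPI rename (IP_TUN_* -> LWTUNNEL_IP_*) done before the attributes
 * ship in a release; types and semantics are unchanged. Filling the
 * encap payload stays mechanical (sketch, only two attributes shown):
 */
static int example_fill_encap(struct sk_buff *msg, u64 id, __be32 dst)
{
	if (nla_put_u64(msg, LWTUNNEL_IP_ID, id) ||
	    nla_put_be32(msg, LWTUNNEL_IP_DST, dst))
		return -EMSGSIZE;
	return 0;
}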
- tun_info->key.tun_id = nla_get_u64(tb[IP_TUN_ID]); + if (tb[LWTUNNEL_IP_ID]) + tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP_ID]); - if (tb[IP_TUN_DST]) - tun_info->key.ipv4_dst = nla_get_be32(tb[IP_TUN_DST]); + if (tb[LWTUNNEL_IP_DST]) + tun_info->key.ipv4_dst = nla_get_be32(tb[LWTUNNEL_IP_DST]); - if (tb[IP_TUN_SRC]) - tun_info->key.ipv4_src = nla_get_be32(tb[IP_TUN_SRC]); + if (tb[LWTUNNEL_IP_SRC]) + tun_info->key.ipv4_src = nla_get_be32(tb[LWTUNNEL_IP_SRC]); - if (tb[IP_TUN_TTL]) - tun_info->key.ipv4_ttl = nla_get_u8(tb[IP_TUN_TTL]); + if (tb[LWTUNNEL_IP_TTL]) + tun_info->key.ipv4_ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]); - if (tb[IP_TUN_TOS]) - tun_info->key.ipv4_tos = nla_get_u8(tb[IP_TUN_TOS]); + if (tb[LWTUNNEL_IP_TOS]) + tun_info->key.ipv4_tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]); - if (tb[IP_TUN_SPORT]) - tun_info->key.tp_src = nla_get_be16(tb[IP_TUN_SPORT]); + if (tb[LWTUNNEL_IP_SPORT]) + tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP_SPORT]); - if (tb[IP_TUN_DPORT]) - tun_info->key.tp_dst = nla_get_be16(tb[IP_TUN_DPORT]); + if (tb[LWTUNNEL_IP_DPORT]) + tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP_DPORT]); - if (tb[IP_TUN_FLAGS]) - tun_info->key.tun_flags = nla_get_u16(tb[IP_TUN_FLAGS]); + if (tb[LWTUNNEL_IP_FLAGS]) + tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]); tun_info->mode = IP_TUNNEL_INFO_TX; tun_info->options = NULL; @@ -261,14 +261,14 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb, { struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate); - if (nla_put_u64(skb, IP_TUN_ID, tun_info->key.tun_id) || - nla_put_be32(skb, IP_TUN_DST, tun_info->key.ipv4_dst) || - nla_put_be32(skb, IP_TUN_SRC, tun_info->key.ipv4_src) || - nla_put_u8(skb, IP_TUN_TOS, tun_info->key.ipv4_tos) || - nla_put_u8(skb, IP_TUN_TTL, tun_info->key.ipv4_ttl) || - nla_put_u16(skb, IP_TUN_SPORT, tun_info->key.tp_src) || - nla_put_u16(skb, IP_TUN_DPORT, tun_info->key.tp_dst) || - nla_put_u16(skb, IP_TUN_FLAGS, tun_info->key.tun_flags)) + if (nla_put_u64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) || + nla_put_be32(skb, LWTUNNEL_IP_DST, tun_info->key.ipv4_dst) || + nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.ipv4_src) || + nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.ipv4_tos) || + nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ipv4_ttl) || + nla_put_u16(skb, LWTUNNEL_IP_SPORT, tun_info->key.tp_src) || + nla_put_u16(skb, LWTUNNEL_IP_DPORT, tun_info->key.tp_dst) || + nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags)) return -ENOMEM; return 0; @@ -276,20 +276,27 @@ static int ip_tun_fill_encap_info(struct sk_buff *skb, static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate) { - return nla_total_size(8) /* IP_TUN_ID */ - + nla_total_size(4) /* IP_TUN_DST */ - + nla_total_size(4) /* IP_TUN_SRC */ - + nla_total_size(1) /* IP_TUN_TOS */ - + nla_total_size(1) /* IP_TUN_TTL */ - + nla_total_size(2) /* IP_TUN_SPORT */ - + nla_total_size(2) /* IP_TUN_DPORT */ - + nla_total_size(2); /* IP_TUN_FLAGS */ + return nla_total_size(8) /* LWTUNNEL_IP_ID */ + + nla_total_size(4) /* LWTUNNEL_IP_DST */ + + nla_total_size(4) /* LWTUNNEL_IP_SRC */ + + nla_total_size(1) /* LWTUNNEL_IP_TOS */ + + nla_total_size(1) /* LWTUNNEL_IP_TTL */ + + nla_total_size(2) /* LWTUNNEL_IP_SPORT */ + + nla_total_size(2) /* LWTUNNEL_IP_DPORT */ + + nla_total_size(2); /* LWTUNNEL_IP_FLAGS */ +} + +static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b) +{ + return memcmp(lwt_tun_info(a), lwt_tun_info(b), + sizeof(struct ip_tunnel_info)); } static const struct 
lwtunnel_encap_ops ip_tun_lwt_ops = { .build_state = ip_tun_build_state, .fill_encap = ip_tun_fill_encap_info, .get_encap_size = ip_tun_encap_nlsize, + .cmp_encap = ip_tun_cmp_encap, }; void __init ip_tunnel_core_init(void) diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 8e7328c6a390..ed4ef09c2136 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -94,7 +94,7 @@ /* Define the timeout for waiting for a DHCP/BOOTP/RARP reply */ #define CONF_OPEN_RETRIES 2 /* (Re)open devices twice */ #define CONF_SEND_RETRIES 6 /* Send six requests per open */ -#define CONF_INTER_TIMEOUT (HZ/2) /* Inter-device timeout: 1/2 second */ +#define CONF_INTER_TIMEOUT (HZ) /* Inter-device timeout: 1 second */ #define CONF_BASE_TIMEOUT (HZ*2) /* Initial timeout: 2 seconds */ #define CONF_TIMEOUT_RANDOM (HZ) /* Maximum amount of randomization */ #define CONF_TIMEOUT_MULT *7/4 /* Rate of timeout growth */ diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 254238daf58b..f34c31defafe 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -198,7 +198,7 @@ static int ipip_rcv(struct sk_buff *skb) goto drop; if (iptunnel_pull_header(skb, 0, tpi.proto)) goto drop; - return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); + return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, log_ecn_error); } return -1; diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c index 4bf3dc49ad1e..270765236f5e 100644 --- a/net/ipv4/netfilter/ipt_ECN.c +++ b/net/ipv4/netfilter/ipt_ECN.c @@ -72,7 +72,7 @@ set_ect_tcp(struct sk_buff *skb, const struct ipt_ECN_info *einfo) tcph->cwr = einfo->proto.tcp.cwr; inet_proto_csum_replace2(&tcph->check, skb, - oldval, ((__be16 *)tcph)[6], 0); + oldval, ((__be16 *)tcph)[6], false); return true; } diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index fe8cc183411e..95ea633e8356 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c @@ -226,7 +226,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet, synproxy_build_options(nth, opts); - synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); + synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, + niph, nth, tcp_hdr_size); } static bool diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c index e59cc05c09e9..22f4579b0c2a 100644 --- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c @@ -120,7 +120,7 @@ static void nf_nat_ipv4_csum_update(struct sk_buff *skb, oldip = iph->daddr; newip = t->dst.u3.ip; } - inet_proto_csum_replace4(check, skb, oldip, newip, 1); + inet_proto_csum_replace4(check, skb, oldip, newip, true); } static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb, @@ -151,7 +151,7 @@ static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb, } } else inet_proto_csum_replace2(check, skb, - htons(oldlen), htons(datalen), 1); + htons(oldlen), htons(datalen), true); } #if IS_ENABLED(CONFIG_NF_CT_NETLINK) diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c index 4557b4ab8342..7b98baa13ede 100644 --- a/net/ipv4/netfilter/nf_nat_proto_icmp.c +++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c @@ -67,7 +67,7 @@ icmp_manip_pkt(struct sk_buff *skb, hdr = (struct icmphdr *)(skb->data + hdroff); inet_proto_csum_replace2(&hdr->checksum, skb, - hdr->un.echo.id, tuple->src.u.icmp.id, 0); + hdr->un.echo.id, tuple->src.u.icmp.id, false); hdr->un.echo.id = tuple->src.u.icmp.id; return true; } diff --git a/net/ipv4/route.c 
b/net/ipv4/route.c index 18fd7c9095c7..2403e85107f0 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -112,6 +112,7 @@ #endif #include <net/secure_seq.h> #include <net/ip_tunnels.h> +#include <net/vrf.h> #define RT_FL_TOS(oldflp4) \ ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)) @@ -1630,8 +1631,14 @@ static int __mkroute_input(struct sk_buff *skb, rth->dst.output = ip_output; rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag); - if (lwtunnel_output_redirect(rth->rt_lwtstate)) + if (lwtunnel_output_redirect(rth->rt_lwtstate)) { + rth->rt_lwtstate->orig_output = rth->dst.output; rth->dst.output = lwtunnel_output; + } + if (lwtunnel_input_redirect(rth->rt_lwtstate)) { + rth->rt_lwtstate->orig_input = rth->dst.input; + rth->dst.input = lwtunnel_input; + } skb_dst_set(skb, &rth->dst); out: err = 0; @@ -1726,7 +1733,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, * Now we are ready to route the packet. */ fl4.flowi4_oif = 0; - fl4.flowi4_iif = dev->ifindex; + fl4.flowi4_iif = vrf_master_ifindex_rcu(dev) ? : dev->ifindex; fl4.flowi4_mark = skb->mark; fl4.flowi4_tos = tos; fl4.flowi4_scope = RT_SCOPE_UNIVERSE; @@ -2130,6 +2137,11 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) fl4->saddr = inet_select_addr(dev_out, 0, RT_SCOPE_HOST); } + if (netif_is_vrf(dev_out) && + !(fl4->flowi4_flags & FLOWI_FLAG_VRFSRC)) { + rth = vrf_dev_get_rth(dev_out); + goto out; + } } if (!fl4->daddr) { diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index d27eb549ced6..93898e093d4e 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr); if (req) { nsk = tcp_check_req(sk, skb, req, false); - if (!nsk) + if (!nsk || nsk == sk) reqsk_put(req); return nsk; } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 7d1efa762b75..444ab5beecbd 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2149,7 +2149,7 @@ repair: tcp_cwnd_validate(sk, is_cwnd_limited); return false; } - return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk)); + return !tp->packets_out && tcp_send_head(sk); } bool tcp_schedule_loss_probe(struct sock *sk) @@ -2226,7 +2226,7 @@ static bool skb_still_in_host_queue(const struct sock *sk, return false; } -/* When probe timeout (PTO) fires, send a new segment if one exists, else +/* When probe timeout (PTO) fires, try to send a new segment if possible, else * retransmit the last segment. */ void tcp_send_loss_probe(struct sock *sk) @@ -2235,11 +2235,19 @@ void tcp_send_loss_probe(struct sock *sk) struct sk_buff *skb; int pcount; int mss = tcp_current_mss(sk); - int err = -1; - if (tcp_send_head(sk)) { - err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); - goto rearm_timer; + skb = tcp_send_head(sk); + if (skb) { + if (tcp_snd_wnd_test(tp, skb, mss)) { + pcount = tp->packets_out; + tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); + if (tp->packets_out > pcount) + goto probe_sent; + goto rearm_timer; + } + skb = tcp_write_queue_prev(sk, skb); + } else { + skb = tcp_write_queue_tail(sk); } /* At most one outstanding TLP retransmission. */ @@ -2247,7 +2255,6 @@ void tcp_send_loss_probe(struct sock *sk) goto rearm_timer; /* Retransmit last segment.
*/ - skb = tcp_write_queue_tail(sk); if (WARN_ON(!skb)) goto rearm_timer; @@ -2262,26 +2269,24 @@ void tcp_send_loss_probe(struct sock *sk) if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss, GFP_ATOMIC))) goto rearm_timer; - skb = tcp_write_queue_tail(sk); + skb = tcp_write_queue_next(sk, skb); } if (WARN_ON(!skb || !tcp_skb_pcount(skb))) goto rearm_timer; - err = __tcp_retransmit_skb(sk, skb); + if (__tcp_retransmit_skb(sk, skb)) + goto rearm_timer; /* Record snd_nxt for loss detection. */ - if (likely(!err)) - tp->tlp_high_seq = tp->snd_nxt; + tp->tlp_high_seq = tp->snd_nxt; +probe_sent: + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); + /* Reset so that tcp_rearm_rto will restart the timer from now */ + inet_csk(sk)->icsk_pending = 0; rearm_timer: - inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, - inet_csk(sk)->icsk_rto, - TCP_RTO_MAX); - - if (likely(!err)) - NET_INC_STATS_BH(sock_net(sk), - LINUX_MIB_TCPLOSSPROBES); + tcp_rearm_rto(sk); } /* Push out any pending frames which were held back due to diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 83aa604f9273..c0a15e7f359f 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1013,11 +1013,31 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (!rt) { struct net *net = sock_net(sk); + __u8 flow_flags = inet_sk_flowi_flags(sk); fl4 = &fl4_stack; + + /* Unconnected socket. If the output device is enslaved to a VRF + * device, look up the source address from the VRF table. This mimics + * the behavior of ip_route_connect{_init}. + */ + if (netif_index_is_vrf(net, ipc.oif)) { + flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, + RT_SCOPE_UNIVERSE, sk->sk_protocol, + (flow_flags | FLOWI_FLAG_VRFSRC), + faddr, saddr, dport, + inet->inet_sport); + + rt = ip_route_output_flow(net, fl4, sk); + if (!IS_ERR(rt)) { + saddr = fl4->saddr; + ip_rt_put(rt); + } + } + flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, sk->sk_protocol, - inet_sk_flowi_flags(sk), + flow_flags, faddr, saddr, dport, inet->inet_sport); security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); @@ -1995,12 +2015,19 @@ void udp_v4_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_efree; - dst = sk->sk_rx_dst; + dst = READ_ONCE(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); - if (dst) - skb_dst_set_noref(skb, dst); + if (dst) { + /* DST_NOCACHE cannot be used without taking a reference */ + if (dst->flags & DST_NOCACHE) { + if (likely(atomic_inc_not_zero(&dst->__refcnt))) + skb_dst_set(skb, dst); + } else { + skb_dst_set_noref(skb, dst); + } + } } int udp_rcv(struct sk_buff *skb) diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index bff69746e05f..55b3c0f4dde5 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -19,7 +19,7 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo; static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4, - int tos, + int tos, int oif, const xfrm_address_t *saddr, const xfrm_address_t *daddr) { @@ -28,6 +28,7 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4, memset(fl4, 0, sizeof(*fl4)); fl4->daddr = daddr->a4; fl4->flowi4_tos = tos; + fl4->flowi4_oif = oif; if (saddr) fl4->saddr = saddr->a4; @@ -38,22 +39,22 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4, return ERR_CAST(rt); } -static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, +static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif, const xfrm_address_t *saddr, const xfrm_address_t
*daddr) { struct flowi4 fl4; - return __xfrm4_dst_lookup(net, &fl4, tos, saddr, daddr); + return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr); } -static int xfrm4_get_saddr(struct net *net, +static int xfrm4_get_saddr(struct net *net, int oif, xfrm_address_t *saddr, xfrm_address_t *daddr) { struct dst_entry *dst; struct flowi4 fl4; - dst = __xfrm4_dst_lookup(net, &fl4, 0, NULL, daddr); + dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr); if (IS_ERR(dst)) return -EHOSTUNREACH; diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 643f61339e7b..983bb999738c 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -92,6 +92,25 @@ config IPV6_MIP6 If unsure, say N. +config IPV6_ILA + tristate "IPv6: Identifier Locator Addressing (ILA)" + select LWTUNNEL + ---help--- + Support for IPv6 Identifier Locator Addressing (ILA). + + ILA is a mechanism to do network virtualization without + encapsulation. The basic concept of ILA is that we split an + IPv6 address into a 64 bit locator and 64 bit identifier. The + identifier is the identity of an entity in communication + ("who") and the locator expresses the location of the + entity ("where"). + + ILA can be configured using the "encap ila" option with + "ip -6 route" command. ILA is described in + https://tools.ietf.org/html/draft-herbert-nvo3-ila-00. + + If unsure, say N. + config INET6_XFRM_TUNNEL tristate select INET6_TUNNEL diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index 0f3f1999719a..2c900c7b7eb1 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_INET6_XFRM_MODE_TUNNEL) += xfrm6_mode_tunnel.o obj-$(CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION) += xfrm6_mode_ro.o obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o obj-$(CONFIG_IPV6_MIP6) += mip6.o +obj-$(CONFIG_IPV6_ILA) += ila.o obj-$(CONFIG_NETFILTER) += netfilter/ obj-$(CONFIG_IPV6_VTI) += ip6_vti.o diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 53e3a9d756b0..59242399b0b5 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -214,6 +214,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = { .initialized = false, }, .use_oif_addrs_only = 0, + .ignore_routes_with_linkdown = 0, }; static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { @@ -257,6 +258,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { .initialized = false, }, .use_oif_addrs_only = 0, + .ignore_routes_with_linkdown = 0, }; /* Check if a valid qdisc is available */ @@ -472,6 +474,9 @@ static int inet6_netconf_msgsize_devconf(int type) if (type == -1 || type == NETCONFA_PROXY_NEIGH) size += nla_total_size(4); + if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) + size += nla_total_size(4); + return size; } @@ -508,6 +513,11 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex, nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0) goto nla_put_failure; + if ((type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) && + nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, + devconf->ignore_routes_with_linkdown) < 0) + goto nla_put_failure; + nlmsg_end(skb, nlh); return 0; @@ -544,6 +554,7 @@ static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = { [NETCONFA_IFINDEX] = { .len = sizeof(int) }, [NETCONFA_FORWARDING] = { .len = sizeof(int) }, [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) }, + [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) }, }; static int inet6_netconf_get_devconf(struct sk_buff *in_skb, @@ -766,6 +777,63 @@ static int 
addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf) rt6_purge_dflt_routers(net); return 1; } + +static void addrconf_linkdown_change(struct net *net, __s32 newf) +{ + struct net_device *dev; + struct inet6_dev *idev; + + for_each_netdev(net, dev) { + idev = __in6_dev_get(dev); + if (idev) { + int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf); + + idev->cnf.ignore_routes_with_linkdown = newf; + if (changed) + inet6_netconf_notify_devconf(dev_net(dev), + NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, + dev->ifindex, + &idev->cnf); + } + } +} + +static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf) +{ + struct net *net; + int old; + + if (!rtnl_trylock()) + return restart_syscall(); + + net = (struct net *)table->extra2; + old = *p; + *p = newf; + + if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) { + if ((!newf) ^ (!old)) + inet6_netconf_notify_devconf(net, + NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, + NETCONFA_IFINDEX_DEFAULT, + net->ipv6.devconf_dflt); + rtnl_unlock(); + return 0; + } + + if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) { + net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf; + addrconf_linkdown_change(net, newf); + if ((!newf) ^ (!old)) + inet6_netconf_notify_devconf(net, + NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, + NETCONFA_IFINDEX_ALL, + net->ipv6.devconf_all); + } + rtnl_unlock(); + + return 1; +} + #endif /* Nobody refers to this ifaddr, destroy it */ @@ -4616,6 +4684,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc; array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local; array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu; + array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown; /* we omit DEVCONF_STABLE_SECRET for now */ array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only; } @@ -4637,6 +4706,7 @@ static inline size_t inet6_if_nlmsg_size(void) + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ + nla_total_size(4) /* IFLA_MTU */ + nla_total_size(4) /* IFLA_LINK */ + + nla_total_size(1) /* IFLA_OPERSTATE */ + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */ } @@ -4893,7 +4963,9 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) || nla_put_u32(skb, IFLA_MTU, dev->mtu) || (dev->ifindex != dev_get_iflink(dev) && - nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev)))) + nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) || + nla_put_u8(skb, IFLA_OPERSTATE, + netif_running(dev) ? dev->operstate : IF_OPER_DOWN)) goto nla_put_failure; protoinfo = nla_nest_start(skb, IFLA_PROTINFO); if (!protoinfo) @@ -5338,6 +5410,34 @@ out: return err; } +static +int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl, + int write, + void __user *buffer, + size_t *lenp, + loff_t *ppos) +{ + int *valp = ctl->data; + int val = *valp; + loff_t pos = *ppos; + struct ctl_table lctl; + int ret; + + /* ctl->data points to idev->cnf.ignore_routes_with_linkdown + * we should not modify it until we get the rtnl lock. + */
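The handler above follows the usual addrconf idiom for sysctls whose side effects must run under the RTNL lock: proc_dointvec() is pointed at a stack copy through a shadow ctl_table, and only the fixup helper (which takes RTNL itself) writes the real devconf field. A minimal sketch of the idiom, where my_handler() and my_fixup() are illustrative names rather than part of this patch:

    static int my_handler(struct ctl_table *ctl, int write,
                          void __user *buffer, size_t *lenp, loff_t *ppos)
    {
            int *valp = ctl->data;
            int val = *valp;                /* parse into a copy ... */
            struct ctl_table lctl = *ctl;
            int ret;

            lctl.data = &val;               /* ... via a shadow table */
            ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
            if (write && !ret)
                    ret = my_fixup(ctl, valp, val); /* commits under RTNL */
            return ret;
    }

addrconf_sysctl_ignore_routes_with_linkdown() below additionally restores *ppos on a non-zero return, matching the other addrconf handlers.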
+ lctl = *ctl; + lctl.data = &val; + + ret = proc_dointvec(&lctl, write, buffer, lenp, ppos); + + if (write) + ret = addrconf_fixup_linkdown(ctl, valp, val); + if (ret) + *ppos = pos; + return ret; +} + static struct addrconf_sysctl_table { struct ctl_table_header *sysctl_header; @@ -5629,7 +5729,13 @@ static struct addrconf_sysctl_table .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, - + }, + { + .procname = "ignore_routes_with_linkdown", + .data = &ipv6_devconf.ignore_routes_with_linkdown, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = addrconf_sysctl_ignore_routes_with_linkdown, }, { /* sentinel */ diff --git a/net/ipv6/ila.c b/net/ipv6/ila.c new file mode 100644 index 000000000000..2540ab4b76d1 --- /dev/null +++ b/net/ipv6/ila.c @@ -0,0 +1,216 @@ +#include <linux/errno.h> +#include <linux/ip.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/socket.h> +#include <linux/skbuff.h> +#include <linux/types.h> +#include <net/checksum.h> +#include <net/ip.h> +#include <net/ip6_fib.h> +#include <net/lwtunnel.h> +#include <net/protocol.h> +#include <uapi/linux/ila.h> + +struct ila_params { + __be64 locator; +}; + +static inline struct ila_params *ila_params_lwtunnel( + struct lwtunnel_state *lwstate) +{ + return (struct ila_params *)lwstate->data; +} + +static inline __wsum compute_csum_diff8(const __be32 *from, const __be32 *to) +{ + __be32 diff[] = { + ~from[0], ~from[1], to[0], to[1], + }; + + return csum_partial(diff, sizeof(diff), 0); +} + +static inline __wsum get_csum_diff(struct ipv6hdr *ip6h, struct ila_params *p) +{ + return compute_csum_diff8((__be32 *)&ip6h->daddr, + (__be32 *)&p->locator); +} + +static void update_ipv6_locator(struct sk_buff *skb, struct ila_params *p) +{ + __wsum diff; + struct ipv6hdr *ip6h = ipv6_hdr(skb); + size_t nhoff = sizeof(struct ipv6hdr); + + /* First update checksum */ + switch (ip6h->nexthdr) { + case NEXTHDR_TCP: + if (likely(pskb_may_pull(skb, nhoff + sizeof(struct tcphdr)))) { + struct tcphdr *th = (struct tcphdr *) + (skb_network_header(skb) + nhoff); + + diff = get_csum_diff(ip6h, p); + inet_proto_csum_replace_by_diff(&th->check, skb, + diff, true); + } + break; + case NEXTHDR_UDP: + if (likely(pskb_may_pull(skb, nhoff + sizeof(struct udphdr)))) { + struct udphdr *uh = (struct udphdr *) + (skb_network_header(skb) + nhoff); + + if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { + diff = get_csum_diff(ip6h, p); + inet_proto_csum_replace_by_diff(&uh->check, skb, + diff, true); + if (!uh->check) + uh->check = CSUM_MANGLED_0; + } + } + break; + case NEXTHDR_ICMP: + if (likely(pskb_may_pull(skb, + nhoff + sizeof(struct icmp6hdr)))) { + struct icmp6hdr *ih = (struct icmp6hdr *) + (skb_network_header(skb) + nhoff); + + diff = get_csum_diff(ip6h, p); + inet_proto_csum_replace_by_diff(&ih->icmp6_cksum, skb, + diff, true); + } + break; + } + + /* Now change destination address */ + *(__be64 *)&ip6h->daddr = p->locator; +} + +static int ila_output(struct sock *sk, struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + struct rt6_info *rt6 = NULL; + + if (skb->protocol != htons(ETH_P_IPV6)) + goto drop; + + rt6 = (struct rt6_info *)dst; + + update_ipv6_locator(skb, ila_params_lwtunnel(rt6->rt6i_lwtstate)); + + return rt6->rt6i_lwtstate->orig_output(sk, skb); + +drop: + kfree_skb(skb); + return -EINVAL; +} + +static int ila_input(struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + struct rt6_info *rt6 = NULL; + + if (skb->protocol != htons(ETH_P_IPV6)) + goto drop; + + rt6 = (struct rt6_info *)dst; + + update_ipv6_locator(skb, ila_params_lwtunnel(rt6->rt6i_lwtstate)); + + return rt6->rt6i_lwtstate->orig_input(skb); + +drop: + kfree_skb(skb); + return -EINVAL; +} +
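The locator rewrite above is easiest to read with the ILA address layout in mind: the high 64 bits of the IPv6 address say where a node is (and are rewritten in flight), while the low 64 bits say who it is (and are preserved end to end). A small illustrative sketch of that view; the struct and helper below are hypothetical, not part of this patch:

    #include <linux/types.h>
    #include <linux/in6.h>

    /* Hypothetical ILA view of an IPv6 address. */
    struct ila_addr_view {
            __be64 locator;         /* "where": routable, rewritten in transit */
            __be64 identifier;      /* "who": stable, preserved end to end */
    };

    static inline struct ila_addr_view *ila_view(struct in6_addr *addr)
    {
            return (struct ila_addr_view *)addr;
    }

With that view, update_ipv6_locator() is simply "ila_view(&ip6h->daddr)->locator = p->locator", with compute_csum_diff8() first folding the ones-complement delta of the replaced 64 bits into the transport checksum, so TCP, UDP and ICMPv6 checksums stay valid without a full recompute.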
+static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = { + [ILA_ATTR_LOCATOR] = { .type = NLA_U64, }, +}; + +static int ila_build_state(struct net_device *dev, struct nlattr *nla, + struct lwtunnel_state **ts) +{ + struct ila_params *p; + struct nlattr *tb[ILA_ATTR_MAX + 1]; + size_t encap_len = sizeof(*p); + struct lwtunnel_state *newts; + int ret; + + ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla, + ila_nl_policy); + if (ret < 0) + return ret; + + if (!tb[ILA_ATTR_LOCATOR]) + return -EINVAL; + + newts = lwtunnel_state_alloc(encap_len); + if (!newts) + return -ENOMEM; + + newts->len = encap_len; + p = ila_params_lwtunnel(newts); + + p->locator = (__force __be64)nla_get_u64(tb[ILA_ATTR_LOCATOR]); + + newts->type = LWTUNNEL_ENCAP_ILA; + newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT | + LWTUNNEL_STATE_INPUT_REDIRECT; + + *ts = newts; + + return 0; +} + +static int ila_fill_encap_info(struct sk_buff *skb, + struct lwtunnel_state *lwtstate) +{ + struct ila_params *p = ila_params_lwtunnel(lwtstate); + + if (nla_put_u64(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator)) + goto nla_put_failure; + + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + +static int ila_encap_nlsize(struct lwtunnel_state *lwtstate) +{ + /* No encapsulation overhead */ + return 0; +} + +static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b) +{ + struct ila_params *a_p = ila_params_lwtunnel(a); + struct ila_params *b_p = ila_params_lwtunnel(b); + + return (a_p->locator != b_p->locator); +} + +static const struct lwtunnel_encap_ops ila_encap_ops = { + .build_state = ila_build_state, + .output = ila_output, + .input = ila_input, + .fill_encap = ila_fill_encap_info, + .get_encap_size = ila_encap_nlsize, + .cmp_encap = ila_encap_cmp, +}; + +static int __init ila_init(void) +{ + return lwtunnel_encap_add_ops(&ila_encap_ops, LWTUNNEL_ENCAP_ILA); +} + +static void __exit ila_fini(void) +{ + lwtunnel_encap_del_ops(&ila_encap_ops, LWTUNNEL_ENCAP_ILA); +} + +module_init(ila_init); +module_exit(ila_fini); +MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>"); +MODULE_LICENSE("GPL"); diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index 6edb7b106de7..ebbb754c2111 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c @@ -37,12 +37,13 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr, } static void -synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb, +synproxy_send_tcp(const struct synproxy_net *snet, + const struct sk_buff *skb, struct sk_buff *nskb, struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo, struct ipv6hdr *niph, struct tcphdr *nth, unsigned int tcp_hdr_size) { - struct net *net = nf_ct_net((struct nf_conn *)nfct); + struct net *net = nf_ct_net(snet->tmpl); struct dst_entry *dst; struct flowi6 fl6; @@ -83,7 +84,8 @@ free_nskb: } static void -synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th, +synproxy_send_client_synack(const struct synproxy_net *snet, + const struct sk_buff *skb, const struct tcphdr *th, const struct synproxy_options *opts) { struct sk_buff *nskb; @@ -119,7 +121,7 @@ synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th, synproxy_build_options(nth, opts); - synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, + synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size); } @@ -163,7 +165,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet, synproxy_build_options(nth, opts); -
synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, + synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, niph, nth, tcp_hdr_size); } @@ -203,7 +205,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet, synproxy_build_options(nth, opts); - synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); + synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); } static void @@ -241,7 +243,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet, synproxy_build_options(nth, opts); - synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); + synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, + niph, nth, tcp_hdr_size); } static bool @@ -301,7 +304,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par) XT_SYNPROXY_OPT_SACK_PERM | XT_SYNPROXY_OPT_ECN); - synproxy_send_client_synack(skb, th, &opts); + synproxy_send_client_synack(snet, skb, th, &opts); return NF_DROP; } else if (th->ack && !(th->fin || th->rst || th->syn)) { diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c index e76900e0aa92..70fbaed49edb 100644 --- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c @@ -124,7 +124,7 @@ static void nf_nat_ipv6_csum_update(struct sk_buff *skb, newip = &t->dst.u3.in6; } inet_proto_csum_replace16(check, skb, oldip->s6_addr32, - newip->s6_addr32, 1); + newip->s6_addr32, true); } static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb, @@ -155,7 +155,7 @@ static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb, } } else inet_proto_csum_replace2(check, skb, - htons(oldlen), htons(datalen), 1); + htons(oldlen), htons(datalen), true); } #if IS_ENABLED(CONFIG_NF_CT_NETLINK) diff --git a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c index 2205e8eeeacf..57593b00c5b4 100644 --- a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c @@ -73,7 +73,7 @@ icmpv6_manip_pkt(struct sk_buff *skb, hdr->icmp6_type == ICMPV6_ECHO_REPLY) { inet_proto_csum_replace2(&hdr->icmp6_cksum, skb, hdr->icmp6_identifier, - tuple->src.u.icmp.id, 0); + tuple->src.u.icmp.id, false); hdr->icmp6_identifier = tuple->src.u.icmp.id; } return true; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 54fccf0d705d..c3733049715e 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -665,6 +665,12 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict, { int m; bool match_do_rr = false; + struct inet6_dev *idev = rt->rt6i_idev; + struct net_device *dev = rt->dst.dev; + + if (dev && !netif_carrier_ok(dev) && + idev->cnf.ignore_routes_with_linkdown) + goto out; if (rt6_check_expired(rt)) goto out; @@ -1779,8 +1785,14 @@ int ip6_route_add(struct fib6_config *cfg) if (err) goto out; rt->rt6i_lwtstate = lwtstate_get(lwtstate); - if (lwtunnel_output_redirect(rt->rt6i_lwtstate)) + if (lwtunnel_output_redirect(rt->rt6i_lwtstate)) { + rt->rt6i_lwtstate->orig_output = rt->dst.output; rt->dst.output = lwtunnel_output6; + } + if (lwtunnel_input_redirect(rt->rt6i_lwtstate)) { + rt->rt6i_lwtstate->orig_input = rt->dst.input; + rt->dst.input = lwtunnel_input6; + } } ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len); @@ -1844,6 +1856,7 @@ int ip6_route_add(struct fib6_config *cfg) int gwa_type; gw_addr = &cfg->fc_gateway; + gwa_type = ipv6_addr_type(gw_addr); /* if gw_addr is local we will fail to detect this in case * address is still TENTATIVE (DAD in 
progress). rt6_lookup() @@ -1851,11 +1864,12 @@ int ip6_route_add(struct fib6_config *cfg) * prefix route was assigned to, which might be non-loopback. */ err = -EINVAL; - if (ipv6_chk_addr_and_flags(net, gw_addr, NULL, 0, 0)) + if (ipv6_chk_addr_and_flags(net, gw_addr, + gwa_type & IPV6_ADDR_LINKLOCAL ? + dev : NULL, 0, 0)) goto out; rt->rt6i_gateway = *gw_addr; - gwa_type = ipv6_addr_type(gw_addr); if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) { struct rt6_info *grt; @@ -2885,6 +2899,11 @@ static int rt6_fill_node(struct net *net, else rtm->rtm_type = RTN_UNICAST; rtm->rtm_flags = 0; + if (!netif_carrier_ok(rt->dst.dev)) { + rtm->rtm_flags |= RTNH_F_LINKDOWN; + if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown) + rtm->rtm_flags |= RTNH_F_DEAD; + } rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_protocol = rt->rt6i_protocol; if (rt->rt6i_flags & RTF_DYNAMIC) diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index ac35a28599be..94428fd85b2f 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -742,7 +742,7 @@ static int ipip_rcv(struct sk_buff *skb) goto drop; if (iptunnel_pull_header(skb, 0, tpi.proto)) goto drop; - return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); + return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, log_ecn_error); } return 1; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 52dd0d9974d6..97d9314ea361 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -943,7 +943,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb) &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb)); if (req) { nsk = tcp_check_req(sk, skb, req, false); - if (!nsk) + if (!nsk || nsk == sk) reqsk_put(req); return nsk; } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index e51fc3eee6db..0aba654f5b91 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1496,7 +1496,8 @@ int __net_init udp6_proc_init(struct net *net) return udp_proc_register(net, &udp6_seq_afinfo); } -void udp6_proc_exit(struct net *net) { +void udp6_proc_exit(struct net *net) +{ udp_proc_unregister(net, &udp6_seq_afinfo); } #endif /* CONFIG_PROC_FS */ diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c index 901ef6f8addc..f7fbdbabe50e 100644 --- a/net/ipv6/xfrm6_mode_tunnel.c +++ b/net/ipv6/xfrm6_mode_tunnel.c @@ -20,10 +20,9 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb) { - const struct ipv6hdr *outer_iph = ipv6_hdr(skb); struct ipv6hdr *inner_iph = ipipv6_hdr(skb); - if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph))) + if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos)) IP6_ECN_set_ce(inner_iph); } diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index ed0583c1b9fc..a74013d3eceb 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -26,7 +26,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo; -static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, +static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif, const xfrm_address_t *saddr, const xfrm_address_t *daddr) { @@ -35,6 +35,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int err; memset(&fl6, 0, sizeof(fl6)); + fl6.flowi6_oif = oif; memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr)); if (saddr) memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr)); @@ -50,13 +51,13 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, return dst; } -static int xfrm6_get_saddr(struct net *net, +static int xfrm6_get_saddr(struct net *net, int oif, xfrm_address_t *saddr, xfrm_address_t *daddr) { struct dst_entry *dst; struct net_device *dev; 
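The oif parameter threaded through the xfrm lookups here mirrors the IPv4 change earlier in this series: the caller's output interface is copied into the flow key so the route lookup can be scoped to a single device. A minimal standalone sketch of such a scoped lookup; the helper name is illustrative, while ip6_route_output() is the regular kernel routing API:

    #include <linux/string.h>
    #include <net/ip6_route.h>

    /* Sketch: an output-interface-scoped IPv6 route lookup. */
    static struct dst_entry *lookup_scoped(struct net *net, int oif,
                                           const struct in6_addr *daddr)
    {
            struct flowi6 fl6;

            memset(&fl6, 0, sizeof(fl6));
            fl6.flowi6_oif = oif;   /* constrain the lookup to this device */
            fl6.daddr = *daddr;

            return ip6_route_output(net, NULL, &fl6);
    }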
- dst = xfrm6_dst_lookup(net, 0, NULL, daddr); + dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr); if (IS_ERR(dst)) return -EHOSTUNREACH; diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index 086de496a4c1..3891cbd2adea 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig @@ -7,7 +7,6 @@ config MAC80211 select CRYPTO_CCM select CRYPTO_GCM select CRC32 - select AVERAGE ---help--- This option enables the hardware independent IEEE 802.11 networking stack. diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile index 3275f01881be..783e891b7525 100644 --- a/net/mac80211/Makefile +++ b/net/mac80211/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_MAC80211) += mac80211.o # mac80211 objects mac80211-y := \ main.o status.o \ + driver-ops.o \ sta_info.o \ wep.o \ wpa.o \ diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c index 4192806be3d3..bdf0790d89cc 100644 --- a/net/mac80211/aes_cmac.c +++ b/net/mac80211/aes_cmac.c @@ -145,20 +145,3 @@ void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm) { crypto_free_cipher(tfm); } - -void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf, - u8 *k1, u8 *k2) -{ - u8 l[AES_BLOCK_SIZE] = {}; - struct ieee80211_key *key = - container_of(keyconf, struct ieee80211_key, conf); - - crypto_cipher_encrypt_one(key->u.aes_cmac.tfm, l, l); - - memcpy(k1, l, AES_BLOCK_SIZE); - gf_mulx(k1); - - memcpy(k2, k1, AES_BLOCK_SIZE); - gf_mulx(k2); -} -EXPORT_SYMBOL(ieee80211_aes_cmac_calculate_k1_k2); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index bf7023f6c327..685ec13ed7c2 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1019,6 +1019,65 @@ static int sta_apply_auth_flags(struct ieee80211_local *local, return 0; } +static void sta_apply_mesh_params(struct ieee80211_local *local, + struct sta_info *sta, + struct station_parameters *params) +{ +#ifdef CONFIG_MAC80211_MESH + struct ieee80211_sub_if_data *sdata = sta->sdata; + u32 changed = 0; + + if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) { + switch (params->plink_state) { + case NL80211_PLINK_ESTAB: + if (sta->mesh->plink_state != NL80211_PLINK_ESTAB) + changed = mesh_plink_inc_estab_count(sdata); + sta->mesh->plink_state = params->plink_state; + + ieee80211_mps_sta_status_update(sta); + changed |= ieee80211_mps_set_sta_local_pm(sta, + sdata->u.mesh.mshcfg.power_mode); + break; + case NL80211_PLINK_LISTEN: + case NL80211_PLINK_BLOCKED: + case NL80211_PLINK_OPN_SNT: + case NL80211_PLINK_OPN_RCVD: + case NL80211_PLINK_CNF_RCVD: + case NL80211_PLINK_HOLDING: + if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) + changed = mesh_plink_dec_estab_count(sdata); + sta->mesh->plink_state = params->plink_state; + + ieee80211_mps_sta_status_update(sta); + changed |= ieee80211_mps_set_sta_local_pm(sta, + NL80211_MESH_POWER_UNKNOWN); + break; + default: + /* nothing */ + break; + } + } + + switch (params->plink_action) { + case NL80211_PLINK_ACTION_NO_ACTION: + /* nothing */ + break; + case NL80211_PLINK_ACTION_OPEN: + changed |= mesh_plink_open(sta); + break; + case NL80211_PLINK_ACTION_BLOCK: + changed |= mesh_plink_block(sta); + break; + } + + if (params->local_pm) + changed |= ieee80211_mps_set_sta_local_pm(sta, + params->local_pm); + + ieee80211_mbss_info_change_notify(sdata, changed); +#endif +} + static int sta_apply_parameters(struct ieee80211_local *local, struct sta_info *sta, struct station_parameters *params) @@ -1076,7 +1135,6 @@ static int sta_apply_parameters(struct ieee80211_local *local, } if (mask & BIT(NL80211_STA_FLAG_MFP)) { - 
sta->sta.mfp = !!(set & BIT(NL80211_STA_FLAG_MFP)); if (set & BIT(NL80211_STA_FLAG_MFP)) set_sta_flag(sta, WLAN_STA_MFP); else @@ -1097,6 +1155,12 @@ static int sta_apply_parameters(struct ieee80211_local *local, params->ext_capab[3] & WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH) set_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH); + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && + ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) && + params->ext_capab_len >= 8 && + params->ext_capab[7] & WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) + set_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW); + if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) { sta->sta.uapsd_queues = params->uapsd_queues; sta->sta.max_sp = params->max_sp; @@ -1144,62 +1208,8 @@ static int sta_apply_parameters(struct ieee80211_local *local, band, false); } - if (ieee80211_vif_is_mesh(&sdata->vif)) { -#ifdef CONFIG_MAC80211_MESH - u32 changed = 0; - - if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) { - switch (params->plink_state) { - case NL80211_PLINK_ESTAB: - if (sta->plink_state != NL80211_PLINK_ESTAB) - changed = mesh_plink_inc_estab_count( - sdata); - sta->plink_state = params->plink_state; - - ieee80211_mps_sta_status_update(sta); - changed |= ieee80211_mps_set_sta_local_pm(sta, - sdata->u.mesh.mshcfg.power_mode); - break; - case NL80211_PLINK_LISTEN: - case NL80211_PLINK_BLOCKED: - case NL80211_PLINK_OPN_SNT: - case NL80211_PLINK_OPN_RCVD: - case NL80211_PLINK_CNF_RCVD: - case NL80211_PLINK_HOLDING: - if (sta->plink_state == NL80211_PLINK_ESTAB) - changed = mesh_plink_dec_estab_count( - sdata); - sta->plink_state = params->plink_state; - - ieee80211_mps_sta_status_update(sta); - changed |= ieee80211_mps_set_sta_local_pm(sta, - NL80211_MESH_POWER_UNKNOWN); - break; - default: - /* nothing */ - break; - } - } - - switch (params->plink_action) { - case NL80211_PLINK_ACTION_NO_ACTION: - /* nothing */ - break; - case NL80211_PLINK_ACTION_OPEN: - changed |= mesh_plink_open(sta); - break; - case NL80211_PLINK_ACTION_BLOCK: - changed |= mesh_plink_block(sta); - break; - } - - if (params->local_pm) - changed |= - ieee80211_mps_set_sta_local_pm(sta, - params->local_pm); - ieee80211_mbss_info_change_notify(sdata, changed); -#endif - } + if (ieee80211_vif_is_mesh(&sdata->vif)) + sta_apply_mesh_params(local, sta, params); /* set the STA state after all sta info from usermode has been set */ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { @@ -2358,6 +2368,8 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata, const u8 *ap; enum ieee80211_smps_mode old_req; int err; + struct sta_info *sta; + bool tdls_peer_found = false; lockdep_assert_held(&sdata->wdev.mtx); @@ -2382,11 +2394,22 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata, ap = sdata->u.mgd.associated->bssid; + rcu_read_lock(); + list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) { + if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded || + !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) + continue; + + tdls_peer_found = true; + break; + } + rcu_read_unlock(); + if (smps_mode == IEEE80211_SMPS_AUTOMATIC) { - if (sdata->u.mgd.powersave) - smps_mode = IEEE80211_SMPS_DYNAMIC; - else + if (tdls_peer_found || !sdata->u.mgd.powersave) smps_mode = IEEE80211_SMPS_OFF; + else + smps_mode = IEEE80211_SMPS_DYNAMIC; } /* send SM PS frame to AP */ @@ -2394,6 +2417,8 @@ int __ieee80211_request_smps_mgd(struct ieee80211_sub_if_data *sdata, ap, ap); if (err) sdata->u.mgd.req_smps = old_req; + else if (smps_mode != IEEE80211_SMPS_OFF && tdls_peer_found) + 
ieee80211_teardown_tdls_peers(sdata); return err; } @@ -2479,16 +2504,26 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, sdata->rc_rateidx_mask[i] = mask->control[i].legacy; memcpy(sdata->rc_rateidx_mcs_mask[i], mask->control[i].ht_mcs, sizeof(mask->control[i].ht_mcs)); + memcpy(sdata->rc_rateidx_vht_mcs_mask[i], + mask->control[i].vht_mcs, + sizeof(mask->control[i].vht_mcs)); sdata->rc_has_mcs_mask[i] = false; + sdata->rc_has_vht_mcs_mask[i] = false; if (!sband) continue; - for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) - if (~sdata->rc_rateidx_mcs_mask[i][j]) { + for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) { + if (~sdata->rc_rateidx_mcs_mask[i][j]) sdata->rc_has_mcs_mask[i] = true; + + if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) + sdata->rc_has_vht_mcs_mask[i] = true; + + if (sdata->rc_has_mcs_mask[i] && + sdata->rc_has_vht_mcs_mask[i]) break; - } + } } return 0; diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index f01c18a3160e..1d1b9b7bdefe 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -190,7 +190,7 @@ ieee80211_find_reservation_chanctx(struct ieee80211_local *local, return NULL; } -static enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta) +enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta) { switch (sta->bandwidth) { case IEEE80211_STA_RX_BW_20: @@ -264,9 +264,17 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local, case NL80211_IFTYPE_AP_VLAN: width = ieee80211_get_max_required_bw(sdata); break; + case NL80211_IFTYPE_STATION: + /* + * The ap's sta->bandwidth is not set yet at this + * point, so take the width from the chandef, but + * account also for TDLS peers + */ + width = max(vif->bss_conf.chandef.width, + ieee80211_get_max_required_bw(sdata)); + break; case NL80211_IFTYPE_P2P_DEVICE: continue; - case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_MESH_POINT: @@ -554,12 +562,13 @@ static void ieee80211_free_chanctx(struct ieee80211_local *local, kfree_rcu(ctx, rcu_head); } -static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, - struct ieee80211_chanctx *ctx) +void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx) { struct ieee80211_chanctx_conf *conf = &ctx->conf; struct ieee80211_sub_if_data *sdata; const struct cfg80211_chan_def *compat = NULL; + struct sta_info *sta; lockdep_assert_held(&local->chanctx_mtx); @@ -581,6 +590,20 @@ static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, if (WARN_ON_ONCE(!compat)) break; } + + /* TDLS peers can sometimes affect the chandef width */ + list_for_each_entry_rcu(sta, &local->sta_list, list) { + if (!sta->uploaded || + !test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) || + !test_sta_flag(sta, WLAN_STA_AUTHORIZED) || + !sta->tdls_chandef.chan) + continue; + + compat = cfg80211_chandef_compatible(&sta->tdls_chandef, + compat); + if (WARN_ON_ONCE(!compat)) + break; + } rcu_read_unlock(); if (!compat) diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 3ea8b7de9633..ced6bf3be8d6 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -122,6 +122,7 @@ static const char *hw_flag_names[NUM_IEEE80211_HW_FLAGS + 1] = { FLAG(CHANCTX_STA_CSA), FLAG(SUPPORTS_CLONED_SKBS), FLAG(SINGLE_SCAN_ON_ALL_BANDS), + FLAG(TDLS_WIDER_BW), /* keep last for the build bug below */ (void *)0x1 @@ -277,7 +278,6 @@ void debugfs_hw_add(struct ieee80211_local *local) DEBUGFS_STATS_ADD(rx_handlers_queued); 
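Returning to the VHT rate-mask support added above: the per-band u16 masks are seeded from the peer capability's rx_mcs_map, which packs two bits per spatial stream (0, 1 and 2 select a highest MCS of 7, 8 or 9; 3 marks the stream unsupported). A sketch of the expected expansion, in the spirit of the ieee80211_get_vht_mask_from_cap() helper this series introduces; illustrative only, the in-tree version may differ in detail:

    #include <linux/ieee80211.h>
    #include <linux/nl80211.h>

    static void vht_mask_from_cap(__le16 vht_mcs_map,
                                  u16 vht_mask[NL80211_VHT_NSS_MAX])
    {
            u16 bits, cap = le16_to_cpu(vht_mcs_map);
            int i;

            for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
                    bits = (cap >> (2 * i)) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
                    switch (bits) {
                    case IEEE80211_VHT_MCS_SUPPORT_0_7:
                            vht_mask[i] = 0x00FF;   /* MCS 0-7 */
                            break;
                    case IEEE80211_VHT_MCS_SUPPORT_0_8:
                            vht_mask[i] = 0x01FF;   /* MCS 0-8 */
                            break;
                    case IEEE80211_VHT_MCS_SUPPORT_0_9:
                            vht_mask[i] = 0x03FF;   /* MCS 0-9 */
                            break;
                    default:
                            vht_mask[i] = 0;        /* stream unsupported */
                            break;
                    }
            }
    }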
DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc); DEBUGFS_STATS_ADD(rx_handlers_drop_defrag); - DEBUGFS_STATS_ADD(rx_handlers_drop_short); DEBUGFS_STATS_ADD(tx_expand_skb_head); DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned); DEBUGFS_STATS_ADD(rx_expand_skb_head_defrag); diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index e82bf1e9d7a8..702ca122c498 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c @@ -57,7 +57,6 @@ KEY_CONF_FILE(keylen, D); KEY_CONF_FILE(keyidx, D); KEY_CONF_FILE(hw_key_idx, D); KEY_FILE(flags, X); -KEY_FILE(tx_rx_count, D); KEY_READ(ifindex, sdata->name, "%s\n"); KEY_OPS(ifindex); @@ -310,7 +309,6 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key) DEBUGFS_ADD(flags); DEBUGFS_ADD(keyidx); DEBUGFS_ADD(hw_key_idx); - DEBUGFS_ADD(tx_rx_count); DEBUGFS_ADD(algorithm); DEBUGFS_ADD(tx_spec); DEBUGFS_ADD(rx_spec); diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index c09c0131bfa2..1021e87c051f 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -186,6 +186,38 @@ IEEE80211_IF_FILE(rc_rateidx_mcs_mask_2ghz, IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz, rc_rateidx_mcs_mask[IEEE80211_BAND_5GHZ], HEXARRAY); +static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_2ghz( + const struct ieee80211_sub_if_data *sdata, + char *buf, int buflen) +{ + int i, len = 0; + const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_2GHZ]; + + for (i = 0; i < NL80211_VHT_NSS_MAX; i++) + len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]); + len += scnprintf(buf + len, buflen - len, "\n"); + + return len; +} + +IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_2ghz); + +static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_5ghz( + const struct ieee80211_sub_if_data *sdata, + char *buf, int buflen) +{ + int i, len = 0; + const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_5GHZ]; + + for (i = 0; i < NL80211_VHT_NSS_MAX; i++) + len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]); + len += scnprintf(buf + len, buflen - len, "\n"); + + return len; +} + +IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_5ghz); + IEEE80211_IF_FILE(flags, flags, HEX); IEEE80211_IF_FILE(state, state, LHEX); IEEE80211_IF_FILE(txpower, vif.bss_conf.txpower, DEC); @@ -565,6 +597,8 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata) DEBUGFS_ADD(rc_rateidx_mask_5ghz); DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); + DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_2ghz); + DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz); DEBUGFS_ADD(hw_queues); } diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c new file mode 100644 index 000000000000..267c3b1ca047 --- /dev/null +++ b/net/mac80211/driver-ops.c @@ -0,0 +1,41 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include <net/mac80211.h> +#include "ieee80211_i.h" +#include "trace.h" +#include "driver-ops.h" + +__must_check +int drv_sta_state(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + int ret = 0; + + might_sleep(); + + sdata = get_bss_sdata(sdata); + if (!check_sdata_in_driver(sdata)) + return -EIO; + + trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state); + if (local->ops->sta_state) { + ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta, + old_state, new_state); + } else if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC) { + ret = drv_sta_add(local, sdata, &sta->sta); + if (ret == 0) + sta->uploaded = true; + } else if (old_state == IEEE80211_STA_ASSOC && + new_state == IEEE80211_STA_AUTH) { + drv_sta_remove(local, sdata, &sta->sta); + } + trace_drv_return_int(local, ret); + return ret; +} diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 32a2e707e222..02d91332d7dd 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -573,37 +573,12 @@ static inline void drv_sta_pre_rcu_remove(struct ieee80211_local *local, trace_drv_return_void(local); } -static inline __must_check +__must_check int drv_sta_state(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct sta_info *sta, enum ieee80211_sta_state old_state, - enum ieee80211_sta_state new_state) -{ - int ret = 0; - - might_sleep(); - - sdata = get_bss_sdata(sdata); - if (!check_sdata_in_driver(sdata)) - return -EIO; - - trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state); - if (local->ops->sta_state) { - ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta, - old_state, new_state); - } else if (old_state == IEEE80211_STA_AUTH && - new_state == IEEE80211_STA_ASSOC) { - ret = drv_sta_add(local, sdata, &sta->sta); - if (ret == 0) - sta->uploaded = true; - } else if (old_state == IEEE80211_STA_ASSOC && - new_state == IEEE80211_STA_AUTH) { - drv_sta_remove(local, sdata, &sta->sta); - } - trace_drv_return_int(local, ret); - return ret; -} + enum ieee80211_sta_state new_state); static inline void drv_sta_rc_update(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index b12f61507f9f..6e52659f923f 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -84,13 +84,13 @@ struct ieee80211_local; #define IEEE80211_DEAUTH_FRAME_LEN (24 /* hdr */ + 2 /* reason */) struct ieee80211_fragment_entry { - unsigned long first_frag_time; - unsigned int seq; - unsigned int rx_queue; - unsigned int last_frag; - unsigned int extra_len; struct sk_buff_head skb_list; - int ccmp; /* Whether fragments were encrypted with CCMP */ + unsigned long first_frag_time; + u16 seq; + u16 extra_len; + u16 last_frag; + u8 rx_queue; + bool ccmp; /* Whether fragments were encrypted with CCMP */ u8 last_pn[6]; /* PN of the last fragment if CCMP was used */ }; @@ -181,7 +181,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result; /** * enum ieee80211_packet_rx_flags - packet RX flags - * @IEEE80211_RX_FRAGMENTED: fragmented frame * @IEEE80211_RX_AMSDU: a-MSDU packet * @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed * @IEEE80211_RX_DEFERRED_RELEASE: frame was subjected to receive reordering @@ -190,7 +189,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result; * @rx_flags field of &struct ieee80211_rx_status.
*/ enum ieee80211_packet_rx_flags { - IEEE80211_RX_FRAGMENTED = BIT(2), IEEE80211_RX_AMSDU = BIT(3), IEEE80211_RX_MALFORMED_ACTION_FRM = BIT(4), IEEE80211_RX_DEFERRED_RELEASE = BIT(5), @@ -202,8 +200,6 @@ enum ieee80211_packet_rx_flags { * @IEEE80211_RX_CMNTR: received on cooked monitor already * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported * to cfg80211_report_obss_beacon(). - * @IEEE80211_RX_REORDER_TIMER: this frame is released by the - * reorder buffer timeout timer, not the normal RX path * * These flags are used across handling multiple interfaces * for a single frame. @@ -211,10 +207,10 @@ enum ieee80211_packet_rx_flags { enum ieee80211_rx_flags { IEEE80211_RX_CMNTR = BIT(0), IEEE80211_RX_BEACON_REPORTED = BIT(1), - IEEE80211_RX_REORDER_TIMER = BIT(2), }; struct ieee80211_rx_data { + struct napi_struct *napi; struct sk_buff *skb; struct ieee80211_local *local; struct ieee80211_sub_if_data *sdata; @@ -725,6 +721,7 @@ struct ieee80211_if_mesh { * back to wireless media and to the local net stack. * @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume. * @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver + * @IEEE80211_SDATA_MU_MIMO_OWNER: indicates interface owns MU-MIMO capability */ enum ieee80211_sub_if_data_flags { IEEE80211_SDATA_ALLMULTI = BIT(0), @@ -732,6 +729,7 @@ enum ieee80211_sub_if_data_flags { IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3), IEEE80211_SDATA_DISCONNECT_RESUME = BIT(4), IEEE80211_SDATA_IN_DRIVER = BIT(5), + IEEE80211_SDATA_MU_MIMO_OWNER = BIT(6), }; /** @@ -903,6 +901,9 @@ struct ieee80211_sub_if_data { bool rc_has_mcs_mask[IEEE80211_NUM_BANDS]; u8 rc_rateidx_mcs_mask[IEEE80211_NUM_BANDS][IEEE80211_HT_MCS_MASK_LEN]; + bool rc_has_vht_mcs_mask[IEEE80211_NUM_BANDS]; + u16 rc_rateidx_vht_mcs_mask[IEEE80211_NUM_BANDS][NL80211_VHT_NSS_MAX]; + union { struct ieee80211_if_ap ap; struct ieee80211_if_wds wds; @@ -1010,7 +1011,6 @@ enum sdata_queue_type { IEEE80211_SDATA_QUEUE_AGG_STOP = 2, IEEE80211_SDATA_QUEUE_RX_AGG_START = 3, IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4, - IEEE80211_SDATA_QUEUE_TDLS_CHSW = 5, }; enum { @@ -1286,7 +1286,6 @@ struct ieee80211_local { unsigned int rx_handlers_queued; unsigned int rx_handlers_drop_nullfunc; unsigned int rx_handlers_drop_defrag; - unsigned int rx_handlers_drop_short; unsigned int tx_expand_skb_head; unsigned int tx_expand_skb_head_cloned; unsigned int rx_expand_skb_head_defrag; @@ -1348,14 +1347,16 @@ struct ieee80211_local { struct ieee80211_sub_if_data __rcu *p2p_sdata; - struct napi_struct *napi; - /* virtual monitor interface */ struct ieee80211_sub_if_data __rcu *monitor_sdata; struct cfg80211_chan_def monitor_chandef; /* extended capabilities provided by mac80211 */ u8 ext_capa[8]; + + /* TDLS channel switch */ + struct work_struct tdls_chsw_work; + struct sk_buff_head skb_queue_tdls_chsw; }; static inline struct ieee80211_sub_if_data * @@ -1715,6 +1716,8 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, enum ieee80211_band band, bool nss_only); void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta_vht_cap *vht_cap); +void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, + u16 vht_mask[NL80211_VHT_NSS_MAX]); /* Spectrum management */ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, @@ -1763,8 +1766,6 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw) /* utility functions/constants */ extern const void *const mac80211_wiphy_privid; /* for wiphy privid */ -u8 
*ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, - enum nl80211_iftype type); int ieee80211_frame_duration(enum ieee80211_band band, size_t len, int rate, int erp, int short_preamble, int shift); @@ -2042,6 +2043,9 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, enum ieee80211_chanctx_mode chanmode, u8 radar_detect); int ieee80211_max_num_channels(struct ieee80211_local *local); +enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta); +void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx); /* TDLS */ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, @@ -2058,8 +2062,8 @@ int ieee80211_tdls_channel_switch(struct wiphy *wiphy, struct net_device *dev, void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy, struct net_device *dev, const u8 *addr); -void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata, - struct sk_buff *skb); +void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata); +void ieee80211_tdls_chsw_work(struct work_struct *wk); extern const struct ethtool_ops ieee80211_ethtool_ops; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 553ac6dd4867..6964fc6a8ea2 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1242,8 +1242,6 @@ static void ieee80211_iface_work(struct work_struct *work) WLAN_BACK_RECIPIENT, 0, false); mutex_unlock(&local->sta_mtx); - } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_TDLS_CHSW) { - ieee80211_process_tdls_channel_switch(sdata, skb); } else if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_BACK) { int len = skb->len; @@ -1790,13 +1788,23 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, sband = local->hw.wiphy->bands[i]; sdata->rc_rateidx_mask[i] = sband ? 
(1 << sband->n_bitrates) - 1 : 0; - if (sband) + if (sband) { + __le16 cap; + u16 *vht_rate_mask; + memcpy(sdata->rc_rateidx_mcs_mask[i], sband->ht_cap.mcs.rx_mask, sizeof(sdata->rc_rateidx_mcs_mask[i])); - else + + cap = sband->vht_cap.vht_mcs.rx_mcs_map; + vht_rate_mask = sdata->rc_rateidx_vht_mcs_mask[i]; + ieee80211_get_vht_mask_from_cap(cap, vht_rate_mask); + } else { memset(sdata->rc_rateidx_mcs_mask[i], 0, sizeof(sdata->rc_rateidx_mcs_mask[i])); + memset(sdata->rc_rateidx_vht_mcs_mask[i], 0, + sizeof(sdata->rc_rateidx_vht_mcs_mask[i])); + } } ieee80211_set_default_queues(sdata); diff --git a/net/mac80211/key.c b/net/mac80211/key.c index b22df3a79a41..44388d6a1d8e 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -336,7 +336,6 @@ static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, ieee80211_check_fast_xmit(sta); } else { rcu_assign_pointer(sta->gtk[idx], new); - sta->gtk_idx = idx; } } else { defunikey = old && diff --git a/net/mac80211/key.h b/net/mac80211/key.h index 3f4f9eaac140..9951ef06323e 100644 --- a/net/mac80211/key.h +++ b/net/mac80211/key.h @@ -115,9 +115,6 @@ struct ieee80211_key { } gen; } u; - /* number of times this key has been used */ - int tx_rx_count; - #ifdef CONFIG_MAC80211_DEBUGFS struct { struct dentry *stalink; diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 3c63468b4dfb..ff79a13d231d 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -629,6 +629,8 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, INIT_WORK(&local->sched_scan_stopped_work, ieee80211_sched_scan_stopped_work); + INIT_WORK(&local->tdls_chsw_work, ieee80211_tdls_chsw_work); + spin_lock_init(&local->ack_status_lock); idr_init(&local->ack_status_frames); @@ -645,6 +647,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, skb_queue_head_init(&local->skb_queue); skb_queue_head_init(&local->skb_queue_unreliable); + skb_queue_head_init(&local->skb_queue_tdls_chsw); ieee80211_alloc_led_names(local); @@ -1132,18 +1135,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) } EXPORT_SYMBOL(ieee80211_register_hw); -void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi, - struct net_device *napi_dev, - int (*poll)(struct napi_struct *, int), - int weight) -{ - struct ieee80211_local *local = hw_to_local(hw); - - netif_napi_add(napi_dev, napi, poll, weight); - local->napi = napi; -} -EXPORT_SYMBOL_GPL(ieee80211_napi_add); - void ieee80211_unregister_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); @@ -1173,6 +1164,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) cancel_work_sync(&local->restart_work); cancel_work_sync(&local->reconfig_filter); + cancel_work_sync(&local->tdls_chsw_work); flush_work(&local->sched_scan_stopped_work); ieee80211_clear_tx_pending(local); @@ -1183,6 +1175,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) wiphy_warn(local->hw.wiphy, "skb_queue not empty\n"); skb_queue_purge(&local->skb_queue); skb_queue_purge(&local->skb_queue_unreliable); + skb_queue_purge(&local->skb_queue_tdls_chsw); destroy_workqueue(local->workqueue); wiphy_unregister(local->hw.wiphy); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 817098add1d6..e06a5ca7c9a9 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -158,7 +158,7 @@ void mesh_sta_cleanup(struct sta_info *sta) changed = mesh_accept_plinks_update(sdata); if (!sdata->u.mesh.user_mpm) { changed |= mesh_plink_deactivate(sta); - del_timer_sync(&sta->plink_timer); + 
del_timer_sync(&sta->mesh->plink_timer); } if (changed) diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 085edc1d056b..d80e0a4c16cf 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -19,15 +19,6 @@ #define MAX_PREQ_QUEUE_LEN 64 -/* Destination only */ -#define MP_F_DO 0x1 -/* Reply and forward */ -#define MP_F_RF 0x2 -/* Unknown Sequence Number */ -#define MP_F_USN 0x01 -/* Reason code Present */ -#define MP_F_RCODE 0x02 - static void mesh_queue_preq(struct mesh_path *, u8); static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae) @@ -79,6 +70,12 @@ static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae) #define MSEC_TO_TU(x) (x*1000/1024) #define SN_GT(x, y) ((s32)(y - x) < 0) #define SN_LT(x, y) ((s32)(x - y) < 0) +#define MAX_SANE_SN_DELTA 32 + +static inline u32 SN_DELTA(u32 x, u32 y) +{ + return x >= y ? x - y : y - x; +} #define net_traversal_jiffies(s) \ msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime) @@ -279,15 +276,10 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata, *pos++ = ttl; /* number of destinations */ *pos++ = 1; - /* - * flags bit, bit 1 is unset if we know the sequence number and - * bit 2 is set if we have a reason code + /* Flags field has AE bit only as defined in + * sec 8.4.2.117 IEEE802.11-2012 */ *pos = 0; - if (!target_sn) - *pos |= MP_F_USN; - if (target_rcode) - *pos |= MP_F_RCODE; pos++; memcpy(pos, target, ETH_ALEN); pos += ETH_ALEN; @@ -316,8 +308,9 @@ void ieee80211s_update_metric(struct ieee80211_local *local, failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK); /* moving average, scaled to 100 */ - sta->fail_avg = ((80 * sta->fail_avg + 5) / 100 + 20 * failed); - if (sta->fail_avg > 95) + sta->mesh->fail_avg = + ((80 * sta->mesh->fail_avg + 5) / 100 + 20 * failed); + if (sta->mesh->fail_avg > 95) mesh_plink_broken(sta); } @@ -333,7 +326,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local, u32 tx_time, estimated_retx; u64 result; - if (sta->fail_avg >= 100) + if (sta->mesh->fail_avg >= 100) return MAX_METRIC; sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo); @@ -341,7 +334,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local, if (WARN_ON(!rate)) return MAX_METRIC; - err = (sta->fail_avg << ARITH_SHIFT) / 100; + err = (sta->mesh->fail_avg << ARITH_SHIFT) / 100; /* bitrate is in units of 100 Kbps, while we need rate in units of * 1Mbps. This will be corrected on tx_time computation. 
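The HWMP hunks above lean on modular serial-number arithmetic: SN_GT() casts the unsigned difference to s32 so comparisons survive u32 wraparound, and the new SN_DELTA() helper paired with MAX_SANE_SN_DELTA treats a wildly different sequence number as a sign that the peer rebooted. A minimal standalone sketch of that logic follows; the lowercase helper names are illustrative stand-ins for the kernel's macros and inline helpers, not kernel API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_SANE_SN_DELTA 32

/* "greater than" on wrapping sequence numbers: the signed view of the
 * difference is negative exactly when x is ahead of y modulo 2^32 */
static bool sn_gt(uint32_t x, uint32_t y)
{
	return (int32_t)(y - x) < 0;
}

/* absolute distance between two sequence numbers */
static uint32_t sn_delta(uint32_t x, uint32_t y)
{
	return x >= y ? x - y : y - x;
}

int main(void)
{
	/* 5 has wrapped past 0xffffffff, so it still counts as newer */
	printf("sn_gt(5, 0xfffffffe) = %d\n", sn_gt(5, 0xfffffffeU));

	/* a delta far beyond MAX_SANE_SN_DELTA suggests the originator
	 * rebooted or restarted -- the "bounced" case handled in
	 * hwmp_route_info_get() above */
	printf("bounced = %d\n", sn_delta(100000, 7) > MAX_SANE_SN_DELTA);
	return 0;
}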
@@ -441,6 +434,26 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, process = false; fresh_info = false; } + } else if (!(mpath->flags & MESH_PATH_ACTIVE)) { + bool have_sn, newer_sn, bounced; + + have_sn = mpath->flags & MESH_PATH_SN_VALID; + newer_sn = have_sn && SN_GT(orig_sn, mpath->sn); + bounced = have_sn && + (SN_DELTA(orig_sn, mpath->sn) > + MAX_SANE_SN_DELTA); + + if (!have_sn || newer_sn) { + /* if SN is newer than what we had + * then we can take it */; + } else if (bounced) { + /* if SN is way different than what + * we had then assume the other side + * rebooted or restarted */; + } else { + process = false; + fresh_info = false; + } } } else { mpath = mesh_path_add(sdata, orig_addr); @@ -570,15 +583,13 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, SN_LT(mpath->sn, target_sn)) { mpath->sn = target_sn; mpath->flags |= MESH_PATH_SN_VALID; - } else if ((!(target_flags & MP_F_DO)) && + } else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) && (mpath->flags & MESH_PATH_ACTIVE)) { reply = true; target_metric = mpath->metric; target_sn = mpath->sn; - if (target_flags & MP_F_RF) - target_flags |= MP_F_DO; - else - forward = false; + /* Case E2 of sec 13.10.9.3 IEEE 802.11-2012*/ + target_flags |= IEEE80211_PREQ_TO_FLAG; } } rcu_read_unlock(); @@ -736,9 +747,12 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata, if (mpath->flags & MESH_PATH_ACTIVE && ether_addr_equal(ta, sta->sta.addr) && (!(mpath->flags & MESH_PATH_SN_VALID) || - SN_GT(target_sn, mpath->sn))) { + SN_GT(target_sn, mpath->sn) || target_sn == 0)) { mpath->flags &= ~MESH_PATH_ACTIVE; - mpath->sn = target_sn; + if (target_sn != 0) + mpath->sn = target_sn; + else + mpath->sn += 1; spin_unlock_bh(&mpath->state_lock); if (!ifmsh->mshcfg.dot11MeshForwarding) goto endperr; @@ -862,7 +876,7 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, rcu_read_lock(); sta = sta_info_get(sdata, mgmt->sa); - if (!sta || sta->plink_state != NL80211_PLINK_ESTAB) { + if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) { rcu_read_unlock(); return; } @@ -974,7 +988,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct mesh_preq_queue *preq_node; struct mesh_path *mpath; - u8 ttl, target_flags; + u8 ttl, target_flags = 0; const u8 *da; u32 lifetime; @@ -1033,9 +1047,9 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) } if (preq_node->flags & PREQ_Q_F_REFRESH) - target_flags = MP_F_DO; + target_flags |= IEEE80211_PREQ_TO_FLAG; else - target_flags = MP_F_RF; + target_flags &= ~IEEE80211_PREQ_TO_FLAG; spin_unlock_bh(&mpath->state_lock); da = (mpath->is_root) ? 
mpath->rann_snd_addr : broadcast_addr; @@ -1176,7 +1190,9 @@ void mesh_path_timer(unsigned long data) spin_unlock_bh(&mpath->state_lock); mesh_queue_preq(mpath, 0); } else { - mpath->flags = 0; + mpath->flags &= ~(MESH_PATH_RESOLVING | + MESH_PATH_RESOLVED | + MESH_PATH_REQ_QUEUED); mpath->exp_time = jiffies; spin_unlock_bh(&mpath->state_lock); if (!mpath->is_gate && mesh_gate_num(sdata) > 0) { diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 3b59099413fb..58384642e03c 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -13,10 +13,11 @@ #include "rate.h" #include "mesh.h" +#define PLINK_CNF_AID(mgmt) ((mgmt)->u.action.u.self_prot.variable + 2) #define PLINK_GET_LLID(p) (p + 2) #define PLINK_GET_PLID(p) (p + 4) -#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \ +#define mod_plink_timer(s, t) (mod_timer(&s->mesh->plink_timer, \ jiffies + msecs_to_jiffies(t))) enum plink_event { @@ -53,18 +54,13 @@ static const char * const mplevents[] = { [CLS_IGNR] = "CLS_IGNR" }; -static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, - enum ieee80211_self_protected_actioncode action, - u8 *da, u16 llid, u16 plid, u16 reason); - - /* We only need a valid sta if user configured a minimum rssi_threshold. */ static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold; return rssi_threshold == 0 || - (sta && (s8) -ewma_read(&sta->avg_signal) > rssi_threshold); + (sta && (s8) -ewma_signal_read(&sta->avg_signal) > rssi_threshold); } /** @@ -72,14 +68,14 @@ static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata, * * @sta: mesh peer link to restart * - * Locking: this function must be called holding sta->plink_lock + * Locking: this function must be called holding sta->mesh->plink_lock */ static inline void mesh_plink_fsm_restart(struct sta_info *sta) { - lockdep_assert_held(&sta->plink_lock); - sta->plink_state = NL80211_PLINK_LISTEN; - sta->llid = sta->plid = sta->reason = 0; - sta->plink_retries = 0; + lockdep_assert_held(&sta->mesh->plink_lock); + sta->mesh->plink_state = NL80211_PLINK_LISTEN; + sta->mesh->llid = sta->mesh->plid = sta->mesh->reason = 0; + sta->mesh->plink_retries = 0; } /* @@ -119,7 +115,7 @@ static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata) rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { if (sdata != sta->sdata || - sta->plink_state != NL80211_PLINK_ESTAB) + sta->mesh->plink_state != NL80211_PLINK_ESTAB) continue; short_slot = false; @@ -169,7 +165,7 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata) rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { if (sdata != sta->sdata || - sta->plink_state != NL80211_PLINK_ESTAB) + sta->mesh->plink_state != NL80211_PLINK_ESTAB) continue; if (sta->sta.bandwidth > IEEE80211_STA_RX_BW_20) @@ -204,59 +200,8 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata) return BSS_CHANGED_HT; } -/** - * __mesh_plink_deactivate - deactivate mesh peer link - * - * @sta: mesh peer link to deactivate - * - * All mesh paths with this peer as next hop will be flushed - * Returns beacon changed flag if the beacon content changed. 
- * - * Locking: the caller must hold sta->plink_lock - */ -static u32 __mesh_plink_deactivate(struct sta_info *sta) -{ - struct ieee80211_sub_if_data *sdata = sta->sdata; - u32 changed = 0; - - lockdep_assert_held(&sta->plink_lock); - - if (sta->plink_state == NL80211_PLINK_ESTAB) - changed = mesh_plink_dec_estab_count(sdata); - sta->plink_state = NL80211_PLINK_BLOCKED; - mesh_path_flush_by_nexthop(sta); - - ieee80211_mps_sta_status_update(sta); - changed |= ieee80211_mps_set_sta_local_pm(sta, - NL80211_MESH_POWER_UNKNOWN); - - return changed; -} - -/** - * mesh_plink_deactivate - deactivate mesh peer link - * - * @sta: mesh peer link to deactivate - * - * All mesh paths with this peer as next hop will be flushed - */ -u32 mesh_plink_deactivate(struct sta_info *sta) -{ - struct ieee80211_sub_if_data *sdata = sta->sdata; - u32 changed; - - spin_lock_bh(&sta->plink_lock); - changed = __mesh_plink_deactivate(sta); - sta->reason = WLAN_REASON_MESH_PEER_CANCELED; - mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE, - sta->sta.addr, sta->llid, sta->plid, - sta->reason); - spin_unlock_bh(&sta->plink_lock); - - return changed; -} - static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta, enum ieee80211_self_protected_actioncode action, u8 *da, u16 llid, u16 plid, u16 reason) { @@ -306,7 +251,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, if (action == WLAN_SP_MESH_PEERING_CONFIRM) { /* AID */ pos = skb_put(skb, 2); - put_unaligned_le16(plid, pos); + put_unaligned_le16(sta->sta.aid, pos); } if (ieee80211_add_srates_ie(sdata, skb, true, band) || ieee80211_add_ext_srates_ie(sdata, skb, true, band) || @@ -375,6 +320,58 @@ free: return err; } +/** + * __mesh_plink_deactivate - deactivate mesh peer link + * + * @sta: mesh peer link to deactivate + * + * All mesh paths with this peer as next hop will be flushed + * Returns beacon changed flag if the beacon content changed. 
+ * + * Locking: the caller must hold sta->mesh->plink_lock + */ +static u32 __mesh_plink_deactivate(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + u32 changed = 0; + + lockdep_assert_held(&sta->mesh->plink_lock); + + if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) + changed = mesh_plink_dec_estab_count(sdata); + sta->mesh->plink_state = NL80211_PLINK_BLOCKED; + mesh_path_flush_by_nexthop(sta); + + ieee80211_mps_sta_status_update(sta); + changed |= ieee80211_mps_set_sta_local_pm(sta, + NL80211_MESH_POWER_UNKNOWN); + + return changed; +} + +/** + * mesh_plink_deactivate - deactivate mesh peer link + * + * @sta: mesh peer link to deactivate + * + * All mesh paths with this peer as next hop will be flushed + */ +u32 mesh_plink_deactivate(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + u32 changed; + + spin_lock_bh(&sta->mesh->plink_lock); + changed = __mesh_plink_deactivate(sta); + sta->mesh->reason = WLAN_REASON_MESH_PEER_CANCELED; + mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_CLOSE, + sta->sta.addr, sta->mesh->llid, sta->mesh->plid, + sta->mesh->reason); + spin_unlock_bh(&sta->mesh->plink_lock); + + return changed; +} + static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct ieee802_11_elems *elems, bool insert) @@ -388,13 +385,14 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, sband = local->hw.wiphy->bands[band]; rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates); - spin_lock_bh(&sta->plink_lock); + spin_lock_bh(&sta->mesh->plink_lock); sta->last_rx = jiffies; /* rates and capabilities don't change during peering */ - if (sta->plink_state == NL80211_PLINK_ESTAB && sta->processed_beacon) + if (sta->mesh->plink_state == NL80211_PLINK_ESTAB && + sta->mesh->processed_beacon) goto out; - sta->processed_beacon = true; + sta->mesh->processed_beacon = true; if (sta->sta.supp_rates[band] != rates) changed |= IEEE80211_RC_SUPP_RATES_CHANGED; @@ -421,23 +419,57 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata, else rate_control_rate_update(local, sband, sta, changed); out: - spin_unlock_bh(&sta->plink_lock); + spin_unlock_bh(&sta->mesh->plink_lock); +} + +static int mesh_allocate_aid(struct ieee80211_sub_if_data *sdata) +{ + struct sta_info *sta; + unsigned long *aid_map; + int aid; + + aid_map = kcalloc(BITS_TO_LONGS(IEEE80211_MAX_AID + 1), + sizeof(*aid_map), GFP_KERNEL); + if (!aid_map) + return -ENOMEM; + + /* reserve aid 0 for mcast indication */ + __set_bit(0, aid_map); + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) + __set_bit(sta->sta.aid, aid_map); + rcu_read_unlock(); + + aid = find_first_zero_bit(aid_map, IEEE80211_MAX_AID + 1); + kfree(aid_map); + + if (aid > IEEE80211_MAX_AID) + return -ENOBUFS; + + return aid; } static struct sta_info * __mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr) { struct sta_info *sta; + int aid; if (sdata->local->num_sta >= MESH_MAX_PLINKS) return NULL; + aid = mesh_allocate_aid(sdata); + if (aid < 0) + return NULL; + sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL); if (!sta) return NULL; - sta->plink_state = NL80211_PLINK_LISTEN; + sta->mesh->plink_state = NL80211_PLINK_LISTEN; sta->sta.wme = true; + sta->sta.aid = aid; sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); @@ -524,7 +556,7 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata, goto out; if 
(mesh_peer_accepts_plinks(elems) && - sta->plink_state == NL80211_PLINK_LISTEN && + sta->mesh->plink_state == NL80211_PLINK_LISTEN && sdata->u.mesh.accepting_plinks && sdata->u.mesh.mshcfg.auto_open_plinks && rssi_threshold_check(sdata, sta)) @@ -554,52 +586,52 @@ static void mesh_plink_timer(unsigned long data) if (sta->sdata->local->quiescing) return; - spin_lock_bh(&sta->plink_lock); + spin_lock_bh(&sta->mesh->plink_lock); /* If a timer fires just before a state transition on another CPU, * we may have already extended the timeout and changed state by the * time we've acquired the lock and arrived here. In that case, * skip this timer and wait for the new one. */ - if (time_before(jiffies, sta->plink_timer.expires)) { + if (time_before(jiffies, sta->mesh->plink_timer.expires)) { mpl_dbg(sta->sdata, "Ignoring timer for %pM in state %s (timer adjusted)", - sta->sta.addr, mplstates[sta->plink_state]); - spin_unlock_bh(&sta->plink_lock); + sta->sta.addr, mplstates[sta->mesh->plink_state]); + spin_unlock_bh(&sta->mesh->plink_lock); return; } /* del_timer() and handler may race when entering these states */ - if (sta->plink_state == NL80211_PLINK_LISTEN || - sta->plink_state == NL80211_PLINK_ESTAB) { + if (sta->mesh->plink_state == NL80211_PLINK_LISTEN || + sta->mesh->plink_state == NL80211_PLINK_ESTAB) { mpl_dbg(sta->sdata, "Ignoring timer for %pM in state %s (timer deleted)", - sta->sta.addr, mplstates[sta->plink_state]); - spin_unlock_bh(&sta->plink_lock); + sta->sta.addr, mplstates[sta->mesh->plink_state]); + spin_unlock_bh(&sta->mesh->plink_lock); return; } mpl_dbg(sta->sdata, "Mesh plink timer for %pM fired on state %s\n", - sta->sta.addr, mplstates[sta->plink_state]); + sta->sta.addr, mplstates[sta->mesh->plink_state]); sdata = sta->sdata; mshcfg = &sdata->u.mesh.mshcfg; - switch (sta->plink_state) { + switch (sta->mesh->plink_state) { case NL80211_PLINK_OPN_RCVD: case NL80211_PLINK_OPN_SNT: /* retry timer */ - if (sta->plink_retries < mshcfg->dot11MeshMaxRetries) { + if (sta->mesh->plink_retries < mshcfg->dot11MeshMaxRetries) { u32 rand; mpl_dbg(sta->sdata, "Mesh plink for %pM (retry, timeout): %d %d\n", - sta->sta.addr, sta->plink_retries, - sta->plink_timeout); + sta->sta.addr, sta->mesh->plink_retries, + sta->mesh->plink_timeout); get_random_bytes(&rand, sizeof(u32)); - sta->plink_timeout = sta->plink_timeout + - rand % sta->plink_timeout; - ++sta->plink_retries; - mod_plink_timer(sta, sta->plink_timeout); + sta->mesh->plink_timeout = sta->mesh->plink_timeout + + rand % sta->mesh->plink_timeout; + ++sta->mesh->plink_retries; + mod_plink_timer(sta, sta->mesh->plink_timeout); action = WLAN_SP_MESH_PEERING_OPEN; break; } @@ -609,31 +641,31 @@ static void mesh_plink_timer(unsigned long data) /* confirm timer */ if (!reason) reason = WLAN_REASON_MESH_CONFIRM_TIMEOUT; - sta->plink_state = NL80211_PLINK_HOLDING; + sta->mesh->plink_state = NL80211_PLINK_HOLDING; mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout); action = WLAN_SP_MESH_PEERING_CLOSE; break; case NL80211_PLINK_HOLDING: /* holding timer */ - del_timer(&sta->plink_timer); + del_timer(&sta->mesh->plink_timer); mesh_plink_fsm_restart(sta); break; default: break; } - spin_unlock_bh(&sta->plink_lock); + spin_unlock_bh(&sta->mesh->plink_lock); if (action) - mesh_plink_frame_tx(sdata, action, sta->sta.addr, - sta->llid, sta->plid, reason); + mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr, + sta->mesh->llid, sta->mesh->plid, reason); } static inline void mesh_plink_timer_set(struct sta_info *sta, u32 timeout) { - 
sta->plink_timer.expires = jiffies + msecs_to_jiffies(timeout); - sta->plink_timer.data = (unsigned long) sta; - sta->plink_timer.function = mesh_plink_timer; - sta->plink_timeout = timeout; - add_timer(&sta->plink_timer); + sta->mesh->plink_timer.expires = jiffies + msecs_to_jiffies(timeout); + sta->mesh->plink_timer.data = (unsigned long) sta; + sta->mesh->plink_timer.function = mesh_plink_timer; + sta->mesh->plink_timeout = timeout; + add_timer(&sta->mesh->plink_timer); } static bool llid_in_use(struct ieee80211_sub_if_data *sdata, @@ -645,7 +677,7 @@ static bool llid_in_use(struct ieee80211_sub_if_data *sdata, rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { - if (!memcmp(&sta->llid, &llid, sizeof(llid))) { + if (!memcmp(&sta->mesh->llid, &llid, sizeof(llid))) { in_use = true; break; } @@ -661,8 +693,6 @@ static u16 mesh_get_new_llid(struct ieee80211_sub_if_data *sdata) do { get_random_bytes(&llid, sizeof(llid)); - /* for mesh PS we still only have the AID range for TIM bits */ - llid = (llid % IEEE80211_MAX_AID) + 1; } while (llid_in_use(sdata, llid)); return llid; @@ -676,16 +706,16 @@ u32 mesh_plink_open(struct sta_info *sta) if (!test_sta_flag(sta, WLAN_STA_AUTH)) return 0; - spin_lock_bh(&sta->plink_lock); - sta->llid = mesh_get_new_llid(sdata); - if (sta->plink_state != NL80211_PLINK_LISTEN && - sta->plink_state != NL80211_PLINK_BLOCKED) { - spin_unlock_bh(&sta->plink_lock); + spin_lock_bh(&sta->mesh->plink_lock); + sta->mesh->llid = mesh_get_new_llid(sdata); + if (sta->mesh->plink_state != NL80211_PLINK_LISTEN && + sta->mesh->plink_state != NL80211_PLINK_BLOCKED) { + spin_unlock_bh(&sta->mesh->plink_lock); return 0; } - sta->plink_state = NL80211_PLINK_OPN_SNT; + sta->mesh->plink_state = NL80211_PLINK_OPN_SNT; mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout); - spin_unlock_bh(&sta->plink_lock); + spin_unlock_bh(&sta->mesh->plink_lock); mpl_dbg(sdata, "Mesh plink: starting establishment with %pM\n", sta->sta.addr); @@ -693,8 +723,8 @@ u32 mesh_plink_open(struct sta_info *sta) /* set the non-peer mode to active during peering */ changed = ieee80211_mps_local_status_update(sdata); - mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN, - sta->sta.addr, sta->llid, 0, 0); + mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_OPEN, + sta->sta.addr, sta->mesh->llid, 0, 0); return changed; } @@ -702,10 +732,10 @@ u32 mesh_plink_block(struct sta_info *sta) { u32 changed; - spin_lock_bh(&sta->plink_lock); + spin_lock_bh(&sta->mesh->plink_lock); changed = __mesh_plink_deactivate(sta); - sta->plink_state = NL80211_PLINK_BLOCKED; - spin_unlock_bh(&sta->plink_lock); + sta->mesh->plink_state = NL80211_PLINK_BLOCKED; + spin_unlock_bh(&sta->mesh->plink_lock); return changed; } @@ -715,12 +745,11 @@ static void mesh_plink_close(struct ieee80211_sub_if_data *sdata, enum plink_event event) { struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg; - u16 reason = (event == CLS_ACPT) ? 
WLAN_REASON_MESH_CLOSE : WLAN_REASON_MESH_CONFIG; - sta->reason = reason; - sta->plink_state = NL80211_PLINK_HOLDING; + sta->mesh->reason = reason; + sta->mesh->plink_state = NL80211_PLINK_HOLDING; mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout); } @@ -730,8 +759,8 @@ static u32 mesh_plink_establish(struct ieee80211_sub_if_data *sdata, struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg; u32 changed = 0; - del_timer(&sta->plink_timer); - sta->plink_state = NL80211_PLINK_ESTAB; + del_timer(&sta->mesh->plink_timer); + sta->mesh->plink_state = NL80211_PLINK_ESTAB; changed |= mesh_plink_inc_estab_count(sdata); changed |= mesh_set_ht_prot_mode(sdata); changed |= mesh_set_short_slot_time(sdata); @@ -758,18 +787,18 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata, u32 changed = 0; mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr, - mplstates[sta->plink_state], mplevents[event]); + mplstates[sta->mesh->plink_state], mplevents[event]); - spin_lock_bh(&sta->plink_lock); - switch (sta->plink_state) { + spin_lock_bh(&sta->mesh->plink_lock); + switch (sta->mesh->plink_state) { case NL80211_PLINK_LISTEN: switch (event) { case CLS_ACPT: mesh_plink_fsm_restart(sta); break; case OPN_ACPT: - sta->plink_state = NL80211_PLINK_OPN_RCVD; - sta->llid = mesh_get_new_llid(sdata); + sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD; + sta->mesh->llid = mesh_get_new_llid(sdata); mesh_plink_timer_set(sta, mshcfg->dot11MeshRetryTimeout); @@ -791,11 +820,11 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata, break; case OPN_ACPT: /* retry timer is left untouched */ - sta->plink_state = NL80211_PLINK_OPN_RCVD; + sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD; action = WLAN_SP_MESH_PEERING_CONFIRM; break; case CNF_ACPT: - sta->plink_state = NL80211_PLINK_CNF_RCVD; + sta->mesh->plink_state = NL80211_PLINK_CNF_RCVD; mod_plink_timer(sta, mshcfg->dot11MeshConfirmTimeout); break; default: @@ -855,7 +884,7 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata, case NL80211_PLINK_HOLDING: switch (event) { case CLS_ACPT: - del_timer(&sta->plink_timer); + del_timer(&sta->mesh->plink_timer); mesh_plink_fsm_restart(sta); break; case OPN_ACPT: @@ -874,17 +903,18 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata, */ break; } - spin_unlock_bh(&sta->plink_lock); + spin_unlock_bh(&sta->mesh->plink_lock); if (action) { - mesh_plink_frame_tx(sdata, action, sta->sta.addr, - sta->llid, sta->plid, sta->reason); + mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr, + sta->mesh->llid, sta->mesh->plid, + sta->mesh->reason); /* also send confirm in open case */ if (action == WLAN_SP_MESH_PEERING_OPEN) { - mesh_plink_frame_tx(sdata, + mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_CONFIRM, - sta->sta.addr, sta->llid, - sta->plid, 0); + sta->sta.addr, sta->mesh->llid, + sta->mesh->plid, 0); } } @@ -939,7 +969,7 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata, mpl_dbg(sdata, "Mesh plink: Action frame from non-authed peer\n"); goto out; } - if (sta->plink_state == NL80211_PLINK_BLOCKED) + if (sta->mesh->plink_state == NL80211_PLINK_BLOCKED) goto out; } @@ -954,7 +984,7 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata, if (!matches_local) event = OPN_RJCT; if (!mesh_plink_free_count(sdata) || - (sta->plid && sta->plid != plid)) + (sta->mesh->plid && sta->mesh->plid != plid)) event = OPN_IGNR; else event = OPN_ACPT; @@ -963,14 +993,14 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata, if (!matches_local) event = CNF_RJCT; if 
(!mesh_plink_free_count(sdata) || - sta->llid != llid || - (sta->plid && sta->plid != plid)) + sta->mesh->llid != llid || + (sta->mesh->plid && sta->mesh->plid != plid)) event = CNF_IGNR; else event = CNF_ACPT; break; case WLAN_SP_MESH_PEERING_CLOSE: - if (sta->plink_state == NL80211_PLINK_ESTAB) + if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) /* Do not check for llid or plid. This does not * follow the standard but since multiple plinks * per sta are not supported, it is necessary in @@ -981,9 +1011,9 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata, * restarted. */ event = CLS_ACPT; - else if (sta->plid != plid) + else if (sta->mesh->plid != plid) event = CLS_IGNR; - else if (ie_len == 8 && sta->llid != llid) + else if (ie_len == 8 && sta->mesh->llid != llid) event = CLS_IGNR; else event = CLS_ACPT; @@ -1070,9 +1100,9 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata, mpl_dbg(sdata, "Mesh plink: failed to init peer!\n"); goto unlock_rcu; } - sta->plid = plid; + sta->mesh->plid = plid; } else if (!sta && event == OPN_RJCT) { - mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE, + mesh_plink_frame_tx(sdata, NULL, WLAN_SP_MESH_PEERING_CLOSE, mgmt->sa, 0, plid, WLAN_REASON_MESH_CONFIG); goto unlock_rcu; @@ -1081,9 +1111,13 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata, goto unlock_rcu; } - /* 802.11-2012 13.3.7.2 - update plid on CNF if not set */ - if (!sta->plid && event == CNF_ACPT) - sta->plid = plid; + if (event == CNF_ACPT) { + /* 802.11-2012 13.3.7.2 - update plid on CNF if not set */ + if (!sta->mesh->plid) + sta->mesh->plid = plid; + + sta->mesh->aid = get_unaligned_le16(PLINK_CNF_AID(mgmt)); + } changed |= mesh_plink_fsm(sdata, sta, event); diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c index ad8b377b4b9f..90a268abea17 100644 --- a/net/mac80211/mesh_ps.c +++ b/net/mac80211/mesh_ps.c @@ -92,16 +92,16 @@ u32 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata) if (sdata != sta->sdata) continue; - switch (sta->plink_state) { + switch (sta->mesh->plink_state) { case NL80211_PLINK_OPN_SNT: case NL80211_PLINK_OPN_RCVD: case NL80211_PLINK_CNF_RCVD: peering = true; break; case NL80211_PLINK_ESTAB: - if (sta->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP) + if (sta->mesh->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP) light_sleep_cnt++; - else if (sta->local_pm == NL80211_MESH_POWER_DEEP_SLEEP) + else if (sta->mesh->local_pm == NL80211_MESH_POWER_DEEP_SLEEP) deep_sleep_cnt++; break; default: @@ -153,19 +153,19 @@ u32 ieee80211_mps_set_sta_local_pm(struct sta_info *sta, { struct ieee80211_sub_if_data *sdata = sta->sdata; - if (sta->local_pm == pm) + if (sta->mesh->local_pm == pm) return 0; mps_dbg(sdata, "local STA operates in mode %d with %pM\n", pm, sta->sta.addr); - sta->local_pm = pm; + sta->mesh->local_pm = pm; /* * announce peer-specific power mode transition * (see IEEE802.11-2012 13.14.3.2 and 13.14.3.3) */ - if (sta->plink_state == NL80211_PLINK_ESTAB) + if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) mps_qos_null_tx(sta); return ieee80211_mps_local_status_update(sdata); @@ -197,8 +197,8 @@ void ieee80211_mps_set_frame_flags(struct ieee80211_sub_if_data *sdata, if (is_unicast_ether_addr(hdr->addr1) && ieee80211_is_data_qos(hdr->frame_control) && - sta->plink_state == NL80211_PLINK_ESTAB) - pm = sta->local_pm; + sta->mesh->plink_state == NL80211_PLINK_ESTAB) + pm = sta->mesh->local_pm; else pm = sdata->u.mesh.nonpeer_pm; @@ -241,16 +241,16 @@ void ieee80211_mps_sta_status_update(struct sta_info *sta) * 
use peer-specific power mode if peering is established and the * peer's power mode is known */ - if (sta->plink_state == NL80211_PLINK_ESTAB && - sta->peer_pm != NL80211_MESH_POWER_UNKNOWN) - pm = sta->peer_pm; + if (sta->mesh->plink_state == NL80211_PLINK_ESTAB && + sta->mesh->peer_pm != NL80211_MESH_POWER_UNKNOWN) + pm = sta->mesh->peer_pm; else - pm = sta->nonpeer_pm; + pm = sta->mesh->nonpeer_pm; do_buffer = (pm != NL80211_MESH_POWER_ACTIVE); /* clear the MPSP flags for non-peers or active STA */ - if (sta->plink_state != NL80211_PLINK_ESTAB) { + if (sta->mesh->plink_state != NL80211_PLINK_ESTAB) { clear_sta_flag(sta, WLAN_STA_MPSP_OWNER); clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT); } else if (!do_buffer) { @@ -296,13 +296,13 @@ static void mps_set_sta_peer_pm(struct sta_info *sta, pm = NL80211_MESH_POWER_ACTIVE; } - if (sta->peer_pm == pm) + if (sta->mesh->peer_pm == pm) return; mps_dbg(sta->sdata, "STA %pM enters mode %d\n", sta->sta.addr, pm); - sta->peer_pm = pm; + sta->mesh->peer_pm = pm; ieee80211_mps_sta_status_update(sta); } @@ -317,13 +317,13 @@ static void mps_set_sta_nonpeer_pm(struct sta_info *sta, else pm = NL80211_MESH_POWER_ACTIVE; - if (sta->nonpeer_pm == pm) + if (sta->mesh->nonpeer_pm == pm) return; mps_dbg(sta->sdata, "STA %pM sets non-peer mode to %d\n", sta->sta.addr, pm); - sta->nonpeer_pm = pm; + sta->mesh->nonpeer_pm = pm; ieee80211_mps_sta_status_update(sta); } @@ -552,7 +552,7 @@ void ieee80211_mpsp_trigger_process(u8 *qc, struct sta_info *sta, } else { if (eosp) clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT); - else if (sta->local_pm != NL80211_MESH_POWER_ACTIVE) + else if (sta->mesh->local_pm != NL80211_MESH_POWER_ACTIVE) set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT); if (rspi && !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER)) @@ -577,9 +577,9 @@ void ieee80211_mps_frame_release(struct sta_info *sta, int ac, buffer_local = 0; bool has_buffered = false; - if (sta->plink_state == NL80211_PLINK_ESTAB) + if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) has_buffered = ieee80211_check_tim(elems->tim, elems->tim_len, - sta->llid); + sta->mesh->aid); if (has_buffered) mps_dbg(sta->sdata, "%pM indicates buffered frames\n", @@ -598,7 +598,7 @@ void ieee80211_mps_frame_release(struct sta_info *sta, if (!has_buffered && !buffer_local) return; - if (sta->plink_state == NL80211_PLINK_ESTAB) + if (sta->mesh->plink_state == NL80211_PLINK_ESTAB) mpsp_trigger_send(sta, has_buffered, !buffer_local); else mps_frame_deliver(sta, 1); diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c index 09625d6205c3..64bc22ad9496 100644 --- a/net/mac80211/mesh_sync.c +++ b/net/mac80211/mesh_sync.c @@ -127,14 +127,14 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, /* Timing offset calculation (see 13.13.2.2.2) */ t_t = le64_to_cpu(mgmt->u.beacon.timestamp); - sta->t_offset = t_t - t_r; + sta->mesh->t_offset = t_t - t_r; if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { - s64 t_clockdrift = sta->t_offset_setpoint - sta->t_offset; + s64 t_clockdrift = sta->mesh->t_offset_setpoint - sta->mesh->t_offset; msync_dbg(sdata, - "STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld\n", - sta->sta.addr, (long long) sta->t_offset, - (long long) sta->t_offset_setpoint, + "STA %pM : t_offset=%lld, t_offset_setpoint=%lld, t_clockdrift=%lld\n", + sta->sta.addr, (long long) sta->mesh->t_offset, + (long long) sta->mesh->t_offset_setpoint, (long long) t_clockdrift); if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT || @@ -152,12 +152,12 @@ 
static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, ifmsh->sync_offset_clockdrift_max = t_clockdrift; spin_unlock_bh(&ifmsh->sync_offset_lock); } else { - sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN; + sta->mesh->t_offset_setpoint = sta->mesh->t_offset - TOFFSET_SET_MARGIN; set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN); msync_dbg(sdata, - "STA %pM : offset was invalid, sta->t_offset=%lld\n", + "STA %pM : offset was invalid, t_offset=%lld\n", sta->sta.addr, - (long long) sta->t_offset); + (long long) sta->mesh->t_offset); } no_sync: diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 9b2cc278ac2a..705ef1d040ed 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -6,6 +6,7 @@ * Copyright 2006-2007 Jiri Benc * Copyright 2007, Michael Wu * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -538,11 +539,16 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, ieee80211_ie_build_ht_cap(pos, &ht_cap, cap); } +/* This function determines VHT capability flags for the association + * and builds the IE. + * Note - the function may set the owner of the MU-MIMO capability + */ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, struct ieee80211_supported_band *sband, struct ieee80211_vht_cap *ap_vht_cap) { + struct ieee80211_local *local = sdata->local; u8 *pos; u32 cap; struct ieee80211_sta_vht_cap vht_cap; @@ -576,7 +582,34 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, */ if (!(ap_vht_cap->vht_cap_info & cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE))) - cap &= ~IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; + cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); + else if (!(ap_vht_cap->vht_cap_info & + cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) + cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + + /* + * If some other vif is using the MU-MIMO capability we cannot associate + * using MU-MIMO - this will lead to contradictions in the group-id + * mechanism. + * Ownership is defined from the association request onwards, in order to avoid + * simultaneous associations with MU-MIMO.
+ */ + if (cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) { + bool disable_mu_mimo = false; + struct ieee80211_sub_if_data *other; + + list_for_each_entry_rcu(other, &local->interfaces, list) { + if (other->flags & IEEE80211_SDATA_MU_MIMO_OWNER) { + disable_mu_mimo = true; + break; + } + } + if (disable_mu_mimo) + cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + else + sdata->flags |= IEEE80211_SDATA_MU_MIMO_OWNER; + } mask = IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; @@ -1096,24 +1129,6 @@ static void ieee80211_chswitch_timer(unsigned long data) ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work); } -static void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata) -{ - struct sta_info *sta; - u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED; - - rcu_read_lock(); - list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) { - if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded || - !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) - continue; - - ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr, - NL80211_TDLS_TEARDOWN, reason, - GFP_ATOMIC); - } - rcu_read_unlock(); -} - static void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, u64 timestamp, u32 device_timestamp, @@ -2076,6 +2091,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask)); memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa)); memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask)); + sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER; sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL; @@ -2538,6 +2554,7 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata, eth_zero_addr(sdata->u.mgd.bssid); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); sdata->u.mgd.flags = 0; + sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER; mutex_lock(&sdata->local->mtx); ieee80211_vif_release_channel(sdata); mutex_unlock(&sdata->local->mtx); @@ -3034,12 +3051,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, rate_control_rate_init(sta); - if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) { + if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) set_sta_flag(sta, WLAN_STA_MFP); - sta->sta.mfp = true; - } else { - sta->sta.mfp = false; - } sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS; diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c index 358d5f9d8207..573b81a1fb2d 100644 --- a/net/mac80211/ocb.c +++ b/net/mac80211/ocb.c @@ -179,7 +179,7 @@ int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; - u32 changed = BSS_CHANGED_OCB; + u32 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID; int err; if (ifocb->joined == true) diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index fda33f961d83..9857693b91ec 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -29,6 +29,65 @@ module_param(ieee80211_default_rc_algo, charp, 0644); MODULE_PARM_DESC(ieee80211_default_rc_algo, "Default rate control algorithm for mac80211 to use"); +void rate_control_rate_init(struct sta_info *sta) +{ + struct ieee80211_local *local = sta->sdata->local; + struct rate_control_ref *ref = sta->rate_ctrl; + struct ieee80211_sta *ista = &sta->sta; + void *priv_sta = sta->rate_ctrl_priv; + struct ieee80211_supported_band *sband; + struct ieee80211_chanctx_conf *chanctx_conf; + + ieee80211_sta_set_rx_nss(sta); + + if (!ref) + return; + + rcu_read_lock(); 
+ + chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf); + if (WARN_ON(!chanctx_conf)) { + rcu_read_unlock(); + return; + } + + sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band]; + + spin_lock_bh(&sta->rate_ctrl_lock); + ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista, + priv_sta); + spin_unlock_bh(&sta->rate_ctrl_lock); + rcu_read_unlock(); + set_sta_flag(sta, WLAN_STA_RATE_CONTROL); +} + +void rate_control_rate_update(struct ieee80211_local *local, + struct ieee80211_supported_band *sband, + struct sta_info *sta, u32 changed) +{ + struct rate_control_ref *ref = local->rate_ctrl; + struct ieee80211_sta *ista = &sta->sta; + void *priv_sta = sta->rate_ctrl_priv; + struct ieee80211_chanctx_conf *chanctx_conf; + + if (ref && ref->ops->rate_update) { + rcu_read_lock(); + + chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf); + if (WARN_ON(!chanctx_conf)) { + rcu_read_unlock(); + return; + } + + spin_lock_bh(&sta->rate_ctrl_lock); + ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def, + ista, priv_sta, changed); + spin_unlock_bh(&sta->rate_ctrl_lock); + rcu_read_unlock(); + } + drv_sta_rc_update(local, sta->sdata, &sta->sta, changed); +} + int ieee80211_rate_control_register(const struct rate_control_ops *ops) { struct rate_control_alg *alg; @@ -294,39 +353,37 @@ bool rate_control_send_low(struct ieee80211_sta *pubsta, } EXPORT_SYMBOL(rate_control_send_low); -static bool rate_idx_match_legacy_mask(struct ieee80211_tx_rate *rate, - int n_bitrates, u32 mask) +static bool rate_idx_match_legacy_mask(s8 *rate_idx, int n_bitrates, u32 mask) { int j; /* See whether the selected rate or anything below it is allowed. */ - for (j = rate->idx; j >= 0; j--) { + for (j = *rate_idx; j >= 0; j--) { if (mask & (1 << j)) { /* Okay, found a suitable rate. Use it. */ - rate->idx = j; + *rate_idx = j; return true; } } /* Try to find a higher rate that would be allowed */ - for (j = rate->idx + 1; j < n_bitrates; j++) { + for (j = *rate_idx + 1; j < n_bitrates; j++) { if (mask & (1 << j)) { /* Okay, found a suitable rate. Use it. 
*/ - rate->idx = j; + *rate_idx = j; return true; } } return false; } -static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate, - u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) +static bool rate_idx_match_mcs_mask(s8 *rate_idx, u8 *mcs_mask) { int i, j; int ridx, rbit; - ridx = rate->idx / 8; - rbit = rate->idx % 8; + ridx = *rate_idx / 8; + rbit = *rate_idx % 8; /* sanity check */ if (ridx < 0 || ridx >= IEEE80211_HT_MCS_MASK_LEN) @@ -336,20 +393,20 @@ static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate, for (i = ridx; i >= 0; i--) { for (j = rbit; j >= 0; j--) if (mcs_mask[i] & BIT(j)) { - rate->idx = i * 8 + j; + *rate_idx = i * 8 + j; return true; } rbit = 7; } /* Try to find a higher rate that would be allowed */ - ridx = (rate->idx + 1) / 8; - rbit = (rate->idx + 1) % 8; + ridx = (*rate_idx + 1) / 8; + rbit = (*rate_idx + 1) % 8; for (i = ridx; i < IEEE80211_HT_MCS_MASK_LEN; i++) { for (j = rbit; j < 8; j++) if (mcs_mask[i] & BIT(j)) { - rate->idx = i * 8 + j; + *rate_idx = i * 8 + j; return true; } rbit = 0; @@ -357,37 +414,93 @@ static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate, return false; } +static bool rate_idx_match_vht_mcs_mask(s8 *rate_idx, u16 *vht_mask) +{ + int i, j; + int ridx, rbit; + ridx = *rate_idx >> 4; + rbit = *rate_idx & 0xf; -static void rate_idx_match_mask(struct ieee80211_tx_rate *rate, + if (ridx < 0 || ridx >= NL80211_VHT_NSS_MAX) + return false; + + /* See whether the selected rate or anything below it is allowed. */ + for (i = ridx; i >= 0; i--) { + for (j = rbit; j >= 0; j--) { + if (vht_mask[i] & BIT(j)) { + *rate_idx = (i << 4) | j; + return true; + } + } + rbit = 15; + } + + /* Try to find a higher rate that would be allowed */ + ridx = (*rate_idx + 1) >> 4; + rbit = (*rate_idx + 1) & 0xf; + + for (i = ridx; i < NL80211_VHT_NSS_MAX; i++) { + for (j = rbit; j < 16; j++) { + if (vht_mask[i] & BIT(j)) { + *rate_idx = (i << 4) | j; + return true; + } + } + rbit = 0; + } + return false; +} + +static void rate_idx_match_mask(s8 *rate_idx, u16 *rate_flags, struct ieee80211_supported_band *sband, enum nl80211_chan_width chan_width, u32 mask, - u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) + u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN], + u16 vht_mask[NL80211_VHT_NSS_MAX]) { - struct ieee80211_tx_rate alt_rate; + if (*rate_flags & IEEE80211_TX_RC_VHT_MCS) { + /* handle VHT rates */ + if (rate_idx_match_vht_mcs_mask(rate_idx, vht_mask)) + return; - /* handle HT rates */ - if (rate->flags & IEEE80211_TX_RC_MCS) { - if (rate_idx_match_mcs_mask(rate, mcs_mask)) + *rate_idx = 0; + /* keep protection flags */ + *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS | + IEEE80211_TX_RC_USE_CTS_PROTECT | + IEEE80211_TX_RC_USE_SHORT_PREAMBLE); + + *rate_flags |= IEEE80211_TX_RC_MCS; + if (chan_width == NL80211_CHAN_WIDTH_40) + *rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + + if (rate_idx_match_mcs_mask(rate_idx, mcs_mask)) return; /* also try the legacy rates. 
*/ - alt_rate.idx = 0; - /* keep protection flags */ - alt_rate.flags = rate->flags & - (IEEE80211_TX_RC_USE_RTS_CTS | - IEEE80211_TX_RC_USE_CTS_PROTECT | - IEEE80211_TX_RC_USE_SHORT_PREAMBLE); - alt_rate.count = rate->count; - if (rate_idx_match_legacy_mask(&alt_rate, - sband->n_bitrates, mask)) { - *rate = alt_rate; + *rate_flags &= ~(IEEE80211_TX_RC_MCS | + IEEE80211_TX_RC_40_MHZ_WIDTH); + if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates, + mask)) return; - } - } else if (!(rate->flags & IEEE80211_TX_RC_VHT_MCS)) { + } else if (*rate_flags & IEEE80211_TX_RC_MCS) { + /* handle HT rates */ + if (rate_idx_match_mcs_mask(rate_idx, mcs_mask)) + return; + + /* also try the legacy rates. */ + *rate_idx = 0; + /* keep protection flags */ + *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS | + IEEE80211_TX_RC_USE_CTS_PROTECT | + IEEE80211_TX_RC_USE_SHORT_PREAMBLE); + if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates, + mask)) + return; + } else { /* handle legacy rates */ - if (rate_idx_match_legacy_mask(rate, sband->n_bitrates, mask)) + if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates, + mask)) return; /* if HT BSS, and we handle a data frame, also try HT rates */ @@ -400,23 +513,19 @@ static void rate_idx_match_mask(struct ieee80211_tx_rate *rate, break; } - alt_rate.idx = 0; + *rate_idx = 0; /* keep protection flags */ - alt_rate.flags = rate->flags & - (IEEE80211_TX_RC_USE_RTS_CTS | - IEEE80211_TX_RC_USE_CTS_PROTECT | - IEEE80211_TX_RC_USE_SHORT_PREAMBLE); - alt_rate.count = rate->count; + *rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS | + IEEE80211_TX_RC_USE_CTS_PROTECT | + IEEE80211_TX_RC_USE_SHORT_PREAMBLE); - alt_rate.flags |= IEEE80211_TX_RC_MCS; + *rate_flags |= IEEE80211_TX_RC_MCS; if (chan_width == NL80211_CHAN_WIDTH_40) - alt_rate.flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + *rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; - if (rate_idx_match_mcs_mask(&alt_rate, mcs_mask)) { - *rate = alt_rate; + if (rate_idx_match_mcs_mask(rate_idx, mcs_mask)) return; - } } /* @@ -569,18 +678,92 @@ static void rate_control_fill_sta_table(struct ieee80211_sta *sta, } } +static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, u32 *mask, + u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN], + u16 vht_mask[NL80211_VHT_NSS_MAX]) +{ + u32 i, flags; + + *mask = sdata->rc_rateidx_mask[sband->band]; + flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef); + for (i = 0; i < sband->n_bitrates; i++) { + if ((flags & sband->bitrates[i].flags) != flags) + *mask &= ~BIT(i); + } + + if (*mask == (1 << sband->n_bitrates) - 1 && + !sdata->rc_has_mcs_mask[sband->band] && + !sdata->rc_has_vht_mcs_mask[sband->band]) + return false; + + if (sdata->rc_has_mcs_mask[sband->band]) + memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[sband->band], + IEEE80211_HT_MCS_MASK_LEN); + else + memset(mcs_mask, 0xff, IEEE80211_HT_MCS_MASK_LEN); + + if (sdata->rc_has_vht_mcs_mask[sband->band]) + memcpy(vht_mask, sdata->rc_rateidx_vht_mcs_mask[sband->band], + sizeof(u16) * NL80211_VHT_NSS_MAX); + else + memset(vht_mask, 0xff, sizeof(u16) * NL80211_VHT_NSS_MAX); + + if (sta) { + __le16 sta_vht_cap; + u16 sta_vht_mask[NL80211_VHT_NSS_MAX]; + + /* Filter out rates that the STA does not support */ + *mask &= sta->supp_rates[sband->band]; + for (i = 0; i < sizeof(mcs_mask); i++) + mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i]; + + sta_vht_cap = sta->vht_cap.vht_mcs.rx_mcs_map; + ieee80211_get_vht_mask_from_cap(sta_vht_cap, sta_vht_mask); + for (i = 0; i 
< NL80211_VHT_NSS_MAX; i++) + vht_mask[i] &= sta_vht_mask[i]; + } + + return true; +} + +static void +rate_control_apply_mask_ratetbl(struct sta_info *sta, + struct ieee80211_supported_band *sband, + struct ieee80211_sta_rates *rates) +{ + int i; + u32 mask; + u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN]; + u16 vht_mask[NL80211_VHT_NSS_MAX]; + enum nl80211_chan_width chan_width; + + if (!rate_control_cap_mask(sta->sdata, sband, &sta->sta, &mask, + mcs_mask, vht_mask)) + return; + + chan_width = sta->sdata->vif.bss_conf.chandef.width; + for (i = 0; i < IEEE80211_TX_RATE_TABLE_SIZE; i++) { + if (rates->rate[i].idx < 0) + break; + + rate_idx_match_mask(&rates->rate[i].idx, &rates->rate[i].flags, + sband, chan_width, mask, mcs_mask, + vht_mask); + } +} + static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, struct ieee80211_supported_band *sband, - struct ieee80211_tx_info *info, struct ieee80211_tx_rate *rates, int max_rates) { enum nl80211_chan_width chan_width; u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN]; - bool has_mcs_mask; u32 mask; - u32 rate_flags; + u16 rate_flags, vht_mask[NL80211_VHT_NSS_MAX]; int i; /* @@ -588,30 +771,10 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata, * default mask (allow all rates) is used to save some processing for * the common case. */ - mask = sdata->rc_rateidx_mask[info->band]; - has_mcs_mask = sdata->rc_has_mcs_mask[info->band]; - rate_flags = - ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef); - for (i = 0; i < sband->n_bitrates; i++) - if ((rate_flags & sband->bitrates[i].flags) != rate_flags) - mask &= ~BIT(i); - - if (mask == (1 << sband->n_bitrates) - 1 && !has_mcs_mask) + if (!rate_control_cap_mask(sdata, sband, sta, &mask, mcs_mask, + vht_mask)) return; - if (has_mcs_mask) - memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[info->band], - sizeof(mcs_mask)); - else - memset(mcs_mask, 0xff, sizeof(mcs_mask)); - - if (sta) { - /* Filter out rates that the STA does not support */ - mask &= sta->supp_rates[info->band]; - for (i = 0; i < sizeof(mcs_mask); i++) - mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i]; - } - /* * Make sure the rate index selected for each TX rate is * included in the configured mask and change the rate indexes @@ -623,8 +786,10 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata, if (rates[i].idx < 0) break; - rate_idx_match_mask(&rates[i], sband, chan_width, mask, - mcs_mask); + rate_flags = rates[i].flags; + rate_idx_match_mask(&rates[i].idx, &rate_flags, sband, + chan_width, mask, mcs_mask, vht_mask); + rates[i].flags = rate_flags; } } @@ -648,7 +813,7 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif, sband = sdata->local->hw.wiphy->bands[info->band]; if (ieee80211_is_data(hdr->frame_control)) - rate_control_apply_mask(sdata, sta, sband, info, dest, max_rates); + rate_control_apply_mask(sdata, sta, sband, dest, max_rates); if (dest[0].idx < 0) __rate_control_send_low(&sdata->local->hw, sband, sta, info, @@ -705,7 +870,10 @@ int rate_control_set_rates(struct ieee80211_hw *hw, { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); struct ieee80211_sta_rates *old; + struct ieee80211_supported_band *sband; + sband = hw->wiphy->bands[ieee80211_get_sdata_band(sta->sdata)]; + rate_control_apply_mask_ratetbl(sta, sband, rates); /* * mac80211 guarantees that this function will not be called * concurrently, so the following RCU access is safe, even without diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index 
25c9be5dd7fd..624fe5b81615 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h @@ -71,64 +71,10 @@ rate_control_tx_status_noskb(struct ieee80211_local *local, spin_unlock_bh(&sta->rate_ctrl_lock); } -static inline void rate_control_rate_init(struct sta_info *sta) -{ - struct ieee80211_local *local = sta->sdata->local; - struct rate_control_ref *ref = sta->rate_ctrl; - struct ieee80211_sta *ista = &sta->sta; - void *priv_sta = sta->rate_ctrl_priv; - struct ieee80211_supported_band *sband; - struct ieee80211_chanctx_conf *chanctx_conf; - - ieee80211_sta_set_rx_nss(sta); - - if (!ref) - return; - - rcu_read_lock(); - - chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf); - if (WARN_ON(!chanctx_conf)) { - rcu_read_unlock(); - return; - } - - sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band]; - - spin_lock_bh(&sta->rate_ctrl_lock); - ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista, - priv_sta); - spin_unlock_bh(&sta->rate_ctrl_lock); - rcu_read_unlock(); - set_sta_flag(sta, WLAN_STA_RATE_CONTROL); -} - -static inline void rate_control_rate_update(struct ieee80211_local *local, +void rate_control_rate_init(struct sta_info *sta); +void rate_control_rate_update(struct ieee80211_local *local, struct ieee80211_supported_band *sband, - struct sta_info *sta, u32 changed) -{ - struct rate_control_ref *ref = local->rate_ctrl; - struct ieee80211_sta *ista = &sta->sta; - void *priv_sta = sta->rate_ctrl_priv; - struct ieee80211_chanctx_conf *chanctx_conf; - - if (ref && ref->ops->rate_update) { - rcu_read_lock(); - - chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf); - if (WARN_ON(!chanctx_conf)) { - rcu_read_unlock(); - return; - } - - spin_lock_bh(&sta->rate_ctrl_lock); - ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def, - ista, priv_sta, changed); - spin_unlock_bh(&sta->rate_ctrl_lock); - rcu_read_unlock(); - } - drv_sta_rc_update(local, sta->sdata, &sta->sta, changed); -} + struct sta_info *sta, u32 changed); static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, struct sta_info *sta, gfp_t gfp) diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index 543b67233535..3928dbd24e25 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -867,7 +867,13 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, else idx = index % MCS_GROUP_RATES + (group->streams - 1) * 8; - if (offset > 0) { + /* enable RTS/CTS if needed: + * - if station is in dynamic SMPS (and streams > 1) + * - for fallback rates, to increase chances of getting through + */ + if (offset > 0 && + (mi->sta->smps_mode == IEEE80211_SMPS_DYNAMIC && + group->streams > 1)) { ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts; flags |= IEEE80211_TX_RC_USE_RTS_CTS; } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 5dae166cb7f5..5bc0b88d9eb1 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -42,6 +42,51 @@ static inline void ieee80211_rx_stats(struct net_device *dev, u32 len) u64_stats_update_end(&tstats->syncp); } +static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, + enum nl80211_iftype type) +{ + __le16 fc = hdr->frame_control; + + if (ieee80211_is_data(fc)) { + if (len < 24) /* drop incorrect hdr len (data) */ + return NULL; + + if (ieee80211_has_a4(fc)) + return NULL; + if (ieee80211_has_tods(fc)) + return hdr->addr1; + if (ieee80211_has_fromds(fc)) + return hdr->addr2; + + return hdr->addr3; + } + + if (ieee80211_is_mgmt(fc)) { + 
if (len < 24) /* drop incorrect hdr len (mgmt) */ + return NULL; + return hdr->addr3; + } + + if (ieee80211_is_ctl(fc)) { + if (ieee80211_is_pspoll(fc)) + return hdr->addr1; + + if (ieee80211_is_back_req(fc)) { + switch (type) { + case NL80211_IFTYPE_STATION: + return hdr->addr2; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + return hdr->addr1; + default: + break; /* fall through to the return */ + } + } + } + + return NULL; +} + /* * monitor mode reception * @@ -77,8 +122,7 @@ static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len, hdr = (void *)(skb->data + rtap_vendor_space); if (status->flag & (RX_FLAG_FAILED_FCS_CRC | - RX_FLAG_FAILED_PLCP_CRC | - RX_FLAG_AMPDU_IS_ZEROLEN)) + RX_FLAG_FAILED_PLCP_CRC)) return true; if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space)) @@ -346,10 +390,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS); put_unaligned_le32(status->ampdu_reference, pos); pos += 4; - if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN) - flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN; - if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN) - flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN; if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN) flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN; if (status->flag & RX_FLAG_AMPDU_IS_LAST) @@ -1093,11 +1133,6 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; - if (unlikely(rx->skb->len < 16)) { - I802_DEBUG_INC(rx->local->rx_handlers_drop_short); - return RX_DROP_MONITOR; - } - /* Drop disallowed frame classes based on STA auth/assoc state; * IEEE 802.11, Chap 5.5. * @@ -1240,22 +1275,22 @@ static void sta_ps_end(struct sta_info *sta) ieee80211_sta_ps_deliver_wakeup(sta); } -int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start) +int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start) { - struct sta_info *sta_inf = container_of(sta, struct sta_info, sta); + struct sta_info *sta = container_of(pubsta, struct sta_info, sta); bool in_ps; - WARN_ON(!ieee80211_hw_check(&sta_inf->local->hw, AP_LINK_PS)); + WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS)); /* Don't let the same PS state be set twice */ - in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA); + in_ps = test_sta_flag(sta, WLAN_STA_PS_STA); if ((start && in_ps) || (!start && !in_ps)) return -EINVAL; if (start) - sta_ps_start(sta_inf); + sta_ps_start(sta); else - sta_ps_end(sta_inf); + sta_ps_end(sta); return 0; } @@ -1393,7 +1428,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) sta->rx_bytes += rx->skb->len; if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { sta->last_signal = status->signal; - ewma_add(&sta->avg_signal, -status->signal); + ewma_signal_add(&sta->avg_signal, -status->signal); } if (status->chains) { @@ -1405,7 +1440,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) continue; sta->chain_signal_last[i] = signal; - ewma_add(&sta->chain_signal_avg[i], -signal); + ewma_signal_add(&sta->chain_signal_avg[i], -signal); } } @@ -1647,7 +1682,6 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) if (unlikely(rx->key->flags & KEY_FLAG_TAINTED)) return RX_DROP_MONITOR; - rx->key->tx_rx_count++; /* TODO: add threshold stuff again */ } else { return RX_DROP_MONITOR; @@ -1883,7 +1917,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) /* Complete frame has been reassembled - process it now */ status = IEEE80211_SKB_RXCB(rx->skb); - 
status->rx_flags |= IEEE80211_RX_FRAGMENTED; out: ieee80211_led_rx(rx->local); @@ -2108,9 +2141,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) /* deliver to local stack */ skb->protocol = eth_type_trans(skb, dev); memset(skb->cb, 0, sizeof(skb->cb)); - if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) && - rx->local->napi) - napi_gro_receive(rx->local->napi, skb); + if (rx->napi) + napi_gro_receive(rx->napi, skb); else netif_receive_skb(skb); } @@ -2378,9 +2410,8 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx) tf->category == WLAN_CATEGORY_TDLS && (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { - rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TDLS_CHSW; - skb_queue_tail(&sdata->skb_queue, rx->skb); - ieee80211_queue_work(&rx->local->hw, &sdata->work); + skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb); + schedule_work(&local->tdls_chsw_work); if (rx->sta) rx->sta->rx_packets++; @@ -3004,7 +3035,6 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) return RX_QUEUED; } -/* TODO: use IEEE80211_RX_FRAGMENTED */ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, struct ieee80211_rate *rate) { @@ -3216,7 +3246,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) /* This is OK -- must be QoS data frame */ .security_idx = tid, .seqno_idx = tid, - .flags = IEEE80211_RX_REORDER_TIMER, + .napi = NULL, /* must be NULL to not have races */ }; struct tid_ampdu_rx *tid_agg_rx; @@ -3286,7 +3316,7 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) case NL80211_IFTYPE_OCB: if (!bssid) return false; - if (ieee80211_is_beacon(hdr->frame_control)) + if (!ieee80211_is_data_present(hdr->frame_control)) return false; if (!is_broadcast_ether_addr(bssid)) return false; @@ -3393,7 +3423,8 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, * be called with rcu_read_lock protection. */ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, - struct sk_buff *skb) + struct sk_buff *skb, + struct napi_struct *napi) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata; @@ -3409,6 +3440,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, memset(&rx, 0, sizeof(rx)); rx.skb = skb; rx.local = local; + rx.napi = napi; if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) I802_DEBUG_INC(local->dot11ReceivedFragmentCount); @@ -3510,7 +3542,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, * This is the receive path handler. It is called by a low level driver when an * 802.11 MPDU is received from the hardware. */ -void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) +void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb, + struct napi_struct *napi) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_rate *rate = NULL; @@ -3609,7 +3642,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) ieee80211_tpt_led_trig_rx(local, ((struct ieee80211_hdr *)skb->data)->frame_control, skb->len); - __ieee80211_rx_handle_packet(hw, skb); + __ieee80211_rx_handle_packet(hw, skb, napi); rcu_read_unlock(); @@ -3617,7 +3650,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) drop: kfree_skb(skb); } -EXPORT_SYMBOL(ieee80211_rx); +EXPORT_SYMBOL(ieee80211_rx_napi); /* This is a version of the rx handler that can be called from hard irq * context. 
Post the skb on the queue and schedule the tasklet */ diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 666ddac3c87c..64f1936350c6 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -68,7 +68,7 @@ static const struct rhashtable_params sta_rht_params = { .nelem_hint = 3, /* start small */ .automatic_shrinking = true, .head_offset = offsetof(struct sta_info, hash_node), - .key_offset = offsetof(struct sta_info, sta.addr), + .key_offset = offsetof(struct sta_info, addr), .key_len = ETH_ALEN, .hashfn = sta_addr_hash, .max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE, @@ -249,6 +249,9 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) if (sta->sta.txq[0]) kfree(to_txq_info(sta->sta.txq[0])); kfree(rcu_dereference_raw(sta->sta.rates)); +#ifdef CONFIG_MAC80211_MESH + kfree(sta->mesh); +#endif kfree(sta); } @@ -313,13 +316,19 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); mutex_init(&sta->ampdu_mlme.mtx); #ifdef CONFIG_MAC80211_MESH - spin_lock_init(&sta->plink_lock); - if (ieee80211_vif_is_mesh(&sdata->vif) && - !sdata->u.mesh.user_mpm) - init_timer(&sta->plink_timer); - sta->nonpeer_pm = NL80211_MESH_POWER_ACTIVE; + if (ieee80211_vif_is_mesh(&sdata->vif)) { + sta->mesh = kzalloc(sizeof(*sta->mesh), gfp); + if (!sta->mesh) + goto free; + spin_lock_init(&sta->mesh->plink_lock); + if (ieee80211_vif_is_mesh(&sdata->vif) && + !sdata->u.mesh.user_mpm) + init_timer(&sta->mesh->plink_timer); + sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE; + } #endif + memcpy(sta->addr, addr, ETH_ALEN); memcpy(sta->sta.addr, addr, ETH_ALEN); sta->local = local; sta->sdata = sdata; @@ -332,9 +341,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, ktime_get_ts(&uptime); sta->last_connected = uptime.tv_sec; - ewma_init(&sta->avg_signal, 1024, 8); + ewma_signal_init(&sta->avg_signal); for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++) - ewma_init(&sta->chain_signal_avg[i], 1024, 8); + ewma_signal_init(&sta->chain_signal_avg[i]); if (local->ops->wake_tx_queue) { void *txq_data; @@ -405,6 +414,9 @@ free_txq: if (sta->sta.txq[0]) kfree(to_txq_info(sta->sta.txq[0])); free: +#ifdef CONFIG_MAC80211_MESH + kfree(sta->mesh); +#endif kfree(sta); return NULL; } @@ -623,7 +635,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending) bool indicate_tim = false; u8 ignore_for_tim = sta->sta.uapsd_queues; int ac; - u16 id; + u16 id = sta->sta.aid; if (sta->sdata->vif.type == NL80211_IFTYPE_AP || sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { @@ -631,12 +643,9 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending) return; ps = &sta->sdata->bss->ps; - id = sta->sta.aid; #ifdef CONFIG_MAC80211_MESH } else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) { ps = &sta->sdata->u.mesh.ps; - /* TIM map only for 1 <= PLID <= IEEE80211_MAX_AID */ - id = sta->plid % (IEEE80211_MAX_AID + 1); #endif } else { return; @@ -1887,7 +1896,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) } if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) { - sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal); + sinfo->signal_avg = + (s8) -ewma_signal_read(&sta->avg_signal); sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG); } } @@ -1902,7 +1912,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { sinfo->chain_signal[i] = 
sta->chain_signal_last[i]; sinfo->chain_signal_avg[i] = - (s8) -ewma_read(&sta->chain_signal_avg[i]); + (s8) -ewma_signal_read(&sta->chain_signal_avg[i]); } } @@ -1956,16 +1966,16 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) BIT(NL80211_STA_INFO_PEER_PM) | BIT(NL80211_STA_INFO_NONPEER_PM); - sinfo->llid = sta->llid; - sinfo->plid = sta->plid; - sinfo->plink_state = sta->plink_state; + sinfo->llid = sta->mesh->llid; + sinfo->plid = sta->mesh->plid; + sinfo->plink_state = sta->mesh->plink_state; if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { sinfo->filled |= BIT(NL80211_STA_INFO_T_OFFSET); - sinfo->t_offset = sta->t_offset; + sinfo->t_offset = sta->mesh->t_offset; } - sinfo->local_pm = sta->local_pm; - sinfo->peer_pm = sta->peer_pm; - sinfo->nonpeer_pm = sta->nonpeer_pm; + sinfo->local_pm = sta->mesh->local_pm; + sinfo->peer_pm = sta->mesh->peer_pm; + sinfo->nonpeer_pm = sta->mesh->nonpeer_pm; #endif } diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 226f8ca47ad6..b087c71ff7fe 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -53,6 +53,8 @@ * @WLAN_STA_TDLS_CHAN_SWITCH: This TDLS peer supports TDLS channel-switching * @WLAN_STA_TDLS_OFF_CHANNEL: The local STA is currently off-channel with this * TDLS peer + * @WLAN_STA_TDLS_WIDER_BW: This TDLS peer supports working on a wider bw on + * the BSS base channel. * @WLAN_STA_UAPSD: Station requested unscheduled SP while driver was * keeping station in power-save mode, reply when the driver * unblocks the station. @@ -84,6 +86,7 @@ enum ieee80211_sta_info_flags { WLAN_STA_TDLS_INITIATOR, WLAN_STA_TDLS_CHAN_SWITCH, WLAN_STA_TDLS_OFF_CHANNEL, + WLAN_STA_TDLS_WIDER_BW, WLAN_STA_UAPSD, WLAN_STA_SP, WLAN_STA_4ADDR_EVENT, @@ -269,6 +272,56 @@ struct ieee80211_fast_tx { struct rcu_head rcu_head; }; +/** + * struct mesh_sta - mesh STA information + * @plink_lock: serialize access to plink fields + * @llid: Local link ID + * @plid: Peer link ID + * @aid: local aid supplied by peer + * @reason: Cancel reason on PLINK_HOLDING state + * @plink_retries: Retries in establishment + * @plink_state: peer link state + * @plink_timeout: timeout of peer link + * @plink_timer: peer link watch timer + * @t_offset: timing offset relative to this host + * @t_offset_setpoint: reference timing offset of this sta to be used when + * calculating clockdrift + * @local_pm: local link-specific power save mode + * @peer_pm: peer-specific power save mode towards local STA + * @nonpeer_pm: STA power save mode towards non-peer neighbors + * @processed_beacon: set to true after peer rates and capabilities are + * processed + * @fail_avg: moving percentage of failed MSDUs + */ +struct mesh_sta { + struct timer_list plink_timer; + + s64 t_offset; + s64 t_offset_setpoint; + + spinlock_t plink_lock; + u16 llid; + u16 plid; + u16 aid; + u16 reason; + u8 plink_retries; + + bool processed_beacon; + + enum nl80211_plink_state plink_state; + u32 plink_timeout; + + /* mesh power save */ + enum nl80211_mesh_power_mode local_pm; + enum nl80211_mesh_power_mode peer_pm; + enum nl80211_mesh_power_mode nonpeer_pm; + + /* moving percentage of failed MSDUs */ + unsigned int fail_avg; +}; + +DECLARE_EWMA(signal, 1024, 8) + /** * struct sta_info - STA information * @@ -278,12 +331,13 @@ struct ieee80211_fast_tx { * @list: global linked list entry * @free_list: list entry for keeping track of stations to free * @hash_node: hash node for rhashtable + * @addr: station's MAC address - duplicated from public part to + * let the hash table 
work with just a single cacheline * @local: pointer to the global information * @sdata: virtual interface this station belongs to * @ptk: peer keys negotiated with this station, if any * @ptk_idx: last installed peer key index * @gtk: group keys negotiated with this station, if any - * @gtk_idx: last installed group key index * @rate_ctrl: rate control algorithm reference * @rate_ctrl_lock: spinlock used to protect rate control data * (data inside the algorithm, so serializes calls there) @@ -318,30 +372,17 @@ struct ieee80211_fast_tx { * @last_signal: signal of last received frame from this STA * @avg_signal: moving average of signal of received frames from this STA * @last_ack_signal: signal of last received Ack frame from this STA - * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue) + * @last_seq_ctrl: last received seq/frag number from this STA (per TID + * plus one for non-QoS frames) * @tx_filtered_count: number of frames the hardware filtered for this STA * @tx_retry_failed: number of frames that failed retry * @tx_retry_count: total number of retries for frames to this STA - * @fail_avg: moving percentage of failed MSDUs * @tx_packets: number of RX/TX MSDUs * @tx_bytes: number of bytes transmitted to this STA * @tid_seq: per-TID sequence numbers for sending to this STA * @ampdu_mlme: A-MPDU state machine state * @timer_to_tid: identity mapping to ID timers - * @plink_lock: serialize access to plink fields - * @llid: Local link ID - * @plid: Peer link ID - * @reason: Cancel reason on PLINK_HOLDING state - * @plink_retries: Retries in establishment - * @plink_state: peer link state - * @plink_timeout: timeout of peer link - * @plink_timer: peer link watch timer - * @t_offset: timing offset relative to this host - * @t_offset_setpoint: reference timing offset of this sta to be used when - * calculating clockdrift - * @local_pm: local link-specific power save mode - * @peer_pm: peer-specific power save mode towards local STA - * @nonpeer_pm: STA power save mode towards non-peer neighbors + * @mesh: mesh STA information * @debugfs: debug filesystem info * @dead: set to true when sta is unlinked * @uploaded: set to true when sta is uploaded to the driver @@ -369,19 +410,19 @@ struct ieee80211_fast_tx { * @rx_msdu: MSDUs received from this station, using IEEE80211_NUM_TID * entry for non-QoS frames * @fast_tx: TX fastpath information - * @processed_beacon: set to true after peer rates and capabilities are - * processed + * @tdls_chandef: a TDLS peer can have a wider chandef that is compatible to + * the BSS one. 
*/ struct sta_info { /* General information, mostly static */ struct list_head list, free_list; struct rcu_head rcu_head; struct rhash_head hash_node; + u8 addr[ETH_ALEN]; struct ieee80211_local *local; struct ieee80211_sub_if_data *sdata; struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS]; struct ieee80211_key __rcu *ptk[NUM_DEFAULT_KEYS]; - u8 gtk_idx; u8 ptk_idx; struct rate_control_ref *rate_ctrl; void *rate_ctrl_priv; @@ -390,6 +431,10 @@ struct sta_info { struct ieee80211_fast_tx __rcu *fast_tx; +#ifdef CONFIG_MAC80211_MESH + struct mesh_sta *mesh; +#endif + struct work_struct drv_deliver_wk; u16 listen_interval; @@ -419,12 +464,12 @@ struct sta_info { unsigned long rx_fragments; unsigned long rx_dropped; int last_signal; - struct ewma avg_signal; + struct ewma_signal avg_signal; int last_ack_signal; u8 chains; s8 chain_signal_last[IEEE80211_MAX_CHAINS]; - struct ewma chain_signal_avg[IEEE80211_MAX_CHAINS]; + struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS]; /* Plus 1 for non-QoS frames */ __le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1]; @@ -432,8 +477,6 @@ struct sta_info { /* Updated from TX status path only, no locking requirements */ unsigned long tx_filtered_count; unsigned long tx_retry_failed, tx_retry_count; - /* moving percentage of failed MSDUs */ - unsigned int fail_avg; /* Updated from TX path only, no locking requirements */ u64 tx_packets[IEEE80211_NUM_ACS]; @@ -455,29 +498,6 @@ struct sta_info { struct sta_ampdu_mlme ampdu_mlme; u8 timer_to_tid[IEEE80211_NUM_TIDS]; -#ifdef CONFIG_MAC80211_MESH - /* - * Mesh peer link attributes, protected by plink_lock. - * TODO: move to a sub-structure that is referenced with pointer? - */ - spinlock_t plink_lock; - u16 llid; - u16 plid; - u16 reason; - u8 plink_retries; - enum nl80211_plink_state plink_state; - u32 plink_timeout; - struct timer_list plink_timer; - - s64 t_offset; - s64 t_offset_setpoint; - /* mesh power save */ - enum nl80211_mesh_power_mode local_pm; - enum nl80211_mesh_power_mode peer_pm; - enum nl80211_mesh_power_mode nonpeer_pm; - bool processed_beacon; -#endif - #ifdef CONFIG_MAC80211_DEBUGFS struct sta_info_debugfsdentries { struct dentry *dir; @@ -498,6 +518,8 @@ struct sta_info { u8 reserved_tid; + struct cfg80211_chan_def tdls_chandef; + /* keep last! */ struct ieee80211_sta sta; }; @@ -505,7 +527,7 @@ struct sta_info { static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta) { #ifdef CONFIG_MAC80211_MESH - return sta->plink_state; + return sta->mesh->plink_state; #endif return NL80211_PLINK_LISTEN; } @@ -608,7 +630,7 @@ u32 sta_addr_hash(const void *key, u32 length, u32 seed); _sta_bucket_idx(tbl, _addr), \ hash_node) \ /* compare address and run code only if it matches */ \ - if (ether_addr_equal(_sta->sta.addr, (_addr))) + if (ether_addr_equal(_sta->addr, (_addr))) /* * Get STA info by index, BROKEN! 
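The sta_info.h hunk above replaces the old runtime-parameterized struct ewma with DECLARE_EWMA(signal, 1024, 8), which bakes the precision factor (1024) and the weight (8) into generated ewma_signal_init/add/read helpers. A self-contained sketch of the same fixed-point arithmetic, assuming the lib/average semantics of the time (the running value is kept scaled by the factor, and each new sample contributes 1/weight):

    #include <stdio.h>

    #define EWMA_FACTOR 1024 /* fixed-point scale */
    #define EWMA_WEIGHT 8    /* a new sample contributes 1/8 */

    struct ewma_signal { unsigned long internal; };

    static void ewma_signal_init(struct ewma_signal *e) { e->internal = 0; }

    static void ewma_signal_add(struct ewma_signal *e, unsigned long val)
    {
        unsigned long in = e->internal;

        /* avg' = avg * (w - 1)/w + val/w, kept scaled by EWMA_FACTOR */
        e->internal = in ? (in * (EWMA_WEIGHT - 1) + val * EWMA_FACTOR) / EWMA_WEIGHT
                         : val * EWMA_FACTOR;
    }

    static unsigned long ewma_signal_read(const struct ewma_signal *e)
    {
        return e->internal / EWMA_FACTOR;
    }

    int main(void)
    {
        /* mac80211 feeds -signal, so these are positive dBm magnitudes */
        unsigned long samples[] = { 40, 42, 44, 60, 41 };
        struct ewma_signal avg;

        ewma_signal_init(&avg);
        for (int i = 0; i < 5; i++)
            ewma_signal_add(&avg, samples[i]);
        printf("smoothed signal: -%lu dBm\n", ewma_signal_read(&avg));
        return 0;
    }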
diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 45628f37c083..8ba583243509 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -515,7 +515,7 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local, if (!sdata) { skb->dev = NULL; - } else if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) { + } else { unsigned int hdr_size = ieee80211_hdrlen(hdr->frame_control); @@ -529,9 +529,6 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local, ieee80211_mgd_conn_tx_status(sdata, hdr->frame_control, acked); - } else { - /* we assign ack frame ID for the others */ - WARN_ON(1); } rcu_read_unlock(); diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index 8db6e2994bbc..aee701a5649e 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -4,6 +4,7 @@ * Copyright 2006-2010 Johannes Berg * Copyright 2014, Intel Corporation * Copyright 2014 Intel Mobile Communications GmbH + * Copyright 2015 Intel Deutschland GmbH * * This file is GPLv2 as found in COPYING. */ @@ -11,6 +12,7 @@ #include <linux/ieee80211.h> #include <linux/log2.h> #include <net/cfg80211.h> +#include <linux/rtnetlink.h> #include "ieee80211_i.h" #include "driver-ops.h" @@ -35,20 +37,28 @@ void ieee80211_tdls_peer_del_work(struct work_struct *wk) mutex_unlock(&local->mtx); } -static void ieee80211_tdls_add_ext_capab(struct ieee80211_local *local, +static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { - u8 *pos = (void *)skb_put(skb, 7); + struct ieee80211_local *local = sdata->local; bool chan_switch = local->hw.wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH; + bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW); + enum ieee80211_band band = ieee80211_get_sdata_band(sdata); + struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; + bool vht = sband && sband->vht_cap.vht_supported; + u8 *pos = (void *)skb_put(skb, 10); *pos++ = WLAN_EID_EXT_CAPABILITY; - *pos++ = 5; /* len */ + *pos++ = 8; /* len */ *pos++ = 0x0; *pos++ = 0x0; *pos++ = 0x0; *pos++ = chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0; *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED; + *pos++ = 0; + *pos++ = 0; + *pos++ = (vht && wider_band) ? WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED : 0; } static u8 @@ -283,6 +293,60 @@ static void ieee80211_tdls_add_wmm_param_ie(struct ieee80211_sub_if_data *sdata, } } +static void +ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta) +{ + /* IEEE802.11ac-2013 Table E-4 */ + u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 }; + struct cfg80211_chan_def uc = sta->tdls_chandef; + enum nl80211_chan_width max_width = ieee80211_get_sta_bw(&sta->sta); + int i; + + /* only support upgrading non-narrow channels up to 80MHz */ + if (max_width == NL80211_CHAN_WIDTH_5 || + max_width == NL80211_CHAN_WIDTH_10) + return; + + if (max_width > NL80211_CHAN_WIDTH_80) + max_width = NL80211_CHAN_WIDTH_80; + + if (uc.width == max_width) + return; + /* + * Channel usage constraints in the IEEE802.11ac-2013 specification only + * allow expanding a 20MHz channel to 80MHz in a single way. In + * addition, there are no 40MHz allowed channels that are not part of + * the allowed 80MHz range in the 5GHz spectrum (the relevant one here). 
+ */ + for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++) + if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) { + uc.center_freq1 = centers_80mhz[i]; + uc.width = NL80211_CHAN_WIDTH_80; + break; + } + + if (!uc.center_freq1) + return; + + /* proceed to downgrade the chandef until usable or the same */ + while (uc.width > max_width && + !cfg80211_reg_can_beacon(sdata->local->hw.wiphy, + &uc, sdata->wdev.iftype)) + ieee80211_chandef_downgrade(&uc); + + if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) { + tdls_dbg(sdata, "TDLS ch width upgraded %d -> %d\n", + sta->tdls_chandef.width, uc.width); + + /* + * the station is not yet authorized when BW upgrade is done, + * locking is not required + */ + sta->tdls_chandef = uc; + } +} + static void ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, const u8 *peer, @@ -320,7 +384,7 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, offset = noffset; } - ieee80211_tdls_add_ext_capab(local, skb); + ieee80211_tdls_add_ext_capab(sdata, skb); /* add the QoS element if we support it */ if (local->hw.queues >= IEEE80211_NUM_ACS && @@ -350,15 +414,17 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, offset = noffset; } - rcu_read_lock(); + mutex_lock(&local->sta_mtx); /* we should have the peer STA if we're already responding */ if (action_code == WLAN_TDLS_SETUP_RESPONSE) { sta = sta_info_get(sdata, peer); if (WARN_ON_ONCE(!sta)) { - rcu_read_unlock(); + mutex_unlock(&local->sta_mtx); return; } + + sta->tdls_chandef = sdata->vif.bss_conf.chandef; } ieee80211_tdls_add_oper_classes(sdata, skb); @@ -384,10 +450,6 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap); } else if (action_code == WLAN_TDLS_SETUP_RESPONSE && ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) { - /* disable SMPS in TDLS responder */ - sta->sta.ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED - << IEEE80211_HT_CAP_SM_PS_SHIFT; - /* the peer caps are already intersected with our own */ memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap)); @@ -448,9 +510,16 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2); ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap); + + /* + * if both peers support WIDER_BW, we can expand the chandef to + * a wider compatible one, up to 80MHz + */ + if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) + ieee80211_tdls_chandef_vht_upgrade(sdata, sta); } - rcu_read_unlock(); + mutex_unlock(&local->sta_mtx); /* add any remaining IEs */ if (extra_ies_len) { @@ -474,15 +543,17 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, enum ieee80211_band band = ieee80211_get_sdata_band(sdata); u8 *pos; - rcu_read_lock(); + mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, peer); ap_sta = sta_info_get(sdata, ifmgd->bssid); if (WARN_ON_ONCE(!sta || !ap_sta)) { - rcu_read_unlock(); + mutex_unlock(&local->sta_mtx); return; } + sta->tdls_chandef = sdata->vif.bss_conf.chandef; + /* add any custom IEs that go before the QoS IE */ if (extra_ies_len) { static const u8 before_qos[] = { @@ -530,12 +601,19 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, /* only include VHT-operation if not on the 2.4GHz band */ if (band != IEEE80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) { + /* + * if both peers support WIDER_BW, we can expand the chandef to + * a wider compatible one, up to 80MHz + 
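The center-frequency match in ieee80211_tdls_chandef_vht_upgrade() works because every 20MHz primary channel of an 80MHz block sits at 10 or 30 MHz from that block's center, so abs(freq - center) <= 30 identifies the block uniquely. A standalone sketch of the lookup, using the same Table E-4 centers (illustrative, not the kernel function):

    #include <stdio.h>
    #include <stdlib.h>

    /* IEEE 802.11ac-2013 Table E-4: 80 MHz segment center frequencies */
    static const int centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };

    /* Return the 80 MHz center for a 20 MHz primary, or 0 if none fits. */
    static int center_80mhz_for(int primary_mhz)
    {
        for (size_t i = 0; i < sizeof(centers_80mhz) / sizeof(centers_80mhz[0]); i++)
            if (abs(primary_mhz - centers_80mhz[i]) <= 30)
                return centers_80mhz[i];
        return 0;
    }

    int main(void)
    {
        /* channels 36 (5180 MHz), 44 (5220), 100 (5500) and a 2.4 GHz one */
        int freqs[] = { 5180, 5220, 5500, 2472 };

        for (int i = 0; i < 4; i++)
            printf("%d MHz -> 80 MHz center %d\n", freqs[i], center_80mhz_for(freqs[i]));
        return 0;
    }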
*/ + if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) + ieee80211_tdls_chandef_vht_upgrade(sdata, sta); + pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation)); ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap, - &sdata->vif.bss_conf.chandef); + &sta->tdls_chandef); } - rcu_read_unlock(); + mutex_unlock(&local->sta_mtx); /* add any remaining IEs */ if (extra_ies_len) { @@ -784,7 +862,7 @@ ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata, max(sizeof(struct ieee80211_mgmt), sizeof(struct ieee80211_tdls_data)) + 50 + /* supported rates */ - 7 + /* ext capab */ + 10 + /* ext capab */ 26 + /* max(WMM-info, WMM-param) */ 2 + max(sizeof(struct ieee80211_ht_cap), sizeof(struct ieee80211_ht_operation)) + @@ -983,8 +1061,17 @@ ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev, { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; + enum ieee80211_smps_mode smps_mode = sdata->u.mgd.driver_smps_mode; int ret; + /* don't support setup with forced SMPS mode that's not off */ + if (smps_mode != IEEE80211_SMPS_AUTOMATIC && + smps_mode != IEEE80211_SMPS_OFF) { + tdls_dbg(sdata, "Aborting TDLS setup due to SMPS mode %d\n", + smps_mode); + return -ENOTSUPP; + } + mutex_lock(&local->mtx); /* we don't support concurrent TDLS peer setups */ @@ -1146,6 +1233,22 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, return ret; } +static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *conf; + struct ieee80211_chanctx *ctx; + + mutex_lock(&local->chanctx_mtx); + conf = rcu_dereference_protected(sdata->vif.chanctx_conf, + lockdep_is_held(&local->chanctx_mtx)); + if (conf) { + ctx = container_of(conf, struct ieee80211_chanctx, conf); + ieee80211_recalc_chanctx_chantype(local, ctx); + } + mutex_unlock(&local->chanctx_mtx); +} + int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, enum nl80211_tdls_operation oper) { @@ -1182,6 +1285,8 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, break; } + iee80211_tdls_recalc_chanctx(sdata); + rcu_read_lock(); sta = sta_info_get(sdata, peer); if (!sta) { @@ -1213,6 +1318,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, ieee80211_flush_queues(local, sdata, false); ret = sta_info_destroy_addr(sdata, peer); + iee80211_tdls_recalc_chanctx(sdata); break; default: ret = -ENOTSUPP; @@ -1224,6 +1330,10 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, eth_zero_addr(sdata->u.mgd.tdls_peer); } + if (ret == 0) + ieee80211_queue_work(&sdata->local->hw, + &sdata->u.mgd.request_smps_work); + mutex_unlock(&local->mtx); return ret; } @@ -1627,6 +1737,31 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata, return -EINVAL; } + if (!elems.sec_chan_offs) { + chan_type = NL80211_CHAN_HT20; + } else { + switch (elems.sec_chan_offs->sec_chan_offs) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + chan_type = NL80211_CHAN_HT40PLUS; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + chan_type = NL80211_CHAN_HT40MINUS; + break; + default: + chan_type = NL80211_CHAN_HT20; + break; + } + } + + cfg80211_chandef_create(&chandef, chan, chan_type); + + /* we will be active on the TDLS link */ + if (!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &chandef, + sdata->wdev.iftype)) { + tdls_dbg(sdata, "TDLS chan switch to forbidden channel\n"); + 
return -EINVAL; + } + mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, tf->sa); if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) { @@ -1647,27 +1782,15 @@ ieee80211_process_tdls_channel_switch_req(struct ieee80211_sub_if_data *sdata, goto out; } - if (!sta->sta.ht_cap.ht_supported) { - chan_type = NL80211_CHAN_NO_HT; - } else if (!elems.sec_chan_offs) { - chan_type = NL80211_CHAN_HT20; - } else { - switch (elems.sec_chan_offs->sec_chan_offs) { - case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: - chan_type = NL80211_CHAN_HT40PLUS; - break; - case IEEE80211_HT_PARAM_CHA_SEC_BELOW: - chan_type = NL80211_CHAN_HT40MINUS; - break; - default: - chan_type = NL80211_CHAN_HT20; - break; - } + /* peer should have known better */ + if (!sta->sta.ht_cap.ht_supported && elems.sec_chan_offs && + elems.sec_chan_offs->sec_chan_offs) { + tdls_dbg(sdata, "TDLS chan switch - wide chan unsupported\n"); + ret = -ENOTSUPP; + goto out; } - cfg80211_chandef_create(&chandef, chan, chan_type); params.chandef = &chandef; - params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time); params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout); @@ -1691,12 +1814,15 @@ out: return ret; } -void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata, - struct sk_buff *skb) +static void +ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) { struct ieee80211_tdls_data *tf = (void *)skb->data; struct wiphy *wiphy = sdata->local->hw.wiphy; + ASSERT_RTNL(); + /* make sure the driver supports it */ if (!(wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH)) return; @@ -1720,3 +1846,47 @@ void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata, return; } } + +void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata) +{ + struct sta_info *sta; + u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED; + + rcu_read_lock(); + list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) { + if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded || + !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) + continue; + + ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr, + NL80211_TDLS_TEARDOWN, reason, + GFP_ATOMIC); + } + rcu_read_unlock(); +} + +void ieee80211_tdls_chsw_work(struct work_struct *wk) +{ + struct ieee80211_local *local = + container_of(wk, struct ieee80211_local, tdls_chsw_work); + struct ieee80211_sub_if_data *sdata; + struct sk_buff *skb; + struct ieee80211_tdls_data *tf; + + rtnl_lock(); + while ((skb = skb_dequeue(&local->skb_queue_tdls_chsw))) { + tf = (struct ieee80211_tdls_data *)skb->data; + list_for_each_entry(sdata, &local->interfaces, list) { + if (!ieee80211_sdata_running(sdata) || + sdata->vif.type != NL80211_IFTYPE_STATION || + !ether_addr_equal(tf->da, sdata->vif.addr)) + continue; + + ieee80211_process_tdls_channel_switch(sdata, skb); + break; + } + + kfree_skb(skb); + } + rtnl_unlock(); +} diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index b8233505bf9f..84e0e8c7fb23 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -311,9 +311,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) if (tx->sdata->vif.type == NL80211_IFTYPE_WDS) return TX_CONTINUE; - if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT) - return TX_CONTINUE; - if (tx->flags & IEEE80211_TX_PS_BUFFERED) return TX_CONTINUE; @@ -610,7 +607,6 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) if (tx->key) { bool skip_hw = false; - tx->key->tx_rx_count++; /* TODO: add threshold stuff again */ 
switch (tx->key->conf.cipher) { @@ -690,7 +686,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP || tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT || - tx->sdata->vif.type == NL80211_IFTYPE_ADHOC); + tx->sdata->vif.type == NL80211_IFTYPE_ADHOC || + tx->sdata->vif.type == NL80211_IFTYPE_OCB); /* set up RTS protection if desired */ if (len > tx->local->hw.wiphy->rts_threshold) { @@ -2777,7 +2774,11 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, sdata->sequence_number += 0x10; } - sta->tx_msdu[tid]++; + if (skb_shinfo(skb)->gso_size) + sta->tx_msdu[tid] += + DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size); + else + sta->tx_msdu[tid]++; info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)]; @@ -3213,6 +3214,16 @@ static void ieee80211_set_csa(struct ieee80211_sub_if_data *sdata, rcu_read_unlock(); } +static u8 __ieee80211_csa_update_counter(struct beacon_data *beacon) +{ + beacon->csa_current_counter--; + + /* the counter should never reach 0 */ + WARN_ON_ONCE(!beacon->csa_current_counter); + + return beacon->csa_current_counter; +} + u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); @@ -3231,11 +3242,7 @@ u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif) if (!beacon) goto unlock; - beacon->csa_current_counter--; - - /* the counter should never reach 0 */ - WARN_ON_ONCE(!beacon->csa_current_counter); - count = beacon->csa_current_counter; + count = __ieee80211_csa_update_counter(beacon); unlock: rcu_read_unlock(); @@ -3335,7 +3342,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw, if (beacon) { if (beacon->csa_counter_offsets[0]) { if (!is_template) - ieee80211_csa_update_counter(vif); + __ieee80211_csa_update_counter(beacon); ieee80211_set_csa(sdata, beacon); } @@ -3381,7 +3388,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw, if (beacon->csa_counter_offsets[0]) { if (!is_template) - ieee80211_csa_update_counter(vif); + __ieee80211_csa_update_counter(beacon); ieee80211_set_csa(sdata, beacon); } @@ -3411,7 +3418,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw, * for now we leave it consistent with overall * mac80211's behavior. 
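The tx_msdu accounting fix above estimates how many MSDUs a GSO super-packet represents by dividing the frame length by gso_size, rounding up. A worked example of that ceiling division (the values are hypothetical):

    #include <stdio.h>

    /* kernel-style ceiling division: (n + d - 1) / d */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int skb_len  = 65226; /* hypothetical GSO super-packet */
        unsigned int gso_size = 1448;  /* typical TCP MSS over ethernet */

        /* 65226 / 1448 = 45.04..., so the frame counts as 46 MSDUs */
        printf("MSDUs counted: %u\n", DIV_ROUND_UP(skb_len, gso_size));
        return 0;
    }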
*/ - ieee80211_csa_update_counter(vif); + __ieee80211_csa_update_counter(beacon); ieee80211_set_csa(sdata, beacon); } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 43e5aadd7a89..1104421bc525 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -47,55 +47,6 @@ struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy) } EXPORT_SYMBOL(wiphy_to_ieee80211_hw); -u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, - enum nl80211_iftype type) -{ - __le16 fc = hdr->frame_control; - - /* drop ACK/CTS frames and incorrect hdr len (ctrl) */ - if (len < 16) - return NULL; - - if (ieee80211_is_data(fc)) { - if (len < 24) /* drop incorrect hdr len (data) */ - return NULL; - - if (ieee80211_has_a4(fc)) - return NULL; - if (ieee80211_has_tods(fc)) - return hdr->addr1; - if (ieee80211_has_fromds(fc)) - return hdr->addr2; - - return hdr->addr3; - } - - if (ieee80211_is_mgmt(fc)) { - if (len < 24) /* drop incorrect hdr len (mgmt) */ - return NULL; - return hdr->addr3; - } - - if (ieee80211_is_ctl(fc)) { - if (ieee80211_is_pspoll(fc)) - return hdr->addr1; - - if (ieee80211_is_back_req(fc)) { - switch (type) { - case NL80211_IFTYPE_STATION: - return hdr->addr2; - case NL80211_IFTYPE_AP: - case NL80211_IFTYPE_AP_VLAN: - return hdr->addr1; - default: - break; /* fall through to the return */ - } - } - } - - return NULL; -} - void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) { struct sk_buff *skb; @@ -752,7 +703,12 @@ EXPORT_SYMBOL_GPL(wdev_to_ieee80211_vif); struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif) { - struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_sub_if_data *sdata; + + if (!vif) + return NULL; + + sdata = vif_to_sdata(vif); if (!ieee80211_sdata_running(sdata) || !(sdata->flags & IEEE80211_SDATA_IN_DRIVER)) @@ -1709,6 +1665,7 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local) local->resuming = false; local->suspended = false; local->started = false; + local->in_reconfig = false; /* scheduled scan clearly can't be running any more, but tell * cfg80211 and clear local state @@ -1759,16 +1716,24 @@ int ieee80211_reconfig(struct ieee80211_local *local) struct ieee80211_sub_if_data *sched_scan_sdata; struct cfg80211_sched_scan_request *sched_scan_req; bool sched_scan_stopped = false; + bool suspended = local->suspended; /* nothing to do if HW shouldn't run */ if (!local->open_count) goto wake_up; #ifdef CONFIG_PM - if (local->suspended) + if (suspended) local->resuming = true; if (local->wowlan) { + /* + * In the wowlan case, both mac80211 and the device + * are functional when the resume op is called, so + * clear local->suspended so the device could operate + * normally (e.g. pass rx frames). + */ + local->suspended = false; res = drv_resume(local); local->wowlan = false; if (res < 0) { @@ -1781,8 +1746,10 @@ int ieee80211_reconfig(struct ieee80211_local *local) /* * res is 1, which means the driver requested * to go through a regular reset on wakeup. + * restore local->suspended in this case. */ reconfig_due_to_wowlan = true; + local->suspended = true; } #endif @@ -1794,7 +1761,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) */ res = drv_start(local); if (res) { - if (local->suspended) + if (suspended) WARN(1, "Hardware became unavailable upon resume. 
This could be a software issue prior to suspend or a hardware issue.\n"); else WARN(1, "Hardware became unavailable during restart.\n"); @@ -2088,10 +2055,10 @@ int ieee80211_reconfig(struct ieee80211_local *local) * If this is for hw restart things are still running. * We may want to change that later, however. */ - if (local->open_count && (!local->suspended || reconfig_due_to_wowlan)) + if (local->open_count && (!suspended || reconfig_due_to_wowlan)) drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART); - if (!local->suspended) + if (!suspended) return 0; #ifdef CONFIG_PM diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index 80694d55db74..834ccdbc74be 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -308,11 +308,15 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; enum ieee80211_sta_rx_bandwidth bw; + enum nl80211_chan_width bss_width = sdata->vif.bss_conf.chandef.width; - bw = ieee80211_chan_width_to_rx_bw(sdata->vif.bss_conf.chandef.width); - bw = min(bw, ieee80211_sta_cap_rx_bw(sta)); + bw = ieee80211_sta_cap_rx_bw(sta); bw = min(bw, sta->cur_max_bandwidth); + /* do not cap the BW of TDLS WIDER_BW peers by the bss */ + if (!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) + bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width)); + return bw; } @@ -422,3 +426,29 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, if (changed > 0) rate_control_rate_update(local, sband, sta, changed); } + +void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, + u16 vht_mask[NL80211_VHT_NSS_MAX]) +{ + int i; + u16 mask, cap = le16_to_cpu(vht_cap); + + for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { + mask = (cap >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED; + switch (mask) { + case IEEE80211_VHT_MCS_SUPPORT_0_7: + vht_mask[i] = 0x00FF; + break; + case IEEE80211_VHT_MCS_SUPPORT_0_8: + vht_mask[i] = 0x01FF; + break; + case IEEE80211_VHT_MCS_SUPPORT_0_9: + vht_mask[i] = 0x03FF; + break; + case IEEE80211_VHT_MCS_NOT_SUPPORTED: + default: + vht_mask[i] = 0; + break; + } + } +} diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 943f7606527e..feb547dc8643 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -516,30 +516,33 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx, return RX_DROP_UNUSABLE; } - ccmp_hdr2pn(pn, skb->data + hdrlen); + if (!(status->flag & RX_FLAG_PN_VALIDATED)) { + ccmp_hdr2pn(pn, skb->data + hdrlen); - queue = rx->security_idx; + queue = rx->security_idx; - if (memcmp(pn, key->u.ccmp.rx_pn[queue], IEEE80211_CCMP_PN_LEN) <= 0) { - key->u.ccmp.replays++; - return RX_DROP_UNUSABLE; - } - - if (!(status->flag & RX_FLAG_DECRYPTED)) { - u8 aad[2 * AES_BLOCK_SIZE]; - u8 b_0[AES_BLOCK_SIZE]; - /* hardware didn't decrypt/verify MIC */ - ccmp_special_blocks(skb, pn, b_0, aad); - - if (ieee80211_aes_ccm_decrypt( - key->u.ccmp.tfm, b_0, aad, - skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN, - data_len, - skb->data + skb->len - mic_len, mic_len)) + if (memcmp(pn, key->u.ccmp.rx_pn[queue], + IEEE80211_CCMP_PN_LEN) <= 0) { + key->u.ccmp.replays++; return RX_DROP_UNUSABLE; - } + } - memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN); + if (!(status->flag & RX_FLAG_DECRYPTED)) { + u8 aad[2 * AES_BLOCK_SIZE]; + u8 b_0[AES_BLOCK_SIZE]; + /* hardware didn't decrypt/verify MIC */ + ccmp_special_blocks(skb, pn, b_0, aad); + + if (ieee80211_aes_ccm_decrypt( + key->u.ccmp.tfm, b_0, aad, + skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN, + data_len, + skb->data + 
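The new ieee80211_get_vht_mask_from_cap() above unpacks the 16-bit VHT MCS map, two bits per spatial stream, into per-NSS rate masks. A userspace sketch of the same decode; the two-bit field values mirror the IEEE80211_VHT_MCS_* constants, but the rest is illustrative:

    #include <stdio.h>
    #include <stdint.h>

    #define VHT_NSS_MAX       8
    #define MCS_SUPPORT_0_7   0 /* two-bit field values, per NSS */
    #define MCS_SUPPORT_0_8   1
    #define MCS_SUPPORT_0_9   2
    #define MCS_NOT_SUPPORTED 3

    static void vht_mask_from_map(uint16_t map, uint16_t mask[VHT_NSS_MAX])
    {
        for (int i = 0; i < VHT_NSS_MAX; i++) {
            switch ((map >> (i * 2)) & 0x3) {
            case MCS_SUPPORT_0_7: mask[i] = 0x00FF; break;
            case MCS_SUPPORT_0_8: mask[i] = 0x01FF; break;
            case MCS_SUPPORT_0_9: mask[i] = 0x03FF; break;
            default:              mask[i] = 0;      break;
            }
        }
    }

    int main(void)
    {
        /* 0xFFF6: NSS1 supports MCS 0-9, NSS2 MCS 0-8, NSS3-8 nothing */
        uint16_t mask[VHT_NSS_MAX];

        vht_mask_from_map(0xFFF6, mask);
        for (int i = 0; i < VHT_NSS_MAX; i++)
            printf("NSS%d mask: 0x%04X\n", i + 1, mask[i]);
        return 0;
    }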
skb->len - mic_len, mic_len)) + return RX_DROP_UNUSABLE; + } + + memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN); + } /* Remove CCMP header and MIC */ if (pskb_trim(skb, skb->len - mic_len)) @@ -739,30 +742,34 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx) return RX_DROP_UNUSABLE; } - gcmp_hdr2pn(pn, skb->data + hdrlen); + if (!(status->flag & RX_FLAG_PN_VALIDATED)) { + gcmp_hdr2pn(pn, skb->data + hdrlen); - queue = rx->security_idx; + queue = rx->security_idx; - if (memcmp(pn, key->u.gcmp.rx_pn[queue], IEEE80211_GCMP_PN_LEN) <= 0) { - key->u.gcmp.replays++; - return RX_DROP_UNUSABLE; - } - - if (!(status->flag & RX_FLAG_DECRYPTED)) { - u8 aad[2 * AES_BLOCK_SIZE]; - u8 j_0[AES_BLOCK_SIZE]; - /* hardware didn't decrypt/verify MIC */ - gcmp_special_blocks(skb, pn, j_0, aad); - - if (ieee80211_aes_gcm_decrypt( - key->u.gcmp.tfm, j_0, aad, - skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN, - data_len, - skb->data + skb->len - IEEE80211_GCMP_MIC_LEN)) + if (memcmp(pn, key->u.gcmp.rx_pn[queue], + IEEE80211_GCMP_PN_LEN) <= 0) { + key->u.gcmp.replays++; return RX_DROP_UNUSABLE; - } + } - memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN); + if (!(status->flag & RX_FLAG_DECRYPTED)) { + u8 aad[2 * AES_BLOCK_SIZE]; + u8 j_0[AES_BLOCK_SIZE]; + /* hardware didn't decrypt/verify MIC */ + gcmp_special_blocks(skb, pn, j_0, aad); + + if (ieee80211_aes_gcm_decrypt( + key->u.gcmp.tfm, j_0, aad, + skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN, + data_len, + skb->data + skb->len - + IEEE80211_GCMP_MIC_LEN)) + return RX_DROP_UNUSABLE; + } + + memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN); + } /* Remove GCMP header and MIC */ if (pskb_trim(skb, skb->len - IEEE80211_GCMP_MIC_LEN)) diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c index f7ba51e8b4ca..c865ebb2ace2 100644 --- a/net/mac802154/cfg.c +++ b/net/mac802154/cfg.c @@ -209,10 +209,6 @@ ieee802154_set_backoff_exponent(struct wpan_phy *wpan_phy, { ASSERT_RTNL(); - if (wpan_dev->min_be == min_be && - wpan_dev->max_be == max_be) - return 0; - wpan_dev->min_be = min_be; wpan_dev->max_be = max_be; return 0; @@ -224,9 +220,6 @@ ieee802154_set_short_addr(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, { ASSERT_RTNL(); - if (wpan_dev->short_addr == short_addr) - return 0; - wpan_dev->short_addr = short_addr; return 0; } @@ -238,9 +231,6 @@ ieee802154_set_max_csma_backoffs(struct wpan_phy *wpan_phy, { ASSERT_RTNL(); - if (wpan_dev->csma_retries == max_csma_backoffs) - return 0; - wpan_dev->csma_retries = max_csma_backoffs; return 0; } @@ -252,9 +242,6 @@ ieee802154_set_max_frame_retries(struct wpan_phy *wpan_phy, { ASSERT_RTNL(); - if (wpan_dev->frame_retries == max_frame_retries) - return 0; - wpan_dev->frame_retries = max_frame_retries; return 0; } @@ -265,13 +252,20 @@ ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, { ASSERT_RTNL(); - if (wpan_dev->lbt == mode) - return 0; - wpan_dev->lbt = mode; return 0; } +static int +ieee802154_set_ackreq_default(struct wpan_phy *wpan_phy, + struct wpan_dev *wpan_dev, bool ackreq) +{ + ASSERT_RTNL(); + + wpan_dev->ackreq = ackreq; + return 0; +} + const struct cfg802154_ops mac802154_config_ops = { .add_virtual_intf_deprecated = ieee802154_add_iface_deprecated, .del_virtual_intf_deprecated = ieee802154_del_iface_deprecated, @@ -289,4 +283,5 @@ const struct cfg802154_ops mac802154_config_ops = { .set_max_csma_backoffs = ieee802154_set_max_csma_backoffs, .set_max_frame_retries = ieee802154_set_max_frame_retries, .set_lbt_mode = 
ieee802154_set_lbt_mode, + .set_ackreq_default = ieee802154_set_ackreq_default, }; diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c index 416de903e467..ed26952f9e14 100644 --- a/net/mac802154/iface.c +++ b/net/mac802154/iface.c @@ -125,6 +125,14 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p) if (netif_running(dev)) return -EBUSY; + /* lowpan need to be down for update + * SLAAC address after ifup + */ + if (sdata->wpan_dev.lowpan_dev) { + if (netif_running(sdata->wpan_dev.lowpan_dev)) + return -EBUSY; + } + ieee802154_be64_to_le64(&extended_addr, addr->sa_data); if (!ieee802154_is_valid_extended_unicast_addr(extended_addr)) return -EINVAL; @@ -132,6 +140,13 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p) memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); sdata->wpan_dev.extended_addr = extended_addr; + /* update lowpan interface mac address when + * wpan mac has been changed + */ + if (sdata->wpan_dev.lowpan_dev) + memcpy(sdata->wpan_dev.lowpan_dev->dev_addr, dev->dev_addr, + dev->addr_len); + return mac802154_wpan_update_llsec(dev); } @@ -483,8 +498,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata, wpan_dev->min_be = 3; wpan_dev->max_be = 5; wpan_dev->csma_retries = 4; - /* for compatibility, actual default is 3 */ - wpan_dev->frame_retries = -1; + wpan_dev->frame_retries = 3; wpan_dev->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST); wpan_dev->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST); diff --git a/net/mac802154/main.c b/net/mac802154/main.c index 9e55431b9a5c..e8cab5bb80c6 100644 --- a/net/mac802154/main.c +++ b/net/mac802154/main.c @@ -111,7 +111,7 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops) phy->supported.max_minbe = 8; phy->supported.min_maxbe = 3; phy->supported.max_maxbe = 8; - phy->supported.min_frame_retries = -1; + phy->supported.min_frame_retries = 0; phy->supported.max_frame_retries = 7; phy->supported.max_csma_backoffs = 5; phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE; @@ -177,11 +177,8 @@ int ieee802154_register_hw(struct ieee802154_hw *hw) } if (!(hw->flags & IEEE802154_HW_FRAME_RETRIES)) { - /* TODO should be 3, but our default value is -1 which means - * no ARET handling. 
- */ - local->phy->supported.min_frame_retries = -1; - local->phy->supported.max_frame_retries = -1; + local->phy->supported.min_frame_retries = 3; + local->phy->supported.max_frame_retries = 3; } if (hw->flags & IEEE802154_HW_PROMISCUOUS) diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 04306989d054..8c5707db53c5 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -27,11 +27,23 @@ /* This maximum ha length copied from the definition of struct neighbour */ #define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))) +enum mpls_payload_type { + MPT_UNSPEC, /* IPv4 or IPv6 */ + MPT_IPV4 = 4, + MPT_IPV6 = 6, + + /* Other types not implemented: + * - Pseudo-wire with or without control word (RFC4385) + * - GAL (RFC5586) + */ +}; + struct mpls_route { /* next hop label forwarding entry */ struct net_device __rcu *rt_dev; struct rcu_head rt_rcu; u32 rt_label[MAX_NEW_LABELS]; u8 rt_protocol; /* routing protocol that set this entry */ + u8 rt_payload_type; u8 rt_labels; u8 rt_via_alen; u8 rt_via_table; @@ -96,16 +108,8 @@ EXPORT_SYMBOL_GPL(mpls_pkt_too_big); static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb, struct mpls_entry_decoded dec) { - /* RFC4385 and RFC5586 encode other packets in mpls such that - * they don't conflict with the ip version number, making - * decoding by examining the ip version correct in everything - * except for the strangest cases. - * - * The strange cases if we choose to support them will require - * manual configuration. - */ - struct iphdr *hdr4; - bool success = true; + enum mpls_payload_type payload_type; + bool success = false; /* The IPv4 code below accesses through the IPv4 header * checksum, which is 12 bytes into the packet. @@ -120,23 +124,32 @@ static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb, if (!pskb_may_pull(skb, 12)) return false; - /* Use ip_hdr to find the ip protocol version */ - hdr4 = ip_hdr(skb); - if (hdr4->version == 4) { + payload_type = rt->rt_payload_type; + if (payload_type == MPT_UNSPEC) + payload_type = ip_hdr(skb)->version; + + switch (payload_type) { + case MPT_IPV4: { + struct iphdr *hdr4 = ip_hdr(skb); skb->protocol = htons(ETH_P_IP); csum_replace2(&hdr4->check, htons(hdr4->ttl << 8), htons(dec.ttl << 8)); hdr4->ttl = dec.ttl; + success = true; + break; } - else if (hdr4->version == 6) { + case MPT_IPV6: { struct ipv6hdr *hdr6 = ipv6_hdr(skb); skb->protocol = htons(ETH_P_IPV6); hdr6->hop_limit = dec.ttl; + success = true; + break; } - else - /* version 0 and version 1 are used by pseudo wires */ - success = false; + case MPT_UNSPEC: + break; + } + return success; } @@ -255,16 +268,17 @@ static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = { }; struct mpls_route_config { - u32 rc_protocol; - u32 rc_ifindex; - u16 rc_via_table; - u16 rc_via_alen; - u8 rc_via[MAX_VIA_ALEN]; - u32 rc_label; - u32 rc_output_labels; - u32 rc_output_label[MAX_NEW_LABELS]; - u32 rc_nlflags; - struct nl_info rc_nlinfo; + u32 rc_protocol; + u32 rc_ifindex; + u16 rc_via_table; + u16 rc_via_alen; + u8 rc_via[MAX_VIA_ALEN]; + u32 rc_label; + u32 rc_output_labels; + u32 rc_output_label[MAX_NEW_LABELS]; + u32 rc_nlflags; + enum mpls_payload_type rc_payload_type; + struct nl_info rc_nlinfo; }; static struct mpls_route *mpls_rt_alloc(size_t alen) @@ -493,6 +507,7 @@ static int mpls_route_add(struct mpls_route_config *cfg) rt->rt_label[i] = cfg->rc_output_label[i]; rt->rt_protocol = cfg->rc_protocol; RCU_INIT_POINTER(rt->rt_dev, dev); + rt->rt_payload_type = cfg->rc_payload_type; rt->rt_via_table = 
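With rt_payload_type stored per route, mpls_egress() above only falls back to sniffing the IP version nibble for MPT_UNSPEC routes; explicitly configured IPv4/IPv6 routes no longer depend on the payload looking like IP. A compact userspace sketch of that dispatch (illustrative, not the kernel function):

    #include <stdio.h>

    enum mpls_payload_type {
        MPT_UNSPEC,   /* IPv4 or IPv6, decided by the version nibble */
        MPT_IPV4 = 4,
        MPT_IPV6 = 6,
    };

    /* first_byte is the first payload octet; its high nibble is the version */
    static const char *egress_proto(enum mpls_payload_type cfg, unsigned char first_byte)
    {
        int t = cfg;

        if (t == MPT_UNSPEC)
            t = first_byte >> 4;

        switch (t) {
        case MPT_IPV4: return "ETH_P_IP";
        case MPT_IPV6: return "ETH_P_IPV6";
        default:       return "drop";
        }
    }

    int main(void)
    {
        printf("%s\n", egress_proto(MPT_UNSPEC, 0x45)); /* IPv4, IHL 5 */
        printf("%s\n", egress_proto(MPT_UNSPEC, 0x60)); /* IPv6 */
        printf("%s\n", egress_proto(MPT_UNSPEC, 0x10)); /* pseudo-wire: drop */
        return 0;
    }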
cfg->rc_via_table; memcpy(rt->rt_via, cfg->rc_via, cfg->rc_via_alen); @@ -1047,6 +1062,7 @@ static int resize_platform_label_table(struct net *net, size_t limit) goto nort0; RCU_INIT_POINTER(rt0->rt_dev, lo); rt0->rt_protocol = RTPROT_KERNEL; + rt0->rt_payload_type = MPT_IPV4; rt0->rt_via_table = NEIGH_LINK_TABLE; memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len); } @@ -1057,6 +1073,7 @@ static int resize_platform_label_table(struct net *net, size_t limit) goto nort2; RCU_INIT_POINTER(rt2->rt_dev, lo); rt2->rt_protocol = RTPROT_KERNEL; + rt2->rt_payload_type = MPT_IPV6; rt2->rt_via_table = NEIGH_LINK_TABLE; memcpy(rt2->rt_via, lo->dev_addr, lo->addr_len); } diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 48521d62c672..ac3be9b0629b 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -294,7 +294,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, { struct nf_conn *tmpl; - tmpl = kzalloc(sizeof(struct nf_conn), GFP_KERNEL); + tmpl = kzalloc(sizeof(*tmpl), flags); if (tmpl == NULL) return NULL; @@ -1547,10 +1547,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) sz = nr_slots * sizeof(struct hlist_nulls_head); hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, get_order(sz)); - if (!hash) { - printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); + if (!hash) hash = vzalloc(sz); - } if (hash && nulls) for (i = 0; i < nr_slots; i++) diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c index ce3e840c8704..dff0f0cc59e4 100644 --- a/net/netfilter/nf_conntrack_seqadj.c +++ b/net/netfilter/nf_conntrack_seqadj.c @@ -103,9 +103,9 @@ static void nf_ct_sack_block_adjust(struct sk_buff *skb, ntohl(sack->end_seq), ntohl(new_end_seq)); inet_proto_csum_replace4(&tcph->check, skb, - sack->start_seq, new_start_seq, 0); + sack->start_seq, new_start_seq, false); inet_proto_csum_replace4(&tcph->check, skb, - sack->end_seq, new_end_seq, 0); + sack->end_seq, new_end_seq, false); sack->start_seq = new_start_seq; sack->end_seq = new_end_seq; sackoff += sizeof(*sack); @@ -193,8 +193,9 @@ int nf_ct_seq_adjust(struct sk_buff *skb, newseq = htonl(ntohl(tcph->seq) + seqoff); newack = htonl(ntohl(tcph->ack_seq) - ackoff); - inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0); - inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0); + inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, false); + inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, + false); pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n", ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq), diff --git a/net/netfilter/nf_nat_proto_dccp.c b/net/netfilter/nf_nat_proto_dccp.c index b8067b53ff3a..15c47b246d0d 100644 --- a/net/netfilter/nf_nat_proto_dccp.c +++ b/net/netfilter/nf_nat_proto_dccp.c @@ -69,7 +69,7 @@ dccp_manip_pkt(struct sk_buff *skb, l3proto->csum_update(skb, iphdroff, &hdr->dccph_checksum, tuple, maniptype); inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport, - 0); + false); return true; } diff --git a/net/netfilter/nf_nat_proto_tcp.c b/net/netfilter/nf_nat_proto_tcp.c index 37f5505f4529..4f8820fc5148 100644 --- a/net/netfilter/nf_nat_proto_tcp.c +++ b/net/netfilter/nf_nat_proto_tcp.c @@ -70,7 +70,7 @@ tcp_manip_pkt(struct sk_buff *skb, return true; l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype); - inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0); + 
inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, false); return true; } diff --git a/net/netfilter/nf_nat_proto_udp.c b/net/netfilter/nf_nat_proto_udp.c index b0ede2f0d8bc..b1e627227b6e 100644 --- a/net/netfilter/nf_nat_proto_udp.c +++ b/net/netfilter/nf_nat_proto_udp.c @@ -57,7 +57,7 @@ udp_manip_pkt(struct sk_buff *skb, l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype); inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, - 0); + false); if (!hdr->check) hdr->check = CSUM_MANGLED_0; } diff --git a/net/netfilter/nf_nat_proto_udplite.c b/net/netfilter/nf_nat_proto_udplite.c index 368f14e01e75..58340c97bd83 100644 --- a/net/netfilter/nf_nat_proto_udplite.c +++ b/net/netfilter/nf_nat_proto_udplite.c @@ -56,7 +56,7 @@ udplite_manip_pkt(struct sk_buff *skb, } l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype); - inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 0); + inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, false); if (!hdr->check) hdr->check = CSUM_MANGLED_0; diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index 58b2e84dab27..8fbbdb09826e 100644 --- a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c @@ -227,7 +227,7 @@ unsigned int synproxy_tstamp_adjust(struct sk_buff *skb, synproxy->tsoff); } inet_proto_csum_replace4(&th->check, skb, - old, *ptr, 0); + old, *ptr, false); return 1; } optoff += op[1]; @@ -355,10 +355,8 @@ static int __net_init synproxy_net_init(struct net *net) int err = -ENOMEM; ct = nf_ct_tmpl_alloc(net, &nf_ct_zone_dflt, GFP_KERNEL); - if (IS_ERR(ct)) { - err = PTR_ERR(ct); + if (!ct) goto err1; - } if (!nfct_seqadj_ext_add(ct)) goto err2; diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 346509825a80..8e524898ccea 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -224,9 +224,10 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par, zone.flags |= NF_CT_FLAG_MARK; ct = nf_ct_tmpl_alloc(par->net, &zone, GFP_KERNEL); - ret = PTR_ERR(ct); - if (IS_ERR(ct)) + if (!ct) { + ret = -ENOMEM; goto err2; + } ret = 0; if ((info->ct_events || info->exp_events) && diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 8c3190e2fc6a..8c02501a530f 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -144,7 +144,7 @@ tcpmss_mangle_packet(struct sk_buff *skb, inet_proto_csum_replace2(&tcph->check, skb, htons(oldmss), htons(newmss), - 0); + false); return 0; } } @@ -185,18 +185,18 @@ tcpmss_mangle_packet(struct sk_buff *skb, memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr)); inet_proto_csum_replace2(&tcph->check, skb, - htons(len), htons(len + TCPOLEN_MSS), 1); + htons(len), htons(len + TCPOLEN_MSS), true); opt[0] = TCPOPT_MSS; opt[1] = TCPOLEN_MSS; opt[2] = (newmss & 0xff00) >> 8; opt[3] = newmss & 0x00ff; - inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), 0); + inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), false); oldval = ((__be16 *)tcph)[6]; tcph->doff += TCPOLEN_MSS/4; inet_proto_csum_replace2(&tcph->check, skb, - oldval, ((__be16 *)tcph)[6], 0); + oldval, ((__be16 *)tcph)[6], false); return TCPOLEN_MSS; } diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c index 625fa1d636a0..eb92bffff11c 100644 --- a/net/netfilter/xt_TCPOPTSTRIP.c +++ b/net/netfilter/xt_TCPOPTSTRIP.c @@ -80,7 +80,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, n <<= 8; } inet_proto_csum_replace2(&tcph->check, skb, htons(o), - 
htons(n), 0); + htons(n), false); } memset(opt + i, TCPOPT_NOP, optl); } diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index d8e2e3918ce2..67d210477863 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1096,6 +1096,11 @@ static int netlink_insert(struct sock *sk, u32 portid) err = __netlink_insert(table, sk); if (err) { + /* In case the hashtable backend returns with -EBUSY + * from here, it must not escape to the caller. + */ + if (unlikely(err == -EBUSY)) + err = -EOVERFLOW; if (err == -EEXIST) err = -EADDRINUSE; nlk_sk(sk)->portid = 0; diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig index 15840401a2ce..422dc0567de9 100644 --- a/net/openvswitch/Kconfig +++ b/net/openvswitch/Kconfig @@ -34,7 +34,7 @@ config OPENVSWITCH config OPENVSWITCH_GRE tristate "Open vSwitch GRE tunneling support" depends on OPENVSWITCH - depends on NET_IPGRE_DEMUX + depends on NET_IPGRE default OPENVSWITCH ---help--- If you say Y here, then the Open vSwitch will be able create GRE diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index cf04c2f8b32a..4f4200717bef 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -273,28 +273,36 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key, return 0; } -static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, - __be32 *addr, __be32 new_addr) +static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh, + __be32 addr, __be32 new_addr) { int transport_len = skb->len - skb_transport_offset(skb); + if (nh->frag_off & htons(IP_OFFSET)) + return; + if (nh->protocol == IPPROTO_TCP) { if (likely(transport_len >= sizeof(struct tcphdr))) inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb, - *addr, new_addr, 1); + addr, new_addr, true); } else if (nh->protocol == IPPROTO_UDP) { if (likely(transport_len >= sizeof(struct udphdr))) { struct udphdr *uh = udp_hdr(skb); if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { inet_proto_csum_replace4(&uh->check, skb, - *addr, new_addr, 1); + addr, new_addr, true); if (!uh->check) uh->check = CSUM_MANGLED_0; } } } +} +static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, + __be32 *addr, __be32 new_addr) +{ + update_ip_l4_checksum(skb, nh, *addr, new_addr); csum_replace4(&nh->check, *addr, new_addr); skb_clear_hash(skb); *addr = new_addr; @@ -308,14 +316,14 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto, if (l4_proto == NEXTHDR_TCP) { if (likely(transport_len >= sizeof(struct tcphdr))) inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb, - addr, new_addr, 1); + addr, new_addr, true); } else if (l4_proto == NEXTHDR_UDP) { if (likely(transport_len >= sizeof(struct udphdr))) { struct udphdr *uh = udp_hdr(skb); if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { inet_proto_csum_replace16(&uh->check, skb, - addr, new_addr, 1); + addr, new_addr, true); if (!uh->check) uh->check = CSUM_MANGLED_0; } @@ -323,7 +331,7 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto, } else if (l4_proto == NEXTHDR_ICMP) { if (likely(transport_len >= sizeof(struct icmp6hdr))) inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum, - skb, addr, new_addr, 1); + skb, addr, new_addr, true); } } @@ -490,7 +498,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key, static void set_tp_port(struct sk_buff *skb, __be16 *port, __be16 new_port, __sum16 *check) { - inet_proto_csum_replace2(check, skb, *port, new_port, 0); + inet_proto_csum_replace2(check, skb, *port, 
new_port, false); *port = new_port; } @@ -669,9 +677,12 @@ static int sample(struct datapath *dp, struct sk_buff *skb, for (a = nla_data(attr), rem = nla_len(attr); rem > 0; a = nla_next(a, &rem)) { + u32 probability; + switch (nla_type(a)) { case OVS_SAMPLE_ATTR_PROBABILITY: - if (prandom_u32() >= nla_get_u32(a)) + probability = nla_get_u32(a); + if (!probability || prandom_u32() > probability) return 0; break; diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index b87656c66aaf..871801d2ac23 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c @@ -45,235 +45,43 @@ #include "datapath.h" #include "vport.h" +#include "vport-netdev.h" static struct vport_ops ovs_gre_vport_ops; -/* Returns the least-significant 32 bits of a __be64. */ -static __be32 be64_get_low32(__be64 x) +static struct vport *gre_tnl_create(const struct vport_parms *parms) { -#ifdef __BIG_ENDIAN - return (__force __be32)x; -#else - return (__force __be32)((__force u64)x >> 32); -#endif -} - -static __be16 filter_tnl_flags(__be16 flags) -{ - return flags & (TUNNEL_CSUM | TUNNEL_KEY); -} - -static struct sk_buff *__build_header(struct sk_buff *skb, - int tunnel_hlen) -{ - struct tnl_ptk_info tpi; - const struct ip_tunnel_key *tun_key; - - tun_key = &OVS_CB(skb)->egress_tun_info->key; - - skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM)); - if (IS_ERR(skb)) - return skb; - - tpi.flags = filter_tnl_flags(tun_key->tun_flags); - tpi.proto = htons(ETH_P_TEB); - tpi.key = be64_get_low32(tun_key->tun_id); - tpi.seq = 0; - gre_build_header(skb, &tpi, tunnel_hlen); - - return skb; -} - -static __be64 key_to_tunnel_id(__be32 key, __be32 seq) -{ -#ifdef __BIG_ENDIAN - return (__force __be64)((__force u64)seq << 32 | (__force u32)key); -#else - return (__force __be64)((__force u64)key << 32 | (__force u32)seq); -#endif -} - -/* Called with rcu_read_lock and BH disabled. */ -static int gre_rcv(struct sk_buff *skb, - const struct tnl_ptk_info *tpi) -{ - struct ip_tunnel_info tun_info; - struct ovs_net *ovs_net; - struct vport *vport; - __be64 key; - - ovs_net = net_generic(dev_net(skb->dev), ovs_net_id); - vport = rcu_dereference(ovs_net->vport_net.gre_vport); - if (unlikely(!vport)) - return PACKET_REJECT; - - key = key_to_tunnel_id(tpi->key, tpi->seq); - ip_tunnel_info_init(&tun_info, ip_hdr(skb), 0, 0, key, - filter_tnl_flags(tpi->flags), NULL, 0); - - ovs_vport_receive(vport, skb, &tun_info); - return PACKET_RCVD; -} - -/* Called with rcu_read_lock and BH disabled. 
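(A note on the sample() change above: OVS_SAMPLE_ATTR_PROBABILITY now has exact boundary semantics. A packet is sampled iff probability != 0 && prandom_u32() <= probability, so 0 never samples, UINT32_MAX always samples, and 0x80000000 samples about one packet in two; the old ">=" comparison still skipped one packet in 2^32 even at UINT32_MAX.)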
*/ -static int gre_err(struct sk_buff *skb, u32 info, - const struct tnl_ptk_info *tpi) -{ - struct ovs_net *ovs_net; + struct net *net = ovs_dp_get_net(parms->dp); + struct net_device *dev; struct vport *vport; - ovs_net = net_generic(dev_net(skb->dev), ovs_net_id); - vport = rcu_dereference(ovs_net->vport_net.gre_vport); + vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms); + if (IS_ERR(vport)) + return vport; - if (unlikely(!vport)) - return PACKET_REJECT; - else - return PACKET_RCVD; -} - -static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) -{ - struct net *net = ovs_dp_get_net(vport->dp); - const struct ip_tunnel_key *tun_key; - struct flowi4 fl; - struct rtable *rt; - int min_headroom; - int tunnel_hlen; - __be16 df; - int err; - - if (unlikely(!OVS_CB(skb)->egress_tun_info)) { - err = -EINVAL; - goto err_free_skb; + rtnl_lock(); + dev = gretap_fb_dev_create(net, parms->name, NET_NAME_USER); + if (IS_ERR(dev)) { + rtnl_unlock(); + ovs_vport_free(vport); + return ERR_CAST(dev); } - tun_key = &OVS_CB(skb)->egress_tun_info->key; - rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_GRE); - if (IS_ERR(rt)) { - err = PTR_ERR(rt); - goto err_free_skb; - } + dev_change_flags(dev, dev->flags | IFF_UP); + rtnl_unlock(); - tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags); - - min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len - + tunnel_hlen + sizeof(struct iphdr) - + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); - if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) { - int head_delta = SKB_DATA_ALIGN(min_headroom - - skb_headroom(skb) + - 16); - err = pskb_expand_head(skb, max_t(int, head_delta, 0), - 0, GFP_ATOMIC); - if (unlikely(err)) - goto err_free_rt; - } - - skb = vlan_hwaccel_push_inside(skb); - if (unlikely(!skb)) { - err = -ENOMEM; - goto err_free_rt; - } - - /* Push Tunnel header. */ - skb = __build_header(skb, tunnel_hlen); - if (IS_ERR(skb)) { - err = PTR_ERR(skb); - skb = NULL; - goto err_free_rt; - } - - df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? 
- htons(IP_DF) : 0; - - skb->ignore_df = 1; - - return iptunnel_xmit(skb->sk, rt, skb, fl.saddr, - tun_key->ipv4_dst, IPPROTO_GRE, - tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false); -err_free_rt: - ip_rt_put(rt); -err_free_skb: - kfree_skb(skb); - return err; -} - -static struct gre_cisco_protocol gre_protocol = { - .handler = gre_rcv, - .err_handler = gre_err, - .priority = 1, -}; - -static int gre_ports; -static int gre_init(void) -{ - int err; - - gre_ports++; - if (gre_ports > 1) - return 0; - - err = gre_cisco_register(&gre_protocol); - if (err) - pr_warn("cannot register gre protocol handler\n"); - - return err; -} - -static void gre_exit(void) -{ - gre_ports--; - if (gre_ports > 0) - return; - - gre_cisco_unregister(&gre_protocol); -} - -static const char *gre_get_name(const struct vport *vport) -{ - return vport_priv(vport); + return vport; } static struct vport *gre_create(const struct vport_parms *parms) { - struct net *net = ovs_dp_get_net(parms->dp); - struct ovs_net *ovs_net; struct vport *vport; - int err; - err = gre_init(); - if (err) - return ERR_PTR(err); - - ovs_net = net_generic(net, ovs_net_id); - if (ovsl_dereference(ovs_net->vport_net.gre_vport)) { - vport = ERR_PTR(-EEXIST); - goto error; - } - - vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre_vport_ops, parms); + vport = gre_tnl_create(parms); if (IS_ERR(vport)) - goto error; + return vport; - strncpy(vport_priv(vport), parms->name, IFNAMSIZ); - rcu_assign_pointer(ovs_net->vport_net.gre_vport, vport); - return vport; - -error: - gre_exit(); - return vport; -} - -static void gre_tnl_destroy(struct vport *vport) -{ - struct net *net = ovs_dp_get_net(vport->dp); - struct ovs_net *ovs_net; - - ovs_net = net_generic(net, ovs_net_id); - - RCU_INIT_POINTER(ovs_net->vport_net.gre_vport, NULL); - ovs_vport_deferred_free(vport); - gre_exit(); + return ovs_netdev_link(vport, parms->name); } static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, @@ -288,10 +96,9 @@ static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, static struct vport_ops ovs_gre_vport_ops = { .type = OVS_VPORT_TYPE_GRE, .create = gre_create, - .destroy = gre_tnl_destroy, - .get_name = gre_get_name, - .send = gre_tnl_send, + .send = ovs_netdev_send, .get_egress_tun_info = gre_get_egress_tun_info, + .destroy = ovs_netdev_tunnel_destroy, .owner = THIS_MODULE, }; diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index cddb7069b11b..4b70aaa4a746 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -147,7 +147,7 @@ static struct vport *netdev_create(const struct vport_parms *parms) return ovs_netdev_link(vport, parms->name); } -void ovs_vport_free_rcu(struct rcu_head *rcu) +static void vport_netdev_free(struct rcu_head *rcu) { struct vport *vport = container_of(rcu, struct vport, rcu); @@ -155,7 +155,6 @@ void ovs_vport_free_rcu(struct rcu_head *rcu) dev_put(vport->dev); ovs_vport_free(vport); } -EXPORT_SYMBOL_GPL(ovs_vport_free_rcu); void ovs_netdev_detach_dev(struct vport *vport) { @@ -175,9 +174,25 @@ static void netdev_destroy(struct vport *vport) ovs_netdev_detach_dev(vport); rtnl_unlock(); - call_rcu(&vport->rcu, ovs_vport_free_rcu); + call_rcu(&vport->rcu, vport_netdev_free); } +void ovs_netdev_tunnel_destroy(struct vport *vport) +{ + rtnl_lock(); + if (vport->dev->priv_flags & IFF_OVS_DATAPATH) + ovs_netdev_detach_dev(vport); + + /* Early release so we can unregister the device */ + dev_put(vport->dev); + rtnl_delete_link(vport->dev); + vport->dev = NULL; + 
rtnl_unlock(); + + call_rcu(&vport->rcu, vport_netdev_free); +} +EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy); + static unsigned int packet_length(const struct sk_buff *skb) { unsigned int length = skb->len - ETH_HLEN; diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h index 804412697a90..497cc81f1aca 100644 --- a/net/openvswitch/vport-netdev.h +++ b/net/openvswitch/vport-netdev.h @@ -29,9 +29,9 @@ struct vport *ovs_netdev_get_vport(struct net_device *dev); struct vport *ovs_netdev_link(struct vport *vport, const char *name); int ovs_netdev_send(struct vport *vport, struct sk_buff *skb); void ovs_netdev_detach_dev(struct vport *); -void ovs_vport_free_rcu(struct rcu_head *); int __init ovs_netdev_init(void); void ovs_netdev_exit(void); +void ovs_netdev_tunnel_destroy(struct vport *vport); #endif /* vport_netdev.h */ diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c index 547173336cd3..1e8b00a23a23 100644 --- a/net/openvswitch/vport-vxlan.c +++ b/net/openvswitch/vport-vxlan.c @@ -90,7 +90,7 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms) int err; struct vxlan_config conf = { .no_share = true, - .flags = VXLAN_F_FLOW_BASED | VXLAN_F_COLLECT_METADATA, + .flags = VXLAN_F_COLLECT_METADATA, }; if (!options) { @@ -146,21 +146,6 @@ static struct vport *vxlan_create(const struct vport_parms *parms) return ovs_netdev_link(vport, parms->name); } -static void vxlan_destroy(struct vport *vport) -{ - rtnl_lock(); - if (vport->dev->priv_flags & IFF_OVS_DATAPATH) - ovs_netdev_detach_dev(vport); - - /* Early release so we can unregister the device */ - dev_put(vport->dev); - rtnl_delete_link(vport->dev); - vport->dev = NULL; - rtnl_unlock(); - - call_rcu(&vport->rcu, ovs_vport_free_rcu); -} - static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, struct ip_tunnel_info *egress_tun_info) { @@ -183,7 +168,7 @@ static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, static struct vport_ops ovs_vxlan_netdev_vport_ops = { .type = OVS_VPORT_TYPE_VXLAN, .create = vxlan_create, - .destroy = vxlan_destroy, + .destroy = ovs_netdev_tunnel_destroy, .get_options = vxlan_get_options, .send = ovs_netdev_send, .get_egress_tun_info = vxlan_get_egress_tun_info, diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index b5afe538bb88..7b8e39a22387 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -92,6 +92,7 @@ #ifdef CONFIG_INET #include #endif +#include #include "internal.h" @@ -1410,6 +1411,22 @@ static unsigned int fanout_demux_qm(struct packet_fanout *f, return skb_get_queue_mapping(skb) % num; } +static unsigned int fanout_demux_bpf(struct packet_fanout *f, + struct sk_buff *skb, + unsigned int num) +{ + struct bpf_prog *prog; + unsigned int ret = 0; + + rcu_read_lock(); + prog = rcu_dereference(f->bpf_prog); + if (prog) + ret = BPF_PROG_RUN(prog, skb) % num; + rcu_read_unlock(); + + return ret; +} + static bool fanout_has_flag(struct packet_fanout *f, u16 flag) { return f->flags & (flag >> 8); @@ -1454,6 +1471,10 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, case PACKET_FANOUT_ROLLOVER: idx = fanout_demux_rollover(f, skb, 0, false, num); break; + case PACKET_FANOUT_CBPF: + case PACKET_FANOUT_EBPF: + idx = fanout_demux_bpf(f, skb, num); + break; } if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER)) @@ -1502,6 +1523,103 @@ static bool match_fanout_group(struct packet_type *ptype, struct sock *sk) return false; } +static void 
fanout_init_data(struct packet_fanout *f) +{ + switch (f->type) { + case PACKET_FANOUT_LB: + atomic_set(&f->rr_cur, 0); + break; + case PACKET_FANOUT_CBPF: + case PACKET_FANOUT_EBPF: + RCU_INIT_POINTER(f->bpf_prog, NULL); + break; + } +} + +static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new) +{ + struct bpf_prog *old; + + spin_lock(&f->lock); + old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock)); + rcu_assign_pointer(f->bpf_prog, new); + spin_unlock(&f->lock); + + if (old) { + synchronize_net(); + bpf_prog_destroy(old); + } +} + +static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data, + unsigned int len) +{ + struct bpf_prog *new; + struct sock_fprog fprog; + int ret; + + if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) + return -EPERM; + if (len != sizeof(fprog)) + return -EINVAL; + if (copy_from_user(&fprog, data, len)) + return -EFAULT; + + ret = bpf_prog_create_from_user(&new, &fprog, NULL); + if (ret) + return ret; + + __fanout_set_data_bpf(po->fanout, new); + return 0; +} + +static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data, + unsigned int len) +{ + struct bpf_prog *new; + u32 fd; + + if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) + return -EPERM; + if (len != sizeof(fd)) + return -EINVAL; + if (copy_from_user(&fd, data, len)) + return -EFAULT; + + new = bpf_prog_get(fd); + if (IS_ERR(new)) + return PTR_ERR(new); + if (new->type != BPF_PROG_TYPE_SOCKET_FILTER) { + bpf_prog_put(new); + return -EINVAL; + } + + __fanout_set_data_bpf(po->fanout, new); + return 0; +} + +static int fanout_set_data(struct packet_sock *po, char __user *data, + unsigned int len) +{ + switch (po->fanout->type) { + case PACKET_FANOUT_CBPF: + return fanout_set_data_cbpf(po, data, len); + case PACKET_FANOUT_EBPF: + return fanout_set_data_ebpf(po, data, len); + default: + return -EINVAL; + }; +} + +static void fanout_release_data(struct packet_fanout *f) +{ + switch (f->type) { + case PACKET_FANOUT_CBPF: + case PACKET_FANOUT_EBPF: + __fanout_set_data_bpf(f, NULL); + }; +} + static int fanout_add(struct sock *sk, u16 id, u16 type_flags) { struct packet_sock *po = pkt_sk(sk); @@ -1519,6 +1637,8 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) case PACKET_FANOUT_CPU: case PACKET_FANOUT_RND: case PACKET_FANOUT_QM: + case PACKET_FANOUT_CBPF: + case PACKET_FANOUT_EBPF: break; default: return -EINVAL; @@ -1561,10 +1681,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) match->id = id; match->type = type; match->flags = flags; - atomic_set(&match->rr_cur, 0); INIT_LIST_HEAD(&match->list); spin_lock_init(&match->lock); atomic_set(&match->sk_ref, 0); + fanout_init_data(match); match->prot_hook.type = po->prot_hook.type; match->prot_hook.dev = po->prot_hook.dev; match->prot_hook.func = packet_rcv_fanout; @@ -1610,6 +1730,7 @@ static void fanout_release(struct sock *sk) if (atomic_dec_and_test(&f->sk_ref)) { list_del(&f->list); dev_remove_pack(&f->prot_hook); + fanout_release_data(f); kfree(f); } mutex_unlock(&fanout_mutex); @@ -3529,6 +3650,13 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv return fanout_add(sk, val & 0xffff, val >> 16); } + case PACKET_FANOUT_DATA: + { + if (!po->fanout) + return -EINVAL; + + return fanout_set_data(po, optval, optlen); + } case PACKET_TX_HAS_OFF: { unsigned int val; diff --git a/net/packet/internal.h b/net/packet/internal.h index e20b3e8829b8..9ee46314b7d7 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h @@ -79,7 +79,10 @@ 
struct packet_fanout { u16 id; u8 type; u8 flags; - atomic_t rr_cur; + union { + atomic_t rr_cur; + struct bpf_prog __rcu *bpf_prog; + }; struct list_head list; struct sock *arr[PACKET_FANOUT_MAX]; spinlock_t lock; diff --git a/net/rds/bind.c b/net/rds/bind.c index 4ebd29c128b6..dd666fb9b4e1 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -185,7 +185,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) ret = 0; goto out; } - trans = rds_trans_get_preferred(sin->sin_addr.s_addr); + trans = rds_trans_get_preferred(sock_net(sock->sk), + sin->sin_addr.s_addr); if (!trans) { ret = -EADDRNOTAVAIL; rds_remove_bound(rs); diff --git a/net/rds/connection.c b/net/rds/connection.c index da6da57e5f36..d4fecb21ca25 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -117,7 +117,8 @@ static void rds_conn_reset(struct rds_connection *conn) * For now they are not garbage collected once they're created. They * are torn down as the module is removed, if ever. */ -static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr, +static struct rds_connection *__rds_conn_create(struct net *net, + __be32 laddr, __be32 faddr, struct rds_transport *trans, gfp_t gfp, int is_outgoing) { @@ -157,6 +158,7 @@ new_conn: conn->c_faddr = faddr; spin_lock_init(&conn->c_lock); conn->c_next_tx_seq = 1; + rds_conn_net_set(conn, net); init_waitqueue_head(&conn->c_waitq); INIT_LIST_HEAD(&conn->c_send_queue); @@ -174,7 +176,7 @@ new_conn: * can bind to the destination address then we'd rather the messages * flow through loopback rather than either transport. */ - loop_trans = rds_trans_get_preferred(faddr); + loop_trans = rds_trans_get_preferred(net, faddr); if (loop_trans) { rds_trans_put(loop_trans); conn->c_loopback = 1; @@ -260,17 +262,19 @@ out: return conn; } -struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr, +struct rds_connection *rds_conn_create(struct net *net, + __be32 laddr, __be32 faddr, struct rds_transport *trans, gfp_t gfp) { - return __rds_conn_create(laddr, faddr, trans, gfp, 0); + return __rds_conn_create(net, laddr, faddr, trans, gfp, 0); } EXPORT_SYMBOL_GPL(rds_conn_create); -struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr, +struct rds_connection *rds_conn_create_outgoing(struct net *net, + __be32 laddr, __be32 faddr, struct rds_transport *trans, gfp_t gfp) { - return __rds_conn_create(laddr, faddr, trans, gfp, 1); + return __rds_conn_create(net, laddr, faddr, trans, gfp, 1); } EXPORT_SYMBOL_GPL(rds_conn_create_outgoing); diff --git a/net/rds/ib.c b/net/rds/ib.c index ba2dffeff608..13814227b3b2 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c @@ -317,7 +317,7 @@ static void rds_ib_ic_info(struct socket *sock, unsigned int len, * allowed to influence which paths have priority. We could call userspace * asserting this policy "routing". 
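(For context on the af_packet.c and internal.h hunks above: the two new modes let a BPF program pick the fanout slot. A minimal userspace sketch of the classic-BPF variant follows; the always-return-0 program and the helper around it are illustrative assumptions, not part of this series:

    /* Sketch: attach a cBPF demux program to an AF_PACKET fanout group.
     * Assumes the PACKET_FANOUT_CBPF / PACKET_FANOUT_DATA values added
     * above are exported through <linux/if_packet.h>.
     */
    #include <linux/filter.h>
    #include <linux/if_packet.h>
    #include <sys/socket.h>

    static int join_cbpf_fanout(int fd, unsigned short group_id)
    {
            /* BPF_RET|BPF_K 0: always steer to the first socket */
            struct sock_filter code[] = { { 0x06, 0, 0, 0 } };
            struct sock_fprog fprog = { .len = 1, .filter = code };
            int val = group_id | (PACKET_FANOUT_CBPF << 16);

            if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
                           &val, sizeof(val)) < 0)
                    return -1;
            /* the program can be replaced at runtime without
             * tearing down the group
             */
            return setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
                              &fprog, sizeof(fprog));
    }

fanout_demux_bpf() reduces the program's return value modulo the number of group members, so any socket-filter program can serve as the steering function.)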
*/ -static int rds_ib_laddr_check(__be32 addr) +static int rds_ib_laddr_check(struct net *net, __be32 addr) { int ret; struct rdma_cm_id *cm_id; diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index 0da2a45b33bd..f40d8f52b753 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -448,8 +448,9 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, (unsigned long long)be64_to_cpu(lguid), (unsigned long long)be64_to_cpu(fguid)); - conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_ib_transport, - GFP_KERNEL); + /* RDS/IB is not currently netns aware, thus init_net */ + conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr, + &rds_ib_transport, GFP_KERNEL); if (IS_ERR(conn)) { rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn)); conn = NULL; diff --git a/net/rds/info.c b/net/rds/info.c index 9a6b4f66187c..140a44a5f7b7 100644 --- a/net/rds/info.c +++ b/net/rds/info.c @@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval, /* check for all kinds of wrapping and the like */ start = (unsigned long)optval; - if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) { + if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) { ret = -EINVAL; goto out; } diff --git a/net/rds/iw.c b/net/rds/iw.c index 589935661d66..5d5a9d258658 100644 --- a/net/rds/iw.c +++ b/net/rds/iw.c @@ -218,7 +218,7 @@ static void rds_iw_ic_info(struct socket *sock, unsigned int len, * allowed to influence which paths have priority. We could call userspace * asserting this policy "routing". */ -static int rds_iw_laddr_check(__be32 addr) +static int rds_iw_laddr_check(struct net *net, __be32 addr) { int ret; struct rdma_cm_id *cm_id; diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c index 8f486fa32079..a6553a6fb2bc 100644 --- a/net/rds/iw_cm.c +++ b/net/rds/iw_cm.c @@ -398,8 +398,9 @@ int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id, &dp->dp_saddr, &dp->dp_daddr, RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version)); - conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_iw_transport, - GFP_KERNEL); + /* RDS/IW is not currently netns aware, thus init_net */ + conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr, + &rds_iw_transport, GFP_KERNEL); if (IS_ERR(conn)) { rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn)); conn = NULL; diff --git a/net/rds/rds.h b/net/rds/rds.h index 2260c1e434b1..9005fb0586f6 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -128,8 +128,21 @@ struct rds_connection { /* Protocol version */ unsigned int c_version; + possible_net_t c_net; }; +static inline +struct net *rds_conn_net(struct rds_connection *conn) +{ + return read_pnet(&conn->c_net); +} + +static inline +void rds_conn_net_set(struct rds_connection *conn, struct net *net) +{ + write_pnet(&conn->c_net, net); +} + #define RDS_FLAG_CONG_BITMAP 0x01 #define RDS_FLAG_ACK_REQUIRED 0x02 #define RDS_FLAG_RETRANSMITTED 0x04 @@ -417,7 +430,7 @@ struct rds_transport { unsigned int t_prefer_loopback:1; unsigned int t_type; - int (*laddr_check)(__be32 addr); + int (*laddr_check)(struct net *net, __be32 addr); int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp); void (*conn_free)(void *data); int (*conn_connect)(struct rds_connection *conn); @@ -608,9 +621,11 @@ struct rds_message *rds_cong_update_alloc(struct rds_connection *conn); /* conn.c */ int rds_conn_init(void); void rds_conn_exit(void); -struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr, +struct rds_connection *rds_conn_create(struct net *net, + __be32 laddr, __be32 
faddr, struct rds_transport *trans, gfp_t gfp); -struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr, +struct rds_connection *rds_conn_create_outgoing(struct net *net, + __be32 laddr, __be32 faddr, struct rds_transport *trans, gfp_t gfp); void rds_conn_shutdown(struct rds_connection *conn); void rds_conn_destroy(struct rds_connection *conn); @@ -795,7 +810,7 @@ void rds_connect_complete(struct rds_connection *conn); /* transport.c */ int rds_trans_register(struct rds_transport *trans); void rds_trans_unregister(struct rds_transport *trans); -struct rds_transport *rds_trans_get_preferred(__be32 addr); +struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr); void rds_trans_put(struct rds_transport *trans); unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter, unsigned int avail); diff --git a/net/rds/send.c b/net/rds/send.c index e9430f537f9c..2581b8e3dbe7 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -1023,7 +1023,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) if (rs->rs_conn && rs->rs_conn->c_faddr == daddr) conn = rs->rs_conn; else { - conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr, + conn = rds_conn_create_outgoing(sock_net(sock->sk), + rs->rs_bound_addr, daddr, rs->rs_transport, sock->sk->sk_allocation); if (IS_ERR(conn)) { diff --git a/net/rds/tcp.c b/net/rds/tcp.c index edac9ef2bc8b..c42b60bf4c68 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -35,6 +35,9 @@ #include #include #include +#include +#include +#include #include "rds.h" #include "tcp.h" @@ -189,9 +192,9 @@ out: spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags); } -static int rds_tcp_laddr_check(__be32 addr) +static int rds_tcp_laddr_check(struct net *net, __be32 addr) { - if (inet_addr_type(&init_net, addr) == RTN_LOCAL) + if (inet_addr_type(net, addr) == RTN_LOCAL) return 0; return -EADDRNOTAVAIL; } @@ -250,16 +253,7 @@ static void rds_tcp_destroy_conns(void) } } -static void rds_tcp_exit(void) -{ - rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info); - rds_tcp_listen_stop(); - rds_tcp_destroy_conns(); - rds_trans_unregister(&rds_tcp_transport); - rds_tcp_recv_exit(); - kmem_cache_destroy(rds_tcp_conn_slab); -} -module_exit(rds_tcp_exit); +static void rds_tcp_exit(void); struct rds_transport rds_tcp_transport = { .laddr_check = rds_tcp_laddr_check, @@ -281,6 +275,136 @@ struct rds_transport rds_tcp_transport = { .t_prefer_loopback = 1, }; +static int rds_tcp_netid; + +/* per-network namespace private data for this module */ +struct rds_tcp_net { + struct socket *rds_tcp_listen_sock; + struct work_struct rds_tcp_accept_w; +}; + +static void rds_tcp_accept_worker(struct work_struct *work) +{ + struct rds_tcp_net *rtn = container_of(work, + struct rds_tcp_net, + rds_tcp_accept_w); + + while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0) + cond_resched(); +} + +void rds_tcp_accept_work(struct sock *sk) +{ + struct net *net = sock_net(sk); + struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); + + queue_work(rds_wq, &rtn->rds_tcp_accept_w); +} + +static __net_init int rds_tcp_init_net(struct net *net) +{ + struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); + + rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net); + if (!rtn->rds_tcp_listen_sock) { + pr_warn("could not set up listen sock\n"); + return -EAFNOSUPPORT; + } + INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker); + return 0; +} + +static void __net_exit rds_tcp_exit_net(struct net *net) +{ + struct rds_tcp_net *rtn = 
net_generic(net, rds_tcp_netid); + + /* If rds_tcp_exit_net() is called as a result of netns deletion, + * the rds_tcp_kill_sock() device notifier would already have cleaned + * up the listen socket, thus there is no work to do in this function. + * + * If rds_tcp_exit_net() is called as a result of module unload, + * i.e., due to rds_tcp_exit() -> unregister_pernet_subsys(), then + * we do need to clean up the listen socket here. + */ + if (rtn->rds_tcp_listen_sock) { + rds_tcp_listen_stop(rtn->rds_tcp_listen_sock); + rtn->rds_tcp_listen_sock = NULL; + flush_work(&rtn->rds_tcp_accept_w); + } +} + +static struct pernet_operations rds_tcp_net_ops = { + .init = rds_tcp_init_net, + .exit = rds_tcp_exit_net, + .id = &rds_tcp_netid, + .size = sizeof(struct rds_tcp_net), +}; + +static void rds_tcp_kill_sock(struct net *net) +{ + struct rds_tcp_connection *tc, *_tc; + struct sock *sk; + LIST_HEAD(tmp_list); + struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid); + + rds_tcp_listen_stop(rtn->rds_tcp_listen_sock); + rtn->rds_tcp_listen_sock = NULL; + flush_work(&rtn->rds_tcp_accept_w); + spin_lock_irq(&rds_tcp_conn_lock); + list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { + struct net *c_net = read_pnet(&tc->conn->c_net); + + if (net != c_net || !tc->t_sock) + continue; + list_move_tail(&tc->t_tcp_node, &tmp_list); + } + spin_unlock_irq(&rds_tcp_conn_lock); + list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) { + sk = tc->t_sock->sk; + sk->sk_prot->disconnect(sk, 0); + tcp_done(sk); + if (tc->conn->c_passive) + rds_conn_destroy(tc->conn->c_passive); + rds_conn_destroy(tc->conn); + } +} + +static int rds_tcp_dev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + /* rds-tcp registers as a pernet subsys, so the ->exit will only + * get invoked after network activity has quiesced. We need to + * clean up all sockets to quiesce network activity, and use + * the unregistration of the per-net loopback device as a trigger + * to start that cleanup. 
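(As a reference for the rds_tcp_net_ops registration above: pernet private storage declared through .id/.size is reached with net_generic(). A minimal sketch of the idiom, with hypothetical names:

    /* Sketch: canonical access to pernet private data. */
    #include <net/netns/generic.h>

    static int example_net_id;

    struct example_net {
            struct socket *listen_sock;
    };

    static struct example_net *example_pernet(struct net *net)
    {
            return net_generic(net, example_net_id);
    }

paired with a pernet_operations carrying .id = &example_net_id and .size = sizeof(struct example_net), which is exactly the shape rds_tcp_net_ops takes.)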
+ */ + if (event == NETDEV_UNREGISTER_FINAL && + dev->ifindex == LOOPBACK_IFINDEX) + rds_tcp_kill_sock(dev_net(dev)); + + return NOTIFY_DONE; +} + +static struct notifier_block rds_tcp_dev_notifier = { + .notifier_call = rds_tcp_dev_event, + .priority = -10, /* must be called after other network notifiers */ +}; + +static void rds_tcp_exit(void) +{ + rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info); + unregister_pernet_subsys(&rds_tcp_net_ops); + if (unregister_netdevice_notifier(&rds_tcp_dev_notifier)) + pr_warn("could not unregister rds_tcp_dev_notifier\n"); + rds_tcp_destroy_conns(); + rds_trans_unregister(&rds_tcp_transport); + rds_tcp_recv_exit(); + kmem_cache_destroy(rds_tcp_conn_slab); +} +module_exit(rds_tcp_exit); + static int rds_tcp_init(void) { int ret; @@ -293,6 +417,16 @@ static int rds_tcp_init(void) goto out; } + ret = register_netdevice_notifier(&rds_tcp_dev_notifier); + if (ret) { + pr_warn("could not register rds_tcp_dev_notifier\n"); + goto out; + } + + ret = register_pernet_subsys(&rds_tcp_net_ops); + if (ret) + goto out_slab; + ret = rds_tcp_recv_init(); if (ret) goto out_slab; @@ -301,19 +435,14 @@ static int rds_tcp_init(void) if (ret) goto out_recv; - ret = rds_tcp_listen_init(); - if (ret) - goto out_register; - rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info); goto out; -out_register: - rds_trans_unregister(&rds_tcp_transport); out_recv: rds_tcp_recv_exit(); out_slab: + unregister_pernet_subsys(&rds_tcp_net_ops); kmem_cache_destroy(rds_tcp_conn_slab); out: return ret; diff --git a/net/rds/tcp.h b/net/rds/tcp.h index 0dbdd37162da..64f873c0c6b6 100644 --- a/net/rds/tcp.h +++ b/net/rds/tcp.h @@ -52,6 +52,7 @@ u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc); u32 rds_tcp_snd_una(struct rds_tcp_connection *tc); u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq); extern struct rds_transport rds_tcp_transport; +void rds_tcp_accept_work(struct sock *sk); /* tcp_connect.c */ int rds_tcp_conn_connect(struct rds_connection *conn); @@ -59,9 +60,11 @@ void rds_tcp_conn_shutdown(struct rds_connection *conn); void rds_tcp_state_change(struct sock *sk); /* tcp_listen.c */ -int rds_tcp_listen_init(void); -void rds_tcp_listen_stop(void); +struct socket *rds_tcp_listen_init(struct net *); +void rds_tcp_listen_stop(struct socket *); void rds_tcp_listen_data_ready(struct sock *sk); +int rds_tcp_accept_one(struct socket *sock); +int rds_tcp_keepalive(struct socket *sock); /* tcp_recv.c */ int rds_tcp_recv_init(void); diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index 973109c7b8e8..5cb16875c460 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c @@ -79,7 +79,8 @@ int rds_tcp_conn_connect(struct rds_connection *conn) struct sockaddr_in src, dest; int ret; - ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); + ret = sock_create_kern(rds_conn_net(conn), PF_INET, + SOCK_STREAM, IPPROTO_TCP, &sock); if (ret < 0) goto out; @@ -111,10 +112,12 @@ int rds_tcp_conn_connect(struct rds_connection *conn) rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret); if (ret == -EINPROGRESS) ret = 0; - if (ret == 0) + if (ret == 0) { + rds_tcp_keepalive(sock); sock = NULL; - else + } else { rds_tcp_restore_callbacks(sock, conn->c_transport_data); + } out: if (sock) diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 0da49e34495f..444d78d0bd77 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@ -38,14 +38,7 @@ #include "rds.h" #include "tcp.h" -/* - * cheesy, but simple.. 
- */ -static void rds_tcp_accept_worker(struct work_struct *work); -static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker); -static struct socket *rds_tcp_listen_sock; - -static int rds_tcp_keepalive(struct socket *sock) +int rds_tcp_keepalive(struct socket *sock) { /* values below based on xs_udp_default_timeout */ int keepidle = 5; /* send a probe 'keepidle' secs after last data */ @@ -77,7 +70,7 @@ bail: return ret; } -static int rds_tcp_accept_one(struct socket *sock) +int rds_tcp_accept_one(struct socket *sock) { struct socket *new_sock = NULL; struct rds_connection *conn; @@ -85,8 +78,9 @@ static int rds_tcp_accept_one(struct socket *sock) struct inet_sock *inet; struct rds_tcp_connection *rs_tcp; - ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type, - sock->sk->sk_protocol, &new_sock); + ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family, + sock->sk->sk_type, sock->sk->sk_protocol, + &new_sock); if (ret) goto out; @@ -108,7 +102,8 @@ static int rds_tcp_accept_one(struct socket *sock) &inet->inet_saddr, ntohs(inet->inet_sport), &inet->inet_daddr, ntohs(inet->inet_dport)); - conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr, + conn = rds_conn_create(sock_net(sock->sk), + inet->inet_saddr, inet->inet_daddr, &rds_tcp_transport, GFP_KERNEL); if (IS_ERR(conn)) { ret = PTR_ERR(conn); @@ -148,12 +143,6 @@ out: return ret; } -static void rds_tcp_accept_worker(struct work_struct *work) -{ - while (rds_tcp_accept_one(rds_tcp_listen_sock) == 0) - cond_resched(); -} - void rds_tcp_listen_data_ready(struct sock *sk) { void (*ready)(struct sock *sk); @@ -174,20 +163,20 @@ void rds_tcp_listen_data_ready(struct sock *sk) * socket */ if (sk->sk_state == TCP_LISTEN) - queue_work(rds_wq, &rds_tcp_listen_work); + rds_tcp_accept_work(sk); out: read_unlock(&sk->sk_callback_lock); ready(sk); } -int rds_tcp_listen_init(void) +struct socket *rds_tcp_listen_init(struct net *net) { struct sockaddr_in sin; struct socket *sock = NULL; int ret; - ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); + ret = sock_create_kern(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); if (ret < 0) goto out; @@ -211,17 +200,15 @@ int rds_tcp_listen_init(void) if (ret < 0) goto out; - rds_tcp_listen_sock = sock; - sock = NULL; + return sock; out: if (sock) sock_release(sock); - return ret; + return NULL; } -void rds_tcp_listen_stop(void) +void rds_tcp_listen_stop(struct socket *sock) { - struct socket *sock = rds_tcp_listen_sock; struct sock *sk; if (!sock) @@ -242,5 +229,4 @@ void rds_tcp_listen_stop(void) /* wait for accepts to stop and close the socket */ flush_workqueue(rds_wq); sock_release(sock); - rds_tcp_listen_sock = NULL; } diff --git a/net/rds/transport.c b/net/rds/transport.c index 83498e1c75b8..f3afd1d60d3c 100644 --- a/net/rds/transport.c +++ b/net/rds/transport.c @@ -77,7 +77,7 @@ void rds_trans_put(struct rds_transport *trans) module_put(trans->t_owner); } -struct rds_transport *rds_trans_get_preferred(__be32 addr) +struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr) { struct rds_transport *ret = NULL; struct rds_transport *trans; @@ -90,7 +90,7 @@ struct rds_transport *rds_trans_get_preferred(__be32 addr) for (i = 0; i < RDS_TRANS_COUNT; i++) { trans = transports[i]; - if (trans && (trans->laddr_check(addr) == 0) && + if (trans && (trans->laddr_check(net, addr) == 0) && (!trans->t_owner || try_module_get(trans->t_owner))) { ret = trans; break; diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig index 4c10e7e6c9f6..598d374f6a35 100644 --- 
a/net/rfkill/Kconfig +++ b/net/rfkill/Kconfig @@ -36,7 +36,8 @@ config RFKILL_REGULATOR config RFKILL_GPIO tristate "GPIO RFKILL driver" - depends on RFKILL && GPIOLIB + depends on RFKILL + depends on GPIOLIB || COMPILE_TEST default n help If you say yes here you get support of a generic gpio RFKILL diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index d5d58d919552..93127220cb54 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c @@ -164,7 +164,6 @@ static int rfkill_gpio_remove(struct platform_device *pdev) #ifdef CONFIG_ACPI static const struct acpi_device_id rfkill_acpi_match[] = { { "BCM2E1A", RFKILL_TYPE_BLUETOOTH }, - { "BCM2E39", RFKILL_TYPE_BLUETOOTH }, { "BCM2E3D", RFKILL_TYPE_BLUETOOTH }, { "BCM2E40", RFKILL_TYPE_BLUETOOTH }, { "BCM2E64", RFKILL_TYPE_BLUETOOTH }, diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 19cd8904efa0..2d1be4a760fd 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -101,6 +101,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, return ret; ret = ACT_P_CREATED; } else { + if (bind) + return 0; if (!ovr) { tcf_hash_release(a, bind); return -EEXIST; diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 5be0b3c1c5b0..b7c4ead8b5a8 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -162,7 +162,8 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a, goto drop; tcph = (void *)(skb_network_header(skb) + ihl); - inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, 1); + inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, + true); break; } case IPPROTO_UDP: @@ -178,7 +179,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a, udph = (void *)(skb_network_header(skb) + ihl); if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) { inet_proto_csum_replace4(&udph->check, skb, addr, - new_addr, 1); + new_addr, true); if (!udph->check) udph->check = CSUM_MANGLED_0; } @@ -231,7 +232,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a, iph->saddr = new_addr; inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr, - 0); + false); break; } default: diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index 2e2398cfc694..2177eac0a61e 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c @@ -54,7 +54,7 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt) bool is_bfifo = sch->ops == &bfifo_qdisc_ops; if (opt == NULL) { - u32 limit = qdisc_dev(sch)->tx_queue_len ? 
: 1; + u32 limit = qdisc_dev(sch)->tx_queue_len; if (is_bfifo) limit *= psched_mtu(qdisc_dev(sch)); diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 21ca33c9f036..a9ba030435a2 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -288,10 +288,26 @@ begin: static void fq_codel_reset(struct Qdisc *sch) { - struct sk_buff *skb; + struct fq_codel_sched_data *q = qdisc_priv(sch); + int i; - while ((skb = fq_codel_dequeue(sch)) != NULL) - kfree_skb(skb); + INIT_LIST_HEAD(&q->new_flows); + INIT_LIST_HEAD(&q->old_flows); + for (i = 0; i < q->flows_cnt; i++) { + struct fq_codel_flow *flow = q->flows + i; + + while (flow->head) { + struct sk_buff *skb = dequeue_head(flow); + + qdisc_qstats_backlog_dec(sch, skb); + kfree_skb(skb); + } + + INIT_LIST_HEAD(&flow->flowchain); + codel_vars_init(&flow->cvars); + } + memset(q->backlogs, 0, q->flows_cnt * sizeof(u32)); + sch->q.qlen = 0; } static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = { diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 6efca30894aa..942fea8405a4 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -735,7 +735,7 @@ static void attach_one_default_qdisc(struct net_device *dev, { struct Qdisc *qdisc = &noqueue_qdisc; - if (dev->tx_queue_len) { + if (dev->tx_queue_len && !(dev->priv_flags & IFF_NO_QUEUE)) { qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops, TC_H_ROOT); if (!qdisc) { @@ -755,7 +755,9 @@ static void attach_default_qdiscs(struct net_device *dev) txq = netdev_get_tx_queue(dev, 0); - if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) { + if (!netif_is_multiqueue(dev) || + dev->tx_queue_len == 0 || + dev->priv_flags & IFF_NO_QUEUE) { netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); dev->qdisc = txq->qdisc_sleeping; atomic_inc(&dev->qdisc->refcnt); diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index abb9f2fec28f..80105109f756 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -512,11 +512,9 @@ static int gred_init(struct Qdisc *sch, struct nlattr *opt) if (tb[TCA_GRED_LIMIT]) sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]); - else { - u32 qlen = qdisc_dev(sch)->tx_queue_len ? : 1; - - sch->limit = qlen * psched_mtu(qdisc_dev(sch)); - } + else + sch->limit = qdisc_dev(sch)->tx_queue_len + * psched_mtu(qdisc_dev(sch)); return gred_change_table_def(sch, tb[TCA_GRED_DPS]); } diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index f1acb0f60dc3..cf4b0f865d1b 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -1048,11 +1048,9 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) if (tb[TCA_HTB_DIRECT_QLEN]) q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]); - else { + else q->direct_qlen = qdisc_dev(sch)->tx_queue_len; - if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */ - q->direct_qlen = 2; - } + if ((q->rate2quantum = gopt->rate2quantum) < 1) q->rate2quantum = 1; q->defcls = gopt->defcls; diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c index ade9445a55ab..5abfe44678d4 100644 --- a/net/sched/sch_plug.c +++ b/net/sched/sch_plug.c @@ -130,12 +130,8 @@ static int plug_init(struct Qdisc *sch, struct nlattr *opt) q->unplug_indefinite = false; if (opt == NULL) { - /* We will set a default limit of 100 pkts (~150kB) - * in case tx_queue_len is not available. The - * default value is completely arbitrary. - */ - u32 pkt_limit = qdisc_dev(sch)->tx_queue_len ? 
: 100; - q->limit = pkt_limit * psched_mtu(qdisc_dev(sch)); + q->limit = qdisc_dev(sch)->tx_queue_len + * psched_mtu(qdisc_dev(sch)); } else { struct tc_plug_qopt *ctl = nla_data(opt); diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index 4b815193326c..dcdff5c769a1 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -502,7 +502,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt) limit = ctl->limit; if (limit == 0) - limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1); + limit = qdisc_dev(sch)->tx_queue_len; child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit); if (IS_ERR(child)) diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 33bafa2e703e..16c1c43980a1 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -810,7 +810,7 @@ static int switchdev_port_fdb_dump_cb(struct net_device *dev, ndm->ndm_flags = NTF_SELF; ndm->ndm_type = 0; ndm->ndm_ifindex = dev->ifindex; - ndm->ndm_state = NUD_REACHABLE; + ndm->ndm_state = obj->u.fdb.ndm_state; if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, obj->u.fdb.addr)) goto nla_put_failure; diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 53e0fee80086..1eadc95e1132 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -1114,7 +1114,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info) } len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN); - if (TLV_GET_LEN(msg.req) && !TLV_OK(msg.req, len)) { + if (len && !TLV_OK(msg.req, len)) { msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED); err = -EOPNOTSUPP; goto send; diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c index 7d730543f243..477364ad750e 100644 --- a/net/wimax/op-rfkill.c +++ b/net/wimax/op-rfkill.c @@ -135,8 +135,7 @@ EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw); * @state: New state of the RF kill switch. %WIMAX_RF_ON radio on, * %WIMAX_RF_OFF radio off. * - * Reports changes in the software RF switch state to the the WiMAX - * stack. + * Reports changes in the software RF switch state to the WiMAX stack. 
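(Stepping back to the sch_generic.c hunk above: after this series a device asks for noqueue semantics explicitly via IFF_NO_QUEUE rather than by zeroing tx_queue_len. A minimal sketch of a hypothetical virtual driver's setup routine:

    /* Sketch: opt out of a default qdisc. */
    static void example_setup(struct net_device *dev)
    {
            ether_setup(dev);
            dev->priv_flags |= IFF_NO_QUEUE;
    }

That explicit flag is what allows the sch_fifo, sch_gred, sch_htb and sch_plug hunks above to drop their "tx_queue_len ? : fallback" special cases, since a zero queue length no longer doubles as a no-queue signal.)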
* * The main use is during initialization, so the driver can query the * device for its current software radio kill switch state and feed it diff --git a/net/wireless/core.c b/net/wireless/core.c index 2a0bbd22854b..3893409dee95 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -407,6 +407,9 @@ use_default_name: INIT_LIST_HEAD(&rdev->bss_list); INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); INIT_WORK(&rdev->sched_scan_results_wk, __cfg80211_sched_scan_results); + INIT_LIST_HEAD(&rdev->mlme_unreg); + spin_lock_init(&rdev->mlme_unreg_lock); + INIT_WORK(&rdev->mlme_unreg_wk, cfg80211_mlme_unreg_wk); INIT_DELAYED_WORK(&rdev->dfs_update_channels_wk, cfg80211_dfs_channels_update_work); #ifdef CONFIG_CFG80211_WEXT @@ -802,6 +805,7 @@ void wiphy_unregister(struct wiphy *wiphy) cancel_delayed_work_sync(&rdev->dfs_update_channels_wk); flush_work(&rdev->destroy_work); flush_work(&rdev->sched_scan_stop_wk); + flush_work(&rdev->mlme_unreg_wk); #ifdef CONFIG_PM if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup) @@ -855,6 +859,7 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev) switch (wdev->iftype) { case NL80211_IFTYPE_P2P_DEVICE: + cfg80211_mlme_purge_registrations(wdev); cfg80211_stop_p2p_device(rdev, wdev); break; default: diff --git a/net/wireless/core.h b/net/wireless/core.h index 311eef26bf88..b9d5bc8c148d 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -59,6 +59,10 @@ struct cfg80211_registered_device { struct list_head beacon_registrations; spinlock_t beacon_registrations_lock; + struct list_head mlme_unreg; + spinlock_t mlme_unreg_lock; + struct work_struct mlme_unreg_wk; + /* protected by RTNL only */ int num_running_ifaces; int num_running_monitor_ifaces; @@ -348,6 +352,7 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid, u16 frame_type, const u8 *match_data, int match_len); +void cfg80211_mlme_unreg_wk(struct work_struct *wk); void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid); void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev); int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 7aae329e2b4e..fb44fa3bf4ef 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -2,6 +2,7 @@ * cfg80211 MLME SAP interface * * Copyright (c) 2009, Jouni Malinen + * Copyright (c) 2015 Intel Deutschland GmbH */ #include @@ -389,6 +390,7 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, struct cfg80211_mgmt_registration { struct list_head list; + struct wireless_dev *wdev; u32 nlportid; @@ -399,6 +401,46 @@ struct cfg80211_mgmt_registration { u8 match[]; }; +static void +cfg80211_process_mlme_unregistrations(struct cfg80211_registered_device *rdev) +{ + struct cfg80211_mgmt_registration *reg; + + ASSERT_RTNL(); + + spin_lock_bh(&rdev->mlme_unreg_lock); + while ((reg = list_first_entry_or_null(&rdev->mlme_unreg, + struct cfg80211_mgmt_registration, + list))) { + list_del(®->list); + spin_unlock_bh(&rdev->mlme_unreg_lock); + + if (rdev->ops->mgmt_frame_register) { + u16 frame_type = le16_to_cpu(reg->frame_type); + + rdev_mgmt_frame_register(rdev, reg->wdev, + frame_type, false); + } + + kfree(reg); + + spin_lock_bh(&rdev->mlme_unreg_lock); + } + spin_unlock_bh(&rdev->mlme_unreg_lock); +} + +void cfg80211_mlme_unreg_wk(struct work_struct *wk) +{ + struct cfg80211_registered_device *rdev; + + rdev = container_of(wk, struct 
cfg80211_registered_device, + mlme_unreg_wk); + + rtnl_lock(); + cfg80211_process_mlme_unregistrations(rdev); + rtnl_unlock(); +} + int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid, u16 frame_type, const u8 *match_data, int match_len) @@ -449,11 +491,18 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid, nreg->match_len = match_len; nreg->nlportid = snd_portid; nreg->frame_type = cpu_to_le16(frame_type); + nreg->wdev = wdev; list_add(&nreg->list, &wdev->mgmt_registrations); + spin_unlock_bh(&wdev->mgmt_registrations_lock); + + /* process all unregistrations to avoid driver confusion */ + cfg80211_process_mlme_unregistrations(rdev); if (rdev->ops->mgmt_frame_register) rdev_mgmt_frame_register(rdev, wdev, frame_type, true); + return 0; + out: spin_unlock_bh(&wdev->mgmt_registrations_lock); @@ -472,15 +521,12 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid) if (reg->nlportid != nlportid) continue; - if (rdev->ops->mgmt_frame_register) { - u16 frame_type = le16_to_cpu(reg->frame_type); - - rdev_mgmt_frame_register(rdev, wdev, - frame_type, false); - } - list_del(®->list); - kfree(reg); + spin_lock(&rdev->mlme_unreg_lock); + list_add_tail(®->list, &rdev->mlme_unreg); + spin_unlock(&rdev->mlme_unreg_lock); + + schedule_work(&rdev->mlme_unreg_wk); } spin_unlock_bh(&wdev->mgmt_registrations_lock); @@ -496,16 +542,15 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlportid) void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev) { - struct cfg80211_mgmt_registration *reg, *tmp; + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); spin_lock_bh(&wdev->mgmt_registrations_lock); - - list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) { - list_del(®->list); - kfree(reg); - } - + spin_lock(&rdev->mlme_unreg_lock); + list_splice_tail_init(&wdev->mgmt_registrations, &rdev->mlme_unreg); + spin_unlock(&rdev->mlme_unreg_lock); spin_unlock_bh(&wdev->mgmt_registrations_lock); + + cfg80211_process_mlme_unregistrations(rdev); } int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 76b41578a838..5d8748b4c8a2 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -2321,6 +2321,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) rdev->wiphy.frag_threshold = old_frag_threshold; rdev->wiphy.rts_threshold = old_rts_threshold; rdev->wiphy.coverage_class = old_coverage_class; + return result; } } return 0; @@ -7390,7 +7391,8 @@ static int nl80211_set_mcast_rate(struct sk_buff *skb, struct genl_info *info) int err; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC && - dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) + dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT && + dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB) return -EOPNOTSUPP; if (!rdev->ops->set_mcast_rate) diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index c6e83a7468c0..c23516d0f807 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -733,6 +733,8 @@ static inline void rdev_mgmt_frame_register(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u16 frame_type, bool reg) { + might_sleep(); + trace_rdev_mgmt_frame_register(&rdev->wiphy, wdev , frame_type, reg); rdev->ops->mgmt_frame_register(&rdev->wiphy, wdev , frame_type, reg); trace_rdev_return_void(&rdev->wiphy); diff --git a/net/wireless/reg.c b/net/wireless/reg.c 
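(Before the reg.c diff below, a note on the mlme.c rework above: deregistration is deferred because rdev_mgmt_frame_register() may now sleep, while the registration list is walked under a spinlock. The idiom in isolation, with hypothetical names:

    /* Sketch: push a sleeping op out of spinlocked context. */
    spin_lock(&dev->unreg_lock);                 /* atomic context */
    list_add_tail(&reg->list, &dev->unreg_list);
    spin_unlock(&dev->unreg_lock);
    schedule_work(&dev->unreg_wk);               /* worker sleeps, takes RTNL */

cfg80211_process_mlme_unregistrations() then drains the list under the RTNL, dropping the spinlock around each driver callback.)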
index aa2d75482017..b144485946f2 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1004,7 +1004,7 @@ static u32 map_regdom_flags(u32 rd_flags) static const struct ieee80211_reg_rule * freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq, - const struct ieee80211_regdomain *regd) + const struct ieee80211_regdomain *regd, u32 bw) { int i; bool band_rule_found = false; @@ -1028,7 +1028,7 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq, if (!band_rule_found) band_rule_found = freq_in_rule_band(fr, center_freq); - bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20)); + bw_fits = reg_does_bw_fit(fr, center_freq, bw); if (band_rule_found && bw_fits) return rr; @@ -1040,14 +1040,26 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq, return ERR_PTR(-EINVAL); } +const struct ieee80211_reg_rule *__freq_reg_info(struct wiphy *wiphy, + u32 center_freq, u32 min_bw) +{ + const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy); + const struct ieee80211_reg_rule *reg_rule = NULL; + u32 bw; + + for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) { + reg_rule = freq_reg_info_regd(wiphy, center_freq, regd, bw); + if (!IS_ERR(reg_rule)) + return reg_rule; + } + + return reg_rule; +} + const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy, u32 center_freq) { - const struct ieee80211_regdomain *regd; - - regd = reg_get_regdomain(wiphy); - - return freq_reg_info_regd(wiphy, center_freq, regd); + return __freq_reg_info(wiphy, center_freq, MHZ_TO_KHZ(20)); } EXPORT_SYMBOL(freq_reg_info); @@ -1176,8 +1188,20 @@ static void handle_channel(struct wiphy *wiphy, if (reg_rule->flags & NL80211_RRF_AUTO_BW) max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); + /* If we get a reg_rule we can assume that at least 5 MHz fits */ + if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq), + MHZ_TO_KHZ(10))) + bw_flags |= IEEE80211_CHAN_NO_10MHZ; + if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq), + MHZ_TO_KHZ(20))) + bw_flags |= IEEE80211_CHAN_NO_20MHZ; + + if (max_bandwidth_khz < MHZ_TO_KHZ(10)) + bw_flags |= IEEE80211_CHAN_NO_10MHZ; + if (max_bandwidth_khz < MHZ_TO_KHZ(20)) + bw_flags |= IEEE80211_CHAN_NO_20MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(40)) - bw_flags = IEEE80211_CHAN_NO_HT40; + bw_flags |= IEEE80211_CHAN_NO_HT40; if (max_bandwidth_khz < MHZ_TO_KHZ(80)) bw_flags |= IEEE80211_CHAN_NO_80MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(160)) @@ -1695,9 +1719,15 @@ static void handle_channel_custom(struct wiphy *wiphy, const struct ieee80211_power_rule *power_rule = NULL; const struct ieee80211_freq_range *freq_range = NULL; u32 max_bandwidth_khz; + u32 bw; - reg_rule = freq_reg_info_regd(wiphy, MHZ_TO_KHZ(chan->center_freq), - regd); + for (bw = MHZ_TO_KHZ(20); bw >= MHZ_TO_KHZ(5); bw = bw / 2) { + reg_rule = freq_reg_info_regd(wiphy, + MHZ_TO_KHZ(chan->center_freq), + regd, bw); + if (!IS_ERR(reg_rule)) + break; + } if (IS_ERR(reg_rule)) { REG_DBG_PRINT("Disabling freq %d MHz as custom regd has no rule that fits it\n", @@ -1721,8 +1751,20 @@ static void handle_channel_custom(struct wiphy *wiphy, if (reg_rule->flags & NL80211_RRF_AUTO_BW) max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); + /* If we get a reg_rule we can assume that at least 5 MHz fits */ + if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq), + MHZ_TO_KHZ(10))) + bw_flags |= IEEE80211_CHAN_NO_10MHZ; + if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq), + MHZ_TO_KHZ(20))) + bw_flags |= IEEE80211_CHAN_NO_20MHZ; + + if (max_bandwidth_khz < 
MHZ_TO_KHZ(10)) + bw_flags |= IEEE80211_CHAN_NO_10MHZ; + if (max_bandwidth_khz < MHZ_TO_KHZ(20)) + bw_flags |= IEEE80211_CHAN_NO_20MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(40)) - bw_flags = IEEE80211_CHAN_NO_HT40; + bw_flags |= IEEE80211_CHAN_NO_HT40; if (max_bandwidth_khz < MHZ_TO_KHZ(80)) bw_flags |= IEEE80211_CHAN_NO_80MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(160)) @@ -2079,10 +2121,7 @@ static void reg_process_hint(struct regulatory_request *reg_request) reg_process_hint_core(reg_request); return; case NL80211_REGDOM_SET_BY_USER: - treatment = reg_process_hint_user(reg_request); - if (treatment == REG_REQ_IGNORE || - treatment == REG_REQ_ALREADY_SET) - return; + reg_process_hint_user(reg_request); return; case NL80211_REGDOM_SET_BY_DRIVER: if (!wiphy) @@ -2099,7 +2138,9 @@ static void reg_process_hint(struct regulatory_request *reg_request) goto out_free; } - /* This is required so that the orig_* parameters are saved */ + /* This is required so that the orig_* parameters are saved. + * NOTE: treatment must be set for any case that reaches here! + */ if (treatment == REG_REQ_ALREADY_SET && wiphy && wiphy->regulatory_flags & REGULATORY_STRICT_REG) { wiphy_update_regulatory(wiphy, reg_request->initiator); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 18cead7645be..94af3d065785 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -115,7 +115,8 @@ static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo) rcu_read_unlock(); } -static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, +static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, + int tos, int oif, const xfrm_address_t *saddr, const xfrm_address_t *daddr, int family) @@ -127,14 +128,15 @@ static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, if (unlikely(afinfo == NULL)) return ERR_PTR(-EAFNOSUPPORT); - dst = afinfo->dst_lookup(net, tos, saddr, daddr); + dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr); xfrm_policy_put_afinfo(afinfo); return dst; } -static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos, +static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, + int tos, int oif, xfrm_address_t *prev_saddr, xfrm_address_t *prev_daddr, int family) @@ -153,7 +155,7 @@ static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos, daddr = x->coaddr; } - dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family); + dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family); if (!IS_ERR(dst)) { if (prev_saddr != saddr) @@ -1373,15 +1375,15 @@ int __xfrm_sk_clone_policy(struct sock *sk) } static int -xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote, - unsigned short family) +xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local, + xfrm_address_t *remote, unsigned short family) { int err; struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); if (unlikely(afinfo == NULL)) return -EINVAL; - err = afinfo->get_saddr(net, local, remote); + err = afinfo->get_saddr(net, oif, local, remote); xfrm_policy_put_afinfo(afinfo); return err; } @@ -1410,7 +1412,9 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl, remote = &tmpl->id.daddr; local = &tmpl->saddr; if (xfrm_addr_any(local, tmpl->encap_family)) { - error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family); + error = xfrm_get_saddr(net, fl->flowi_oif, + &tmp, remote, + tmpl->encap_family); if (error) goto fail; local = &tmp; @@ -1690,8 +1694,8 @@ static struct 
dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { family = xfrm[i]->props.family; - dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr, - family); + dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif, + &saddr, &daddr, family); err = PTR_ERR(dst); if (IS_ERR(dst)) goto put_states; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 0cebf1fc37a2..a8de9e300200 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -925,12 +925,10 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) return err; if (attrs[XFRMA_ADDRESS_FILTER]) { - filter = kmalloc(sizeof(*filter), GFP_KERNEL); + filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]), + sizeof(*filter), GFP_KERNEL); if (filter == NULL) return -ENOMEM; - - memcpy(filter, nla_data(attrs[XFRMA_ADDRESS_FILTER]), - sizeof(*filter)); } if (attrs[XFRMA_PROTO]) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 4450fed91ab4..63e7d50e6a4f 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -12,6 +12,7 @@ hostprogs-y += tracex2 hostprogs-y += tracex3 hostprogs-y += tracex4 hostprogs-y += tracex5 +hostprogs-y += tracex6 hostprogs-y += lathist test_verifier-objs := test_verifier.o libbpf.o @@ -25,6 +26,7 @@ tracex2-objs := bpf_load.o libbpf.o tracex2_user.o tracex3-objs := bpf_load.o libbpf.o tracex3_user.o tracex4-objs := bpf_load.o libbpf.o tracex4_user.o tracex5-objs := bpf_load.o libbpf.o tracex5_user.o +tracex6-objs := bpf_load.o libbpf.o tracex6_user.o lathist-objs := bpf_load.o libbpf.o lathist_user.o # Tell kbuild to always build the programs @@ -37,6 +39,7 @@ always += tracex2_kern.o always += tracex3_kern.o always += tracex4_kern.o always += tracex5_kern.o +always += tracex6_kern.o always += tcbpf1_kern.o always += lathist_kern.o @@ -51,6 +54,7 @@ HOSTLOADLIBES_tracex2 += -lelf HOSTLOADLIBES_tracex3 += -lelf HOSTLOADLIBES_tracex4 += -lelf -lrt HOSTLOADLIBES_tracex5 += -lelf +HOSTLOADLIBES_tracex6 += -lelf HOSTLOADLIBES_lathist += -lelf # point this to your LLVM backend with bpf support diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h index c77c872fe8ee..3a44d3a272af 100644 --- a/samples/bpf/bpf_helpers.h +++ b/samples/bpf/bpf_helpers.h @@ -31,6 +31,8 @@ static unsigned long long (*bpf_get_current_uid_gid)(void) = (void *) BPF_FUNC_get_current_uid_gid; static int (*bpf_get_current_comm)(void *buf, int buf_size) = (void *) BPF_FUNC_get_current_comm; +static int (*bpf_perf_event_read)(void *map, int index) = + (void *) BPF_FUNC_perf_event_read; /* llvm builtin functions that eBPF C program may use to * emit BPF_LD_ABS and BPF_LD_IND instructions diff --git a/samples/bpf/tracex6_kern.c b/samples/bpf/tracex6_kern.c new file mode 100644 index 000000000000..be479c4af9e2 --- /dev/null +++ b/samples/bpf/tracex6_kern.c @@ -0,0 +1,27 @@ +#include +#include +#include +#include "bpf_helpers.h" + +struct bpf_map_def SEC("maps") my_map = { + .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, + .key_size = sizeof(int), + .value_size = sizeof(u32), + .max_entries = 32, +}; + +SEC("kprobe/sys_write") +int bpf_prog1(struct pt_regs *ctx) +{ + u64 count; + u32 key = bpf_get_smp_processor_id(); + char fmt[] = "CPU-%d %llu\n"; + + count = bpf_perf_event_read(&my_map, key); + bpf_trace_printk(fmt, sizeof(fmt), key, count); + + return 0; +} + +char _license[] SEC("license") = "GPL"; +u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/samples/bpf/tracex6_user.c b/samples/bpf/tracex6_user.c new file mode 100644 index 
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 4450fed91ab4..63e7d50e6a4f 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -12,6 +12,7 @@ hostprogs-y += tracex2
 hostprogs-y += tracex3
 hostprogs-y += tracex4
 hostprogs-y += tracex5
+hostprogs-y += tracex6
 hostprogs-y += lathist

 test_verifier-objs := test_verifier.o libbpf.o
@@ -25,6 +26,7 @@ tracex2-objs := bpf_load.o libbpf.o tracex2_user.o
 tracex3-objs := bpf_load.o libbpf.o tracex3_user.o
 tracex4-objs := bpf_load.o libbpf.o tracex4_user.o
 tracex5-objs := bpf_load.o libbpf.o tracex5_user.o
+tracex6-objs := bpf_load.o libbpf.o tracex6_user.o
 lathist-objs := bpf_load.o libbpf.o lathist_user.o

 # Tell kbuild to always build the programs
@@ -37,6 +39,7 @@ always += tracex2_kern.o
 always += tracex3_kern.o
 always += tracex4_kern.o
 always += tracex5_kern.o
+always += tracex6_kern.o
 always += tcbpf1_kern.o
 always += lathist_kern.o

@@ -51,6 +54,7 @@ HOSTLOADLIBES_tracex2 += -lelf
 HOSTLOADLIBES_tracex3 += -lelf
 HOSTLOADLIBES_tracex4 += -lelf -lrt
 HOSTLOADLIBES_tracex5 += -lelf
+HOSTLOADLIBES_tracex6 += -lelf
 HOSTLOADLIBES_lathist += -lelf

 # point this to your LLVM backend with bpf support
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index c77c872fe8ee..3a44d3a272af 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -31,6 +31,8 @@ static unsigned long long (*bpf_get_current_uid_gid)(void) =
 	(void *) BPF_FUNC_get_current_uid_gid;
 static int (*bpf_get_current_comm)(void *buf, int buf_size) =
 	(void *) BPF_FUNC_get_current_comm;
+static int (*bpf_perf_event_read)(void *map, int index) =
+	(void *) BPF_FUNC_perf_event_read;

 /* llvm builtin functions that eBPF C program may use to
  * emit BPF_LD_ABS and BPF_LD_IND instructions
diff --git a/samples/bpf/tracex6_kern.c b/samples/bpf/tracex6_kern.c
new file mode 100644
index 000000000000..be479c4af9e2
--- /dev/null
+++ b/samples/bpf/tracex6_kern.c
@@ -0,0 +1,27 @@
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") my_map = {
+	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+	.key_size = sizeof(int),
+	.value_size = sizeof(u32),
+	.max_entries = 32,
+};
+
+SEC("kprobe/sys_write")
+int bpf_prog1(struct pt_regs *ctx)
+{
+	u64 count;
+	u32 key = bpf_get_smp_processor_id();
+	char fmt[] = "CPU-%d   %llu\n";
+
+	count = bpf_perf_event_read(&my_map, key);
+	bpf_trace_printk(fmt, sizeof(fmt), key, count);
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex6_user.c b/samples/bpf/tracex6_user.c
new file mode 100644
index 000000000000..8ea4976cfcf1
--- /dev/null
+++ b/samples/bpf/tracex6_user.c
@@ -0,0 +1,72 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sys/ioctl.h>
+#include <linux/perf_event.h>
+#include <linux/bpf.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+#define SAMPLE_PERIOD  0x7fffffffffffffffULL
+
+static void test_bpf_perf_event(void)
+{
+	int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+	int *pmu_fd = malloc(nr_cpus * sizeof(int));
+	int status, i;
+
+	struct perf_event_attr attr_insn_pmu = {
+		.freq = 0,
+		.sample_period = SAMPLE_PERIOD,
+		.inherit = 0,
+		.type = PERF_TYPE_HARDWARE,
+		.read_format = 0,
+		.sample_type = 0,
+		.config = 0,/* PMU: cycles */
+	};
+
+	for (i = 0; i < nr_cpus; i++) {
+		pmu_fd[i] = perf_event_open(&attr_insn_pmu, -1/*pid*/, i/*cpu*/, -1/*group_fd*/, 0);
+		if (pmu_fd[i] < 0) {
+			printf("event syscall failed\n");
+			goto exit;
+		}
+
+		bpf_update_elem(map_fd[0], &i, &pmu_fd[i], BPF_ANY);
+		ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
+	}
+
+	status = system("ls > /dev/null");
+	if (status)
+		goto exit;
+	status = system("sleep 2");
+	if (status)
+		goto exit;
+
+exit:
+	for (i = 0; i < nr_cpus; i++)
+		close(pmu_fd[i]);
+	close(map_fd[0]);
+	free(pmu_fd);
+}
+
+int main(int argc, char **argv)
+{
+	char filename[256];
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+	if (load_bpf_file(filename)) {
+		printf("%s", bpf_log_buf);
+		return 1;
+	}
+
+	test_bpf_perf_event();
+	read_trace_pipe();
+
+	return 0;
+}
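Note: tracex6 pairs a kprobe program that calls bpf_perf_event_read() on a
BPF_MAP_TYPE_PERF_EVENT_ARRAY with a loader that opens one hardware cycle
counter per CPU and stores the fds in that map. perf_event_open() has no glibc
wrapper; the samples reach it through a syscall shim along these lines (a
sketch; the helper actually used ships with the samples' libbpf):

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	/* no glibc symbol exists for this syscall */
	static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
				   int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu,
			       group_fd, flags);
	}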
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 9cb8522d8d22..f3d3fb42b873 100755
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.');
 my $kconfig = $ARGV[1];
 my $lsmod_file = $ENV{'LSMOD'};

-my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
+my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
 chomp @makefiles;

 my %depends;
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 9ed32502470e..5ebb89687936 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -406,6 +406,7 @@ static __init int yama_init(void)
 	 */
 	if (!security_module_enable("yama"))
 		return 0;
+	yama_add_hooks();
 #endif

 	pr_info("Yama: becoming mindful.\n");
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index 7bb988fa6b6d..2a153d260836 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -740,8 +740,9 @@ static int handle_in_packet(struct amdtp_stream *s,
 	    s->data_block_counter != UINT_MAX)
 		data_block_counter = s->data_block_counter;

-	if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && data_block_counter == 0) ||
-	    (s->data_block_counter == UINT_MAX)) {
+	if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
+	     data_block_counter == s->tx_first_dbc) ||
+	    s->data_block_counter == UINT_MAX) {
 		lost = false;
 	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
 		lost = data_block_counter != s->data_block_counter;
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
index 26b909329e54..b2cf9e75693b 100644
--- a/sound/firewire/amdtp.h
+++ b/sound/firewire/amdtp.h
@@ -157,6 +157,8 @@ struct amdtp_stream {
 	/* quirk: fixed interval of dbc between previous/current packets. */
 	unsigned int tx_dbc_interval;
+	/* quirk: the value of the dbc field in the first packet. */
+	unsigned int tx_first_dbc;

 	bool callbacked;
 	wait_queue_head_t callback_wait;
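Note: dbc is an 8-bit running data-block counter carried in each CIP packet
header; receivers detect dropped packets by checking its continuity. The amdtp
change generalizes the "skip the zero check" quirk so the first packet can be
compared against a device-specific tx_first_dbc instead of 0. A simplified
model of the check (illustrative only, names invented):

	/* 8-bit dbc continuity: a packet is considered lost when the
	 * counter does not follow the previous one by the expected step;
	 * the very first packet is matched against first_dbc instead */
	static bool dbc_lost(unsigned int prev, unsigned int cur,
			     unsigned int step, bool first,
			     unsigned int first_dbc)
	{
		if (first)
			return cur != first_dbc;
		return ((prev + step) & 0xff) != cur;
	}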
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index c670db4eee70..c94a432f7cc6 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -248,10 +248,16 @@ efw_probe(struct fw_unit *unit,
 	err = get_hardware_info(efw);
 	if (err < 0)
 		goto error;
-	if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2)
-		efw->is_af2 = true;
+	/* AudioFire8 (since 2009) and AudioFirePre8 */
 	if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
 		efw->is_af9 = true;
+	/* These models use the same firmware. */
+	if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 ||
+	    entry->model_id == MODEL_ECHO_AUDIOFIRE_4 ||
+	    entry->model_id == MODEL_ECHO_AUDIOFIRE_9 ||
+	    entry->model_id == MODEL_GIBSON_RIP ||
+	    entry->model_id == MODEL_GIBSON_GOLDTOP)
+		efw->is_fireworks3 = true;

 	snd_efw_proc_init(efw);
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index c33252b7bc84..084d414b228c 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -70,8 +70,8 @@ struct snd_efw {
 	bool resp_addr_changable;

 	/* for quirks */
-	bool is_af2;
 	bool is_af9;
+	bool is_fireworks3;
 	u32 firmware_version;

 	unsigned int midi_in_ports;
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index a0762dd6231e..7e353f1f7bff 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -172,9 +172,15 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
 	efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT;
 	/* Fireworks reset dbc at bus reset. */
 	efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK;
-	/* AudioFire2 starts packets with non-zero dbc. */
-	if (efw->is_af2)
-		efw->tx_stream.flags |= CIP_SKIP_INIT_DBC_CHECK;
+	/*
+	 * Recent firmware starts packets with a non-zero dbc.
+	 * Driver version 5.7.6 installs firmware version 5.7.3.
+	 */
+	if (efw->is_fireworks3 &&
+	    (efw->firmware_version == 0x5070000 ||
+	     efw->firmware_version == 0x5070300 ||
+	     efw->firmware_version == 0x5080000))
+		efw->tx_stream.tx_first_dbc = 0x02;
 	/* AudioFire9 always reports wrong dbs. */
 	if (efw->is_af9)
 		efw->tx_stream.flags |= CIP_WRONG_DBS;
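Note: the raw constants compared against firmware_version appear to encode
dotted versions one byte per component, so 0x5070300 is 5.7.3. A hypothetical
helper that makes the encoding explicit (not part of the driver):

	/* hypothetical: pack version a.b.c into the 32-bit form above */
	#define EFW_FW_VERSION(a, b, c)	(((a) << 24) | ((b) << 16) | ((c) << 8))

	/* EFW_FW_VERSION(5, 7, 0) == 0x5070000
	 * EFW_FW_VERSION(5, 7, 3) == 0x5070300
	 * EFW_FW_VERSION(5, 8, 0) == 0x5080000 */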
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index b2da19b60f4e..358f16195483 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -44,16 +44,10 @@ int snd_hdac_ext_bus_parse_capabilities(struct hdac_ext_bus *ebus)

 	offset = snd_hdac_chip_readl(bus, LLCH);

-	if (offset < 0)
-		return -EIO;
-
 	/* Let's walk the linked capabilities list */
 	do {
 		cur_cap = _snd_hdac_chip_read(l, bus, offset);

-		if (cur_cap < 0)
-			return -EIO;
-
 		dev_dbg(bus->dev, "Capability version: 0x%x\n",
 			((cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF));
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index f8ffbdbb450d..3de47dd1a76d 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -299,7 +299,7 @@ hdac_ext_host_stream_assign(struct hdac_ext_bus *ebus,
 		if (stream->direction != substream->stream)
 			continue;

-		if (stream->opened) {
+		if (!stream->opened) {
 			if (!hstream->decoupled)
 				snd_hdac_ext_stream_decouple(ebus, hstream, true);
 			res = hstream;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index c456c04e0928..0b9847affbec 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5189,6 +5189,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 6492bca8c70f..4ca12665ff73 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -88,7 +88,7 @@ static int dac_mute_put(struct snd_kcontrol *ctl,
 	int changed;

 	mutex_lock(&chip->mutex);
-	changed = !value->value.integer.value[0] != chip->dac_mute;
+	changed = (!value->value.integer.value[0]) != chip->dac_mute;
 	if (changed) {
 		chip->dac_mute = !value->value.integer.value[0];
 		chip->model.update_dac_mute(chip);
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index d7ec4756e45b..8e36198474d9 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -457,14 +457,14 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
 	case SND_SOC_DAIFMT_RIGHT_J:
 		if (params_width(params) == 16) {
 			snd_soc_update_bits(codec, CS4265_DAC_CTL,
-				CS4265_DAC_CTL_DIF, (1 << 5));
+				CS4265_DAC_CTL_DIF, (2 << 4));
 			snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
-				CS4265_SPDIF_CTL2_DIF, (1 << 7));
+				CS4265_SPDIF_CTL2_DIF, (2 << 6));
 		} else {
 			snd_soc_update_bits(codec, CS4265_DAC_CTL,
-				CS4265_DAC_CTL_DIF, (3 << 5));
+				CS4265_DAC_CTL_DIF, (3 << 4));
 			snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
-				CS4265_SPDIF_CTL2_DIF, (1 << 7));
+				CS4265_SPDIF_CTL2_DIF, (3 << 6));
 		}
 		break;
 	case SND_SOC_DAIFMT_LEFT_J:
@@ -473,7 +473,7 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
 			snd_soc_update_bits(codec, CS4265_ADC_CTL,
 				CS4265_ADC_DIF, 0);
 			snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
-				CS4265_SPDIF_CTL2_DIF, (1 << 6));
+				CS4265_SPDIF_CTL2_DIF, 0);

 		break;
 	default:
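Note: the oxygen_mixer change above is purely for readability: unary ! binds
tighter than !=, so !a != b already parses as (!a) != b; the added parentheses
only spell out what the code was doing. For example:

	int a = 2, b = 1;

	/* identical tests as far as the compiler is concerned */
	changed = !a != b;	/* (!a) != b  ->  0 != 1  ->  true */
	changed = (!a) != b;	/* same result, intent explicit */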
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index e9cc3aae5366..961bd7e5877e 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3341,6 +3341,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
 		break;

 	case RT5645_DMIC_DATA_GPIO5:
+		regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
+			RT5645_I2S2_DAC_PIN_MASK, RT5645_I2S2_DAC_PIN_GPIO);
 		regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1,
 			RT5645_DMIC_1_DP_MASK, RT5645_DMIC_1_DP_GPIO5);
 		regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 0353a6a273ab..278bb9f464c4 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -1693,6 +1693,10 @@
 #define RT5645_GP6_PIN_SFT			6
 #define RT5645_GP6_PIN_GPIO6			(0x0 << 6)
 #define RT5645_GP6_PIN_DMIC2_SDA		(0x1 << 6)
+#define RT5645_I2S2_DAC_PIN_MASK		(0x1 << 4)
+#define RT5645_I2S2_DAC_PIN_SFT			4
+#define RT5645_I2S2_DAC_PIN_I2S			(0x0 << 4)
+#define RT5645_I2S2_DAC_PIN_GPIO		(0x1 << 4)
 #define RT5645_GP8_PIN_MASK			(0x1 << 3)
 #define RT5645_GP8_PIN_SFT			3
 #define RT5645_GP8_PIN_GPIO8			(0x0 << 3)
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index 4c01bb43928d..5bbaa667bec1 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -701,6 +701,8 @@ int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata)
 	if (byt == NULL)
 		return -ENOMEM;

+	byt->dev = dev;
+
 	ipc = &byt->ipc;
 	ipc->dev = dev;
 	ipc->ops.tx_msg = byt_tx_msg;
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index f95f271aab0c..f6efa9d4acad 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -2119,6 +2119,8 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
 	if (hsw == NULL)
 		return -ENOMEM;

+	hsw->dev = dev;
+
 	ipc = &hsw->ipc;
 	ipc->dev = dev;
 	ipc->ops.tx_msg = hsw_tx_msg;
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 59ac211f8fe7..31068b8f3db0 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -33,6 +33,7 @@
 #include <sound/soc.h>
 #include <sound/soc-dapm.h>
 #include <sound/soc-topology.h>
+#include <sound/tlv.h>

 /*
  * We make several passes over the data (since it won't necessarily be ordered)
@@ -534,7 +535,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
 			k->put = bops[i].put;
 		if (k->get == NULL && bops[i].id == hdr->ops.get)
 			k->get = bops[i].get;
-		if (k->info == NULL && ops[i].id == hdr->ops.info)
+		if (k->info == NULL && bops[i].id == hdr->ops.info)
 			k->info = bops[i].info;
 	}

@@ -579,28 +580,51 @@ static int soc_tplg_init_kcontrol(struct soc_tplg *tplg,
 	return 0;
 }

-static int soc_tplg_create_tlv(struct soc_tplg *tplg,
-	struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_tlv *tplg_tlv)
+
+static int soc_tplg_create_tlv_db_scale(struct soc_tplg *tplg,
+	struct snd_kcontrol_new *kc, struct snd_soc_tplg_tlv_dbscale *scale)
 {
-	struct snd_ctl_tlv *tlv;
-	int size;
+	unsigned int item_len = 2 * sizeof(unsigned int);
+	unsigned int *p;

-	if (tplg_tlv->count == 0)
-		return 0;
-
-	size = ((tplg_tlv->count + (sizeof(unsigned int) - 1)) &
-		~(sizeof(unsigned int) - 1));
-	tlv = kzalloc(sizeof(*tlv) + size, GFP_KERNEL);
-	if (tlv == NULL)
+	p = kzalloc(item_len + 2 * sizeof(unsigned int), GFP_KERNEL);
+	if (!p)
 		return -ENOMEM;

-	dev_dbg(tplg->dev, " created TLV type %d size %d bytes\n",
-		tplg_tlv->numid, size);
+	p[0] = SNDRV_CTL_TLVT_DB_SCALE;
+	p[1] = item_len;
+	p[2] = scale->min;
+	p[3] = (scale->step & TLV_DB_SCALE_MASK)
+			| (scale->mute ? TLV_DB_SCALE_MUTE : 0);

-	tlv->numid = tplg_tlv->numid;
-	tlv->length = size;
-	memcpy(&tlv->tlv[0], tplg_tlv->data, size);
-	kc->tlv.p = (void *)tlv;
+	kc->tlv.p = (void *)p;
+	return 0;
+}
+
+static int soc_tplg_create_tlv(struct soc_tplg *tplg,
+	struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_hdr *tc)
+{
+	struct snd_soc_tplg_ctl_tlv *tplg_tlv;
+
+	if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE))
+		return 0;
+
+	if (tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
+		kc->tlv.c = snd_soc_bytes_tlv_callback;
+	} else {
+		tplg_tlv = &tc->tlv;
+		switch (tplg_tlv->type) {
+		case SNDRV_CTL_TLVT_DB_SCALE:
+			return soc_tplg_create_tlv_db_scale(tplg, kc,
+					&tplg_tlv->scale);

+		/* TODO: add support for other TLV types */
+		default:
+			dev_dbg(tplg->dev, "Unsupported TLV type %d\n",
+					tplg_tlv->type);
+			return -EINVAL;
+		}
+	}

 	return 0;
 }
@@ -772,7 +796,7 @@ static int soc_tplg_dmixer_create(struct soc_tplg *tplg, unsigned int count,
 	}

 	/* create any TLV data */
-	soc_tplg_create_tlv(tplg, &kc, &mc->tlv);
+	soc_tplg_create_tlv(tplg, &kc, &mc->hdr);

 	/* register control here */
 	err = soc_tplg_add_kcontrol(tplg, &kc,
@@ -1350,6 +1374,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
 	template.reg = w->reg;
 	template.shift = w->shift;
 	template.mask = w->mask;
+	template.subseq = w->subseq;
 	template.on_val = w->invert ? 0 : 1;
 	template.off_val = w->invert ? 1 : 0;
 	template.ignore_suspend = w->ignore_suspend;
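Note: the db_scale TLV built by soc_tplg_create_tlv_db_scale() is four 32-bit
words: type tag, payload length in bytes, minimum volume in 0.01 dB units, and
step size or'd with the mute flag. Written out by hand with made-up values:

	/* e.g. a control with a -60 dB minimum, 0.5 dB steps, mute supported */
	unsigned int tlv[4] = {
		SNDRV_CTL_TLVT_DB_SCALE,	/* p[0]: type */
		2 * sizeof(unsigned int),	/* p[1]: payload length */
		-6000,				/* p[2]: min, 0.01 dB units
						 * (signed, stored unsigned) */
		50 | TLV_DB_SCALE_MUTE,		/* p[3]: step | mute */
	};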
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index 08c2a36ef7a9..412459369686 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -19,6 +19,8 @@
  *   - PACKET_FANOUT_LB
  *   - PACKET_FANOUT_CPU
  *   - PACKET_FANOUT_ROLLOVER
+ *   - PACKET_FANOUT_CBPF
+ *   - PACKET_FANOUT_EBPF
  *
  * Todo:
  * - functionality: PACKET_FANOUT_FLAG_DEFRAG
@@ -44,7 +46,9 @@
 #include <arpa/inet.h>
 #include <errno.h>
 #include <fcntl.h>
+#include <linux/unistd.h>	/* for __NR_bpf */
 #include <linux/filter.h>
+#include <linux/bpf.h>
 #include <linux/if_packet.h>
 #include <net/ethernet.h>
 #include <netinet/ip.h>
@@ -91,6 +95,51 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets)
 	return fd;
 }

+static void sock_fanout_set_ebpf(int fd)
+{
+	const int len_off = __builtin_offsetof(struct __sk_buff, len);
+	struct bpf_insn prog[] = {
+		{ BPF_ALU64 | BPF_MOV | BPF_X,   6, 1, 0, 0 },
+		{ BPF_LDX   | BPF_W   | BPF_MEM, 0, 6, len_off, 0 },
+		{ BPF_JMP   | BPF_JGE | BPF_K,   0, 0, 1, DATA_LEN },
+		{ BPF_JMP   | BPF_JA  | BPF_K,   0, 0, 4, 0 },
+		{ BPF_LD    | BPF_B   | BPF_ABS, 0, 0, 0, 0x50 },
+		{ BPF_JMP   | BPF_JEQ | BPF_K,   0, 0, 2, DATA_CHAR },
+		{ BPF_JMP   | BPF_JEQ | BPF_K,   0, 0, 1, DATA_CHAR_1 },
+		{ BPF_ALU   | BPF_MOV | BPF_K,   0, 0, 0, 0 },
+		{ BPF_JMP   | BPF_EXIT,          0, 0, 0, 0 }
+	};
+	char log_buf[512];
+	union bpf_attr attr;
+	int pfd;
+
+	memset(&attr, 0, sizeof(attr));
+	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+	attr.insns = (unsigned long) prog;
+	attr.insn_cnt = sizeof(prog) / sizeof(prog[0]);
+	attr.license = (unsigned long) "GPL";
+	attr.log_buf = (unsigned long) log_buf,
+	attr.log_size = sizeof(log_buf),
+	attr.log_level = 1,
+
+	pfd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
+	if (pfd < 0) {
+		perror("bpf");
+		fprintf(stderr, "bpf verifier:\n%s\n", log_buf);
+		exit(1);
+	}
+
+	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &pfd, sizeof(pfd))) {
+		perror("fanout data ebpf");
+		exit(1);
+	}
+
+	if (close(pfd)) {
+		perror("close ebpf");
+		exit(1);
+	}
+}
+
 static char *sock_fanout_open_ring(int fd)
 {
 	struct tpacket_req req = {
@@ -115,8 +164,8 @@ static char *sock_fanout_open_ring(int fd)
 	ring = mmap(0, req.tp_block_size * req.tp_block_nr,
 		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	if (!ring) {
-		fprintf(stderr, "packetsock ring mmap\n");
+	if (ring == MAP_FAILED) {
+		perror("packetsock ring mmap");
 		exit(1);
 	}

@@ -209,6 +258,7 @@ static int test_datapath(uint16_t typeflags, int port_off,
 {
 	const int expect0[] = { 0, 0 };
 	char *rings[2];
+	uint8_t type = typeflags & 0xFF;
 	int fds[2], fds_udp[2][2], ret;

 	fprintf(stderr, "test: datapath 0x%hx\n", typeflags);
@@ -219,6 +269,11 @@ static int test_datapath(uint16_t typeflags, int port_off,
 		fprintf(stderr, "ERROR: failed open\n");
 		exit(1);
 	}
+	if (type == PACKET_FANOUT_CBPF)
+		sock_setfilter(fds[0], SOL_PACKET, PACKET_FANOUT_DATA);
+	else if (type == PACKET_FANOUT_EBPF)
+		sock_fanout_set_ebpf(fds[0]);
+
 	rings[0] = sock_fanout_open_ring(fds[0]);
 	rings[1] = sock_fanout_open_ring(fds[1]);
 	pair_udp_open(fds_udp[0], PORT_BASE);
@@ -227,11 +282,11 @@ static int test_datapath(uint16_t typeflags, int port_off,

 	/* Send data, but not enough to overflow a queue */
 	pair_udp_send(fds_udp[0], 15);
-	pair_udp_send(fds_udp[1], 5);
+	pair_udp_send_char(fds_udp[1], 5, DATA_CHAR_1);
 	ret = sock_fanout_read(fds, rings, expect1);

 	/* Send more data, overflow the queue */
-	pair_udp_send(fds_udp[0], 15);
+	pair_udp_send_char(fds_udp[0], 15, DATA_CHAR_1);
 	/* TODO: ensure consistent order between expect1 and expect2 */
 	ret |= sock_fanout_read(fds, rings, expect2);

@@ -275,6 +330,7 @@ int main(int argc, char **argv)
 	const int expect_rb[2][2] = { { 15, 5 }, { 20, 15 } };
 	const int expect_cpu0[2][2] = { { 20, 0 }, { 20, 0 } };
 	const int expect_cpu1[2][2] = { { 0, 20 }, { 0, 20 } };
+	const int expect_bpf[2][2] = { { 15, 5 }, { 15, 20 } };
 	int port_off = 2, tries = 5, ret;

 	test_control_single();
@@ -296,6 +352,11 @@ int main(int argc, char **argv)
 	ret |= test_datapath(PACKET_FANOUT_ROLLOVER,
 			     port_off, expect_rb[0], expect_rb[1]);

+	ret |= test_datapath(PACKET_FANOUT_CBPF,
+			     port_off, expect_bpf[0], expect_bpf[1]);
+	ret |= test_datapath(PACKET_FANOUT_EBPF,
+			     port_off, expect_bpf[0], expect_bpf[1]);
+
 	set_cpuaffinity(0);
 	ret |= test_datapath(PACKET_FANOUT_CPU, port_off,
 			     expect_cpu0[0], expect_cpu0[1]);
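Note: decoded, the instruction array in sock_fanout_set_ebpf() is roughly the
following restricted C; the program's return value selects the fanout socket
(reduced modulo the group size in the kernel). load_byte() stands in for the
BPF_LD|BPF_ABS payload access:

	int fanout_prog(struct __sk_buff *skb)
	{
		if (skb->len >= DATA_LEN) {
			int b = load_byte(skb, 0x50);	/* first payload byte */
			if (b == DATA_CHAR || b == DATA_CHAR_1)
				return b;	/* steer flows by payload byte */
		}
		return 0;
	}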
diff --git a/tools/testing/selftests/net/psock_lib.h b/tools/testing/selftests/net/psock_lib.h
index 37da54ac85a9..24bc7ec1be7d 100644
--- a/tools/testing/selftests/net/psock_lib.h
+++ b/tools/testing/selftests/net/psock_lib.h
@@ -30,6 +30,7 @@

 #define DATA_LEN			100
 #define DATA_CHAR			'a'
+#define DATA_CHAR_1			'b'

 #define PORT_BASE			8000

@@ -37,29 +38,36 @@
 # define __maybe_unused		__attribute__ ((__unused__))
 #endif

-static __maybe_unused void pair_udp_setfilter(int fd)
+static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
 {
 	struct sock_filter bpf_filter[] = {
 		{ 0x80, 0, 0, 0x00000000 },  /* LD  pktlen		      */
-		{ 0x35, 0, 5, DATA_LEN   },  /* JGE DATA_LEN  [f goto nomatch]*/
+		{ 0x35, 0, 4, DATA_LEN   },  /* JGE DATA_LEN  [f goto nomatch]*/
 		{ 0x30, 0, 0, 0x00000050 },  /* LD  ip[80]		      */
-		{ 0x15, 0, 3, DATA_CHAR  },  /* JEQ DATA_CHAR [f goto nomatch]*/
-		{ 0x30, 0, 0, 0x00000051 },  /* LD  ip[81]		      */
-		{ 0x15, 0, 1, DATA_CHAR  },  /* JEQ DATA_CHAR [f goto nomatch]*/
+		{ 0x15, 1, 0, DATA_CHAR  },  /* JEQ DATA_CHAR   [t goto match]*/
+		{ 0x15, 0, 1, DATA_CHAR_1},  /* JEQ DATA_CHAR_1 [t goto match]*/
 		{ 0x06, 0, 0, 0x00000060 },  /* RET match		      */
 		{ 0x06, 0, 0, 0x00000000 },  /* RET no match		      */
 	};
 	struct sock_fprog bpf_prog;

+	if (lvl == SOL_PACKET && optnum == PACKET_FANOUT_DATA)
+		bpf_filter[5].code = 0x16;   /* RET A			      */
+
 	bpf_prog.filter = bpf_filter;
 	bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
-	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf_prog,
+	if (setsockopt(fd, lvl, optnum, &bpf_prog,
 		       sizeof(bpf_prog))) {
 		perror("setsockopt SO_ATTACH_FILTER");
 		exit(1);
 	}
 }

+static __maybe_unused void pair_udp_setfilter(int fd)
+{
+	sock_setfilter(fd, SOL_SOCKET, SO_ATTACH_FILTER);
+}
+
 static __maybe_unused void pair_udp_open(int fds[], uint16_t port)
 {
 	struct sockaddr_in saddr, daddr;
@@ -96,11 +104,11 @@ static __maybe_unused void pair_udp_open(int fds[], uint16_t port)
 	}
 }

-static __maybe_unused void pair_udp_send(int fds[], int num)
+static __maybe_unused void pair_udp_send_char(int fds[], int num, char payload)
 {
 	char buf[DATA_LEN], rbuf[DATA_LEN];

-	memset(buf, DATA_CHAR, sizeof(buf));
+	memset(buf, payload, sizeof(buf));
 	while (num--) {
 		/* Should really handle EINTR and EAGAIN */
 		if (write(fds[0], buf, sizeof(buf)) != sizeof(buf)) {
@@ -118,6 +126,11 @@ static __maybe_unused void pair_udp_send(int fds[], int num)
 	}
 }

+static __maybe_unused void pair_udp_send(int fds[], int num)
+{
+	return pair_udp_send_char(fds, num, DATA_CHAR);
+}
+
 static __maybe_unused void pair_udp_close(int fds[])
 {
 	close(fds[0]);