
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Unbalanced refcounting in TIPC, from Jon Maloy.

 2) Only allow TCP_MD5SIG to be set on sockets in close or listen
    state. Once the connection is established, it makes no sense to
    change this. From Eric Dumazet. (See the sketch after this list.)

 3) Missing attribute validation in neigh_dump_table(), also from Eric
    Dumazet.

 4) Fix address comparisons in SCTP, from Xin Long.

 5) Neigh proxy table clearing can deadlock, from Wolfgang Bumiller.
    (See the freelist sketch after this list.)

 6) Fix tunnel refcounting in l2tp, from Guillaume Nault.

 7) Fix double list insert in team driver, from Paolo Abeni.

 8) af_vsock.ko module was accidentally made unremovable, from Stefan
    Hajnoczi.

 9) Fix reference to freed llc_sap object in llc stack, from Cong Wang.

10) Don't assume the netdevice struct is DMA'able memory in the
    virtio_net driver, from Michael S. Tsirkin. (See the control-buffer
    sketch after this list.)
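
For item 2, the fix gates the MD5 setsockopt on socket state. A minimal
sketch of the idea, condensed from the net/ipv4/tcp.c hunk included in
this pull (not a verbatim copy):

    #ifdef CONFIG_TCP_MD5SIG
        case TCP_MD5SIG:
        case TCP_MD5SIG_EXT:
                /* IP->key mappings may only change while no connection exists */
                if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
                        err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
                else
                        err = -EINVAL;
                break;
    #endif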
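
For item 5, the deadlock is avoided by unlinking proxy entries onto a
private freelist while holding the table lock, and only running the
destructor callbacks and dev_put() after the lock is dropped. A
condensed sketch of the pattern from the net/core/neighbour.c hunk
below (bucket-walk details assumed from the surrounding code):

    static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
                                        struct net_device *dev)
    {
            struct pneigh_entry *n, **np, *freelist = NULL;
            u32 h;

            for (h = 0; h <= PNEIGH_HASHMASK; h++) {
                    np = &tbl->phash_buckets[h];
                    while ((n = *np) != NULL) {
                            if (!dev || n->dev == dev) {
                                    *np = n->next;      /* unlink... */
                                    n->next = freelist; /* ...and defer freeing */
                                    freelist = n;
                                    continue;
                            }
                            np = &n->next;
                    }
            }
            write_unlock_bh(&tbl->lock);  /* callbacks run unlocked */
            while ((n = freelist)) {
                    freelist = n->next;
                    if (tbl->pdestructor)
                            tbl->pdestructor(n);
                    if (n->dev)
                            dev_put(n->dev);
                    kfree(n);
            }
            return -ENOENT;
    }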
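
For item 10, the control-virtqueue buffers move out of struct
virtnet_info, whose storage sits in the netdevice private area (a
kvmalloc allocation that can be vmalloc-backed, hence not DMA'able),
into a separately kzalloc'ed struct. A sketch per the
drivers/net/virtio_net.c hunks below:

    /* Control VQ buffers: protected by the rtnl lock */
    struct control_buf {
            struct virtio_net_ctrl_hdr hdr;
            virtio_net_ctrl_ack status;
            struct virtio_net_ctrl_mq mq;
            u8 promisc;
            u8 allmulti;
            __virtio16 vid;
            __virtio64 offloads;
    };

    /* in virtnet_alloc_queues(): kzalloc() memory is safe for DMA */
    vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
    if (!vi->ctrl)
            return -ENOMEM;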

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (62 commits)
  net/smc: fix shutdown in state SMC_LISTEN
  bnxt_en: Fix memory fault in bnxt_ethtool_init()
  virtio_net: sparse annotation fix
  virtio_net: fix adding vids on big-endian
  virtio_net: split out ctrl buffer
  net: hns: Avoid action name truncation
  docs: ip-sysctl.txt: fix name of some ipv6 variables
  vmxnet3: fix incorrect dereference when rxvlan is disabled
  llc: hold llc_sap before release_sock()
  MAINTAINERS: Direct networking documentation changes to netdev
  atm: iphase: fix spelling mistake: "Tansmit" -> "Transmit"
  net: qmi_wwan: add Wistron Neweb D19Q1
  net: caif: fix spelling mistake "UKNOWN" -> "UNKNOWN"
  net: stmmac: Disable ACS Feature for GMAC >= 4
  net: mvpp2: Fix DMA address mask size
  net: change the comment of dev_mc_init
  net: qualcomm: rmnet: Fix warning seen with fill_info
  tun: fix vlan packet truncation
  tipc: fix infinite loop when dumping link monitor summary
  tipc: fix use-after-free in tipc_nametbl_stop
  ...
Linus Torvalds 2018-04-20 09:34:39 -07:00
commit a72db42cee
69 changed files with 793 additions and 356 deletions


@ -136,6 +136,19 @@ Sorting
.. kernel-doc:: lib/list_sort.c
:export:
Text Searching
--------------
.. kernel-doc:: lib/textsearch.c
:doc: ts_intro
.. kernel-doc:: lib/textsearch.c
:export:
.. kernel-doc:: include/linux/textsearch.h
:functions: textsearch_find textsearch_next \
textsearch_get_pattern textsearch_get_pattern_len
UUID/GUID
---------


@ -169,7 +169,7 @@ access to BPF code as well.
BPF engine and instruction set
------------------------------
Under tools/net/ there's a small helper tool called bpf_asm which can
Under tools/bpf/ there's a small helper tool called bpf_asm which can
be used to write low-level filters for example scenarios mentioned in the
previous section. Asm-like syntax mentioned here has been implemented in
bpf_asm and will be used for further explanations (instead of dealing with
@ -359,7 +359,7 @@ $ ./bpf_asm -c foo
In particular, as usage with xt_bpf or cls_bpf can result in more complex BPF
filters that might not be obvious at first, it's good to test filters before
attaching to a live system. For that purpose, there's a small tool called
bpf_dbg under tools/net/ in the kernel source directory. This debugger allows
bpf_dbg under tools/bpf/ in the kernel source directory. This debugger allows
for testing BPF filters against given pcap files, single stepping through the
BPF code on the pcap's packets and to do BPF machine register dumps.
@ -483,7 +483,7 @@ Example output from dmesg:
[ 3389.935851] JIT code: 00000030: 00 e8 28 94 ff e0 83 f8 01 75 07 b8 ff ff 00 00
[ 3389.935852] JIT code: 00000040: eb 02 31 c0 c9 c3
In the kernel source tree under tools/net/, there's bpf_jit_disasm for
In the kernel source tree under tools/bpf/, there's bpf_jit_disasm for
generating disassembly out of the kernel log's hexdump:
# ./bpf_jit_disasm


@ -1390,26 +1390,26 @@ mld_qrv - INTEGER
Default: 2 (as specified by RFC3810 9.1)
Minimum: 1 (as specified by RFC6636 4.5)
max_dst_opts_cnt - INTEGER
max_dst_opts_number - INTEGER
Maximum number of non-padding TLVs allowed in a Destination
options extension header. If this value is less than zero
then unknown options are disallowed and the number of known
TLVs allowed is the absolute value of this number.
Default: 8
max_hbh_opts_cnt - INTEGER
max_hbh_opts_number - INTEGER
Maximum number of non-padding TLVs allowed in a Hop-by-Hop
options extension header. If this value is less than zero
then unknown options are disallowed and the number of known
TLVs allowed is the absolute value of this number.
Default: 8
max dst_opts_len - INTEGER
max_dst_opts_length - INTEGER
Maximum length allowed for a Destination options extension
header.
Default: INT_MAX (unlimited)
max hbh_opts_len - INTEGER
max_hbh_length - INTEGER
Maximum length allowed for a Hop-by-Hop options extension
header.
Default: INT_MAX (unlimited)


@ -9773,6 +9773,7 @@ F: include/uapi/linux/net_namespace.h
F: tools/testing/selftests/net/
F: lib/net_utils.c
F: lib/random32.c
F: Documentation/networking/
NETWORKING [IPSEC]
M: Steffen Klassert <steffen.klassert@secunet.com>


@ -671,7 +671,7 @@ static void ia_tx_poll (IADEV *iadev) {
if ((vcc->pop) && (skb1->len != 0))
{
vcc->pop(vcc, skb1);
IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n",
IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
(long)skb1);)
}
else
@ -1665,7 +1665,7 @@ static void tx_intr(struct atm_dev *dev)
status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
if (status & TRANSMIT_DONE){
IF_EVENT(printk("Tansmit Done Intr logic run\n");)
IF_EVENT(printk("Transmit Done Intr logic run\n");)
spin_lock_irqsave(&iadev->tx_lock, flags);
ia_tx_poll(iadev);
spin_unlock_irqrestore(&iadev->tx_lock, flags);


@ -68,12 +68,12 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg)
goto _do;
{
char _dup[len + 1];
char *dup, *tok, *name, *val;
int tmp;
strcpy(_dup, arg);
dup = _dup;
dup = kstrdup(arg, GFP_ATOMIC);
if (!dup)
return;
while ((tok = strsep(&dup, ","))) {
if (!strlen(tok))
@ -89,6 +89,8 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg)
deftaps = tmp;
}
}
kfree(dup);
}
_do:


@ -279,7 +279,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
u16 timebase, u8 *buf, int len)
{
u8 *p;
u8 frame[len + 32];
u8 frame[MAX_DFRAME_LEN_L1 + 32];
struct socket *socket = NULL;
if (debug & DEBUG_L1OIP_MSG)
@ -902,7 +902,11 @@ handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
p = skb->data;
l = skb->len;
while (l) {
ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME;
/*
* This is technically bounded by L1OIP_MAX_PERFRAME but
* MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME
*/
ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1;
l1oip_socket_send(hc, 0, dch->slot, 0,
hc->chan[dch->slot].tx_counter++, p, ll);
p += ll;
@ -1140,7 +1144,11 @@ handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
p = skb->data;
l = skb->len;
while (l) {
ll = (l < L1OIP_MAX_PERFRAME) ? l : L1OIP_MAX_PERFRAME;
/*
* This is technically bounded by L1OIP_MAX_PERFRAME but
* MAX_DFRAME_LEN_L1 < L1OIP_MAX_PERFRAME
*/
ll = (l < MAX_DFRAME_LEN_L1) ? l : MAX_DFRAME_LEN_L1;
l1oip_socket_send(hc, hc->codec, bch->slot, 0,
hc->chan[bch->slot].tx_counter, p, ll);
hc->chan[bch->slot].tx_counter += ll;


@ -285,10 +285,18 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
struct sk_buff_head *rxq)
{
u16 buf[4] = { 0 }, status, seq_id;
u64 ns, timelo, timehi;
struct skb_shared_hwtstamps *shwt;
struct sk_buff_head received;
u64 ns, timelo, timehi;
unsigned long flags;
int err;
/* The latched timestamp belongs to one of the received frames. */
__skb_queue_head_init(&received);
spin_lock_irqsave(&rxq->lock, flags);
skb_queue_splice_tail_init(rxq, &received);
spin_unlock_irqrestore(&rxq->lock, flags);
mutex_lock(&chip->reg_lock);
err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
reg, buf, ARRAY_SIZE(buf));
@ -311,7 +319,7 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
/* Since the device can only handle one time stamp at a time,
* we purge any extra frames from the queue.
*/
for ( ; skb; skb = skb_dequeue(rxq)) {
for ( ; skb; skb = __skb_dequeue(&received)) {
if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) {
ns = timehi << 16 | timelo;


@ -1927,22 +1927,39 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
return retval;
}
static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen)
static void bnxt_get_pkgver(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
u16 index = 0;
u32 datalen;
char *pkgver;
u32 pkglen;
u8 *pkgbuf;
int len;
if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
&index, NULL, &datalen) != 0)
return NULL;
&index, NULL, &pkglen) != 0)
return;
memset(buf, 0, buflen);
if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0)
return NULL;
pkgbuf = kzalloc(pkglen, GFP_KERNEL);
if (!pkgbuf) {
dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
pkglen);
return;
}
return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf,
datalen);
if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
goto err;
pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
pkglen);
if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
len = strlen(bp->fw_ver_str);
snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
"/pkg %s", pkgver);
}
err:
kfree(pkgbuf);
}
static int bnxt_get_eeprom(struct net_device *dev,
@ -2615,22 +2632,10 @@ void bnxt_ethtool_init(struct bnxt *bp)
struct hwrm_selftest_qlist_input req = {0};
struct bnxt_test_info *test_info;
struct net_device *dev = bp->dev;
char *pkglog;
int i, rc;
pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL);
if (pkglog) {
char *pkgver;
int len;
bnxt_get_pkgver(dev);
pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
len = strlen(bp->fw_ver_str);
snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
"/pkg %s", pkgver);
}
kfree(pkglog);
}
if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
return;


@ -59,8 +59,6 @@ enum bnxt_nvm_directory_type {
#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0)
#define BNX_DIR_ATTR_PROP_STREAM (1 << 1)
#define BNX_PKG_LOG_MAX_LENGTH 4096
enum bnxnvm_pkglog_field_index {
BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0,
BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1,


@ -87,7 +87,7 @@ do { \
#define HNAE_AE_REGISTER 0x1
#define RCB_RING_NAME_LEN 16
#define RCB_RING_NAME_LEN (IFNAMSIZ + 4)
#define HNAE_LOWEST_LATENCY_COAL_PARAM 30
#define HNAE_LOW_LATENCY_COAL_PARAM 80


@ -794,46 +794,61 @@ static int ibmvnic_login(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
unsigned long timeout = msecs_to_jiffies(30000);
struct device *dev = &adapter->vdev->dev;
int retry_count = 0;
int rc;
do {
if (adapter->renegotiate) {
adapter->renegotiate = false;
if (retry_count > IBMVNIC_MAX_QUEUES) {
netdev_warn(netdev, "Login attempts exceeded\n");
return -1;
}
adapter->init_done_rc = 0;
reinit_completion(&adapter->init_done);
rc = send_login(adapter);
if (rc) {
netdev_warn(netdev, "Unable to login\n");
return rc;
}
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
netdev_warn(netdev, "Login timed out\n");
return -1;
}
if (adapter->init_done_rc == PARTIALSUCCESS) {
retry_count++;
release_sub_crqs(adapter, 1);
adapter->init_done_rc = 0;
reinit_completion(&adapter->init_done);
send_cap_queries(adapter);
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
dev_err(dev, "Capabilities query timeout\n");
netdev_warn(netdev,
"Capabilities query timed out\n");
return -1;
}
rc = init_sub_crqs(adapter);
if (rc) {
dev_err(dev,
"Initialization of SCRQ's failed\n");
netdev_warn(netdev,
"SCRQ initialization failed\n");
return -1;
}
rc = init_sub_crq_irqs(adapter);
if (rc) {
dev_err(dev,
"Initialization of SCRQ's irqs failed\n");
netdev_warn(netdev,
"SCRQ irq initialization failed\n");
return -1;
}
}
reinit_completion(&adapter->init_done);
rc = send_login(adapter);
if (rc) {
dev_err(dev, "Unable to attempt device login\n");
return rc;
} else if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
dev_err(dev, "Login timeout\n");
} else if (adapter->init_done_rc) {
netdev_warn(netdev, "Adapter login failed\n");
return -1;
}
} while (adapter->renegotiate);
} while (adapter->init_done_rc == PARTIALSUCCESS);
/* handle pending MAC address changes after successful login */
if (adapter->mac_change_pending) {
@ -1034,16 +1049,14 @@ static int __ibmvnic_open(struct net_device *netdev)
netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
if (prev_state == VNIC_CLOSED)
enable_irq(adapter->rx_scrq[i]->irq);
else
enable_scrq_irq(adapter, adapter->rx_scrq[i]);
enable_scrq_irq(adapter, adapter->rx_scrq[i]);
}
for (i = 0; i < adapter->req_tx_queues; i++) {
netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
if (prev_state == VNIC_CLOSED)
enable_irq(adapter->tx_scrq[i]->irq);
else
enable_scrq_irq(adapter, adapter->tx_scrq[i]);
enable_scrq_irq(adapter, adapter->tx_scrq[i]);
}
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
@ -1184,6 +1197,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
if (adapter->tx_scrq[i]->irq) {
netdev_dbg(netdev,
"Disabling tx_scrq[%d] irq\n", i);
disable_scrq_irq(adapter, adapter->tx_scrq[i]);
disable_irq(adapter->tx_scrq[i]->irq);
}
}
@ -1193,6 +1207,7 @@ static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
if (adapter->rx_scrq[i]->irq) {
netdev_dbg(netdev,
"Disabling rx_scrq[%d] irq\n", i);
disable_scrq_irq(adapter, adapter->rx_scrq[i]);
disable_irq(adapter->rx_scrq[i]->irq);
}
}
@ -1828,7 +1843,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
for (i = 0; i < adapter->req_rx_queues; i++)
napi_schedule(&adapter->napi[i]);
if (adapter->reset_reason != VNIC_RESET_FAILOVER)
if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
netdev_notify_peers(netdev);
netif_carrier_on(netdev);
@ -2601,12 +2617,19 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
{
struct device *dev = &adapter->vdev->dev;
unsigned long rc;
u64 val;
if (scrq->hw_irq > 0x100000000ULL) {
dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
return 1;
}
val = (0xff000000) | scrq->hw_irq;
rc = plpar_hcall_norets(H_EOI, val);
if (rc)
dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
val, rc);
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
if (rc)
@ -3170,7 +3193,7 @@ static int send_version_xchg(struct ibmvnic_adapter *adapter)
struct vnic_login_client_data {
u8 type;
__be16 len;
char name;
char name[];
} __packed;
static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
@ -3199,21 +3222,21 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
vlcd->type = 1;
len = strlen(os_name) + 1;
vlcd->len = cpu_to_be16(len);
strncpy(&vlcd->name, os_name, len);
vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);
strncpy(vlcd->name, os_name, len);
vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
/* Type 2 - LPAR name */
vlcd->type = 2;
len = strlen(utsname()->nodename) + 1;
vlcd->len = cpu_to_be16(len);
strncpy(&vlcd->name, utsname()->nodename, len);
vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);
strncpy(vlcd->name, utsname()->nodename, len);
vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
/* Type 3 - device name */
vlcd->type = 3;
len = strlen(adapter->netdev->name) + 1;
vlcd->len = cpu_to_be16(len);
strncpy(&vlcd->name, adapter->netdev->name, len);
strncpy(vlcd->name, adapter->netdev->name, len);
}
static int send_login(struct ibmvnic_adapter *adapter)
@ -3942,7 +3965,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
* to resend the login buffer with fewer queues requested.
*/
if (login_rsp_crq->generic.rc.code) {
adapter->renegotiate = true;
adapter->init_done_rc = login_rsp_crq->generic.rc.code;
complete(&adapter->init_done);
return 0;
}


@ -1035,7 +1035,6 @@ struct ibmvnic_adapter {
struct ibmvnic_sub_crq_queue **tx_scrq;
struct ibmvnic_sub_crq_queue **rx_scrq;
bool renegotiate;
/* rx structs */
struct napi_struct *napi;


@ -663,7 +663,7 @@ enum mvpp2_tag_type {
#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \
MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_VID_FILT_RANGE_START - 1)
#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
@ -916,6 +916,8 @@ static struct {
#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ)
#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
/* Definitions */
/* Shared Packet Processor resources */
@ -1429,7 +1431,7 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
if (port->priv->hw_version == MVPP21)
return tx_desc->pp21.buf_dma_addr;
else
return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK;
}
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
@ -1447,7 +1449,7 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
} else {
u64 val = (u64)addr;
tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK;
tx_desc->pp22.buf_dma_addr_ptp |= val;
tx_desc->pp22.packet_offset = offset;
}
@ -1507,7 +1509,7 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
if (port->priv->hw_version == MVPP21)
return rx_desc->pp21.buf_dma_addr;
else
return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK;
}
static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
@ -1516,7 +1518,7 @@ static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
if (port->priv->hw_version == MVPP21)
return rx_desc->pp21.buf_cookie;
else
return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK;
}
static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
@ -8789,7 +8791,7 @@ static int mvpp2_probe(struct platform_device *pdev)
}
if (priv->hw_version == MVPP22) {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
if (err)
goto err_mg_clk;
/* Sadly, the BM pools all share the same register to


@ -258,9 +258,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
nfp_tunnel_keep_alive(app, skb);
break;
case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH:
/* Acks from the NFP that the route is added - ignore. */
break;
default:
nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
type);
@ -275,18 +272,49 @@ out:
void nfp_flower_cmsg_process_rx(struct work_struct *work)
{
struct sk_buff_head cmsg_joined;
struct nfp_flower_priv *priv;
struct sk_buff *skb;
priv = container_of(work, struct nfp_flower_priv, cmsg_work);
skb_queue_head_init(&cmsg_joined);
while ((skb = skb_dequeue(&priv->cmsg_skbs)))
spin_lock_bh(&priv->cmsg_skbs_high.lock);
skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined);
spin_unlock_bh(&priv->cmsg_skbs_high.lock);
spin_lock_bh(&priv->cmsg_skbs_low.lock);
skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined);
spin_unlock_bh(&priv->cmsg_skbs_low.lock);
while ((skb = __skb_dequeue(&cmsg_joined)))
nfp_flower_cmsg_process_one_rx(priv->app, skb);
}
static void
nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
{
struct nfp_flower_priv *priv = app->priv;
struct sk_buff_head *skb_head;
if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
skb_head = &priv->cmsg_skbs_high;
else
skb_head = &priv->cmsg_skbs_low;
if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) {
nfp_flower_cmsg_warn(app, "Dropping queued control messages\n");
dev_kfree_skb_any(skb);
return;
}
skb_queue_tail(skb_head, skb);
schedule_work(&priv->cmsg_work);
}
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_flower_cmsg_hdr *cmsg_hdr;
cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
@ -306,8 +334,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
nfp_flower_process_mtu_ack(app, skb)) {
/* Handle MTU acks outside wq to prevent RTNL conflict. */
dev_consume_skb_any(skb);
} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
/* Acks from the NFP that the route is added - ignore. */
dev_consume_skb_any(skb);
} else {
skb_queue_tail(&priv->cmsg_skbs, skb);
schedule_work(&priv->cmsg_work);
nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
}
}


@ -108,6 +108,8 @@
#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4)
#define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0)
#define NFP_FLOWER_WORKQ_MAX_SKBS 30000
#define nfp_flower_cmsg_warn(app, fmt, args...) \
do { \
if (net_ratelimit()) \


@ -519,7 +519,8 @@ static int nfp_flower_init(struct nfp_app *app)
app->priv = app_priv;
app_priv->app = app;
skb_queue_head_init(&app_priv->cmsg_skbs);
skb_queue_head_init(&app_priv->cmsg_skbs_high);
skb_queue_head_init(&app_priv->cmsg_skbs_low);
INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
init_waitqueue_head(&app_priv->reify_wait_queue);
@ -549,7 +550,8 @@ static void nfp_flower_clean(struct nfp_app *app)
{
struct nfp_flower_priv *app_priv = app->priv;
skb_queue_purge(&app_priv->cmsg_skbs);
skb_queue_purge(&app_priv->cmsg_skbs_high);
skb_queue_purge(&app_priv->cmsg_skbs_low);
flush_work(&app_priv->cmsg_work);
nfp_flower_metadata_cleanup(app);


@ -107,7 +107,10 @@ struct nfp_mtu_conf {
* @mask_table: Hash table used to store masks
* @flow_table: Hash table used to store flower rules
* @cmsg_work: Workqueue for control messages processing
* @cmsg_skbs: List of skbs for control message processing
* @cmsg_skbs_high: List of higher priority skbs for control message
* processing
* @cmsg_skbs_low: List of lower priority skbs for control message
* processing
* @nfp_mac_off_list: List of MAC addresses to offload
* @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs
* @nfp_ipv4_off_list: List of IPv4 addresses to offload
@ -136,7 +139,8 @@ struct nfp_flower_priv {
DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
struct work_struct cmsg_work;
struct sk_buff_head cmsg_skbs;
struct sk_buff_head cmsg_skbs_high;
struct sk_buff_head cmsg_skbs_low;
struct list_head nfp_mac_off_list;
struct list_head nfp_mac_index_list;
struct list_head nfp_ipv4_off_list;


@ -211,8 +211,11 @@ int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
break;
err = msleep_interruptible(timeout_ms);
if (err != 0)
if (err != 0) {
nfp_info(mutex->cpp,
"interrupted waiting for NFP mutex\n");
return -ERESTARTSYS;
}
if (time_is_before_eq_jiffies(warn_at)) {
warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ;


@ -281,8 +281,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr,
if ((*reg & mask) == val)
return 0;
if (msleep_interruptible(25))
return -ERESTARTSYS;
msleep(25);
if (time_after(start_time, wait_until))
return -ETIMEDOUT;


@ -350,15 +350,16 @@ static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
real_dev = priv->real_dev;
if (!rmnet_is_real_dev_registered(real_dev))
return -ENODEV;
if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
goto nla_put_failure;
port = rmnet_get_port_rtnl(real_dev);
if (rmnet_is_real_dev_registered(real_dev)) {
port = rmnet_get_port_rtnl(real_dev);
f.flags = port->data_format;
} else {
f.flags = 0;
}
f.flags = port->data_format;
f.mask = ~0;
if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))


@ -4776,8 +4776,7 @@ static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
goto out_unlock;
}
if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
flow_id, filter_idx)) {
if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, 0)) {
ret = false;
goto out_unlock;
}
@ -5265,7 +5264,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
ids = vlan->uc;
}
filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
/* Insert/renew filters */
for (i = 0; i < addr_count; i++) {
@ -5334,7 +5333,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
int rc;
u16 *id;
filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);


@ -2912,7 +2912,7 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
if (test_bit(index, table->used_bitmap) &&
table->spec[index].priority == EFX_FILTER_PRI_HINT &&
rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
flow_id, index)) {
flow_id, 0)) {
efx_farch_filter_table_clear_entry(efx, table, index);
ret = true;
}


@ -733,6 +733,27 @@ struct efx_rss_context {
u32 rx_indir_table[128];
};
#ifdef CONFIG_RFS_ACCEL
/**
* struct efx_async_filter_insertion - Request to asynchronously insert a filter
* @net_dev: Reference to the netdevice
* @spec: The filter to insert
* @work: Workitem for this request
* @rxq_index: Identifies the channel for which this request was made
* @flow_id: Identifies the kernel-side flow for which this request was made
*/
struct efx_async_filter_insertion {
struct net_device *net_dev;
struct efx_filter_spec spec;
struct work_struct work;
u16 rxq_index;
u32 flow_id;
};
/* Maximum number of ARFS workitems that may be in flight on an efx_nic */
#define EFX_RPS_MAX_IN_FLIGHT 8
#endif /* CONFIG_RFS_ACCEL */
/**
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
@ -850,6 +871,8 @@ struct efx_rss_context {
* @rps_expire_channel: Next channel to check for expiry
* @rps_expire_index: Next index to check for expiry in
* @rps_expire_channel's @rps_flow_id
* @rps_slot_map: bitmap of in-flight entries in @rps_slot
* @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work()
* @active_queues: Count of RX and TX queues that haven't been flushed and drained.
* @rxq_flush_pending: Count of number of receive queues that need to be flushed.
* Decremented when the efx_flush_rx_queue() is called.
@ -1004,6 +1027,8 @@ struct efx_nic {
struct mutex rps_mutex;
unsigned int rps_expire_channel;
unsigned int rps_expire_index;
unsigned long rps_slot_map;
struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT];
#endif
atomic_t active_queues;


@ -827,31 +827,16 @@ MODULE_PARM_DESC(rx_refill_threshold,
#ifdef CONFIG_RFS_ACCEL
/**
* struct efx_async_filter_insertion - Request to asynchronously insert a filter
* @net_dev: Reference to the netdevice
* @spec: The filter to insert
* @work: Workitem for this request
* @rxq_index: Identifies the channel for which this request was made
* @flow_id: Identifies the kernel-side flow for which this request was made
*/
struct efx_async_filter_insertion {
struct net_device *net_dev;
struct efx_filter_spec spec;
struct work_struct work;
u16 rxq_index;
u32 flow_id;
};
static void efx_filter_rfs_work(struct work_struct *data)
{
struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
work);
struct efx_nic *efx = netdev_priv(req->net_dev);
struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
int slot_idx = req - efx->rps_slot;
int rc;
rc = efx->type->filter_insert(efx, &req->spec, false);
rc = efx->type->filter_insert(efx, &req->spec, true);
if (rc >= 0) {
/* Remember this so we can check whether to expire the filter
* later.
@ -878,8 +863,8 @@ static void efx_filter_rfs_work(struct work_struct *data)
}
/* Release references */
clear_bit(slot_idx, &efx->rps_slot_map);
dev_put(req->net_dev);
kfree(req);
}
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
@ -888,22 +873,36 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_async_filter_insertion *req;
struct flow_keys fk;
int slot_idx;
int rc;
if (flow_id == RPS_FLOW_ID_INVALID)
return -EINVAL;
/* find a free slot */
for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
break;
if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
return -EBUSY;
if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
return -EPROTONOSUPPORT;
if (flow_id == RPS_FLOW_ID_INVALID) {
rc = -EINVAL;
goto out_clear;
}
if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;
if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
return -EPROTONOSUPPORT;
if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
rc = -EPROTONOSUPPORT;
goto out_clear;
}
req = kmalloc(sizeof(*req), GFP_ATOMIC);
if (!req)
return -ENOMEM;
if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
rc = -EPROTONOSUPPORT;
goto out_clear;
}
if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
rc = -EPROTONOSUPPORT;
goto out_clear;
}
req = efx->rps_slot + slot_idx;
efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
rxq_index);
@ -933,6 +932,9 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
req->flow_id = flow_id;
schedule_work(&req->work);
return 0;
out_clear:
clear_bit(slot_idx, &efx->rps_slot_map);
return rc;
}
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)


@ -347,7 +347,7 @@ enum power_event {
#define MTL_RX_OVERFLOW_INT BIT(16)
/* Default operating mode of the MAC */
#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \
#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | \
GMAC_CONFIG_BE | GMAC_CONFIG_DCRS)
/* To dump the core regs excluding the Address Registers */


@ -31,13 +31,6 @@ static void dwmac4_core_init(struct mac_device_info *hw,
value |= GMAC_CORE_INIT;
/* Clear ACS bit because Ethernet switch tagging formats such as
* Broadcom tags can look like invalid LLC/SNAP packets and cause the
* hardware to truncate packets on reception.
*/
if (netdev_uses_dsa(dev))
value &= ~GMAC_CONFIG_ACS;
if (mtu > 1500)
value |= GMAC_CONFIG_2K;
if (mtu > 2000)


@ -3495,8 +3495,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
* Type frames (LLC/LLC-SNAP)
*
* llc_snap is never checked in GMAC >= 4, so this ACS
* feature is always disabled and packets need to be
* stripped manually.
*/
if (unlikely(status != llc_snap))
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
unlikely(status != llc_snap))
frame_len -= ETH_FCS_LEN;
if (netif_msg_rx_status(priv)) {


@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
err = netdev_upper_dev_link(real_dev, dev, extack);
if (err < 0)
goto put_dev;
goto unregister;
/* need to be already registered so that ->init has run and
* the MAC addr is set
@ -3316,8 +3316,7 @@ del_dev:
macsec_del_dev(macsec);
unlink:
netdev_upper_dev_unlink(real_dev, dev);
put_dev:
dev_put(real_dev);
unregister:
unregister_netdevice(dev);
return err;
}


@ -20,6 +20,7 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/microchipphy.h>
#include <linux/delay.h>
#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC "Microchip LAN88XX PHY driver"
@ -30,6 +31,16 @@ struct lan88xx_priv {
__u32 wolopts;
};
static int lan88xx_read_page(struct phy_device *phydev)
{
return __phy_read(phydev, LAN88XX_EXT_PAGE_ACCESS);
}
static int lan88xx_write_page(struct phy_device *phydev, int page)
{
return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page);
}
static int lan88xx_phy_config_intr(struct phy_device *phydev)
{
int rc;
@ -66,6 +77,150 @@ static int lan88xx_suspend(struct phy_device *phydev)
return 0;
}
static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr,
u32 data)
{
int val, save_page, ret = 0;
u16 buf;
/* Save current page */
save_page = phy_save_page(phydev);
if (save_page < 0) {
pr_warn("Failed to get current page\n");
goto err;
}
/* Switch to TR page */
lan88xx_write_page(phydev, LAN88XX_EXT_PAGE_ACCESS_TR);
ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_LOW_DATA,
(data & 0xFFFF));
if (ret < 0) {
pr_warn("Failed to write TR low data\n");
goto err;
}
ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_HIGH_DATA,
(data & 0x00FF0000) >> 16);
if (ret < 0) {
pr_warn("Failed to write TR high data\n");
goto err;
}
/* Config control bits [15:13] of register */
buf = (regaddr & ~(0x3 << 13));/* Clr [14:13] to write data in reg */
buf |= 0x8000; /* Set [15] to Packet transmit */
ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_CR, buf);
if (ret < 0) {
pr_warn("Failed to write data in reg\n");
goto err;
}
usleep_range(1000, 2000);/* Wait for Data to be written */
val = __phy_read(phydev, LAN88XX_EXT_PAGE_TR_CR);
if (!(val & 0x8000))
pr_warn("TR Register[0x%X] configuration failed\n", regaddr);
err:
return phy_restore_page(phydev, save_page, ret);
}
static void lan88xx_config_TR_regs(struct phy_device *phydev)
{
int err;
/* Get access to Channel 0x1, Node 0xF , Register 0x01.
* Write 24-bit value 0x12B00A to register. Setting MrvlTrFix1000Kf,
* MrvlTrFix1000Kp, MasterEnableTR bits.
*/
err = lan88xx_TR_reg_set(phydev, 0x0F82, 0x12B00A);
if (err < 0)
pr_warn("Failed to Set Register[0x0F82]\n");
/* Get access to Channel b'10, Node b'1101, Register 0x06.
* Write 24-bit value 0xD2C46F to register. Setting SSTrKf1000Slv,
* SSTrKp1000Mas bits.
*/
err = lan88xx_TR_reg_set(phydev, 0x168C, 0xD2C46F);
if (err < 0)
pr_warn("Failed to Set Register[0x168C]\n");
/* Get access to Channel b'10, Node b'1111, Register 0x11.
* Write 24-bit value 0x620 to register. Setting rem_upd_done_thresh
* bits
*/
err = lan88xx_TR_reg_set(phydev, 0x17A2, 0x620);
if (err < 0)
pr_warn("Failed to Set Register[0x17A2]\n");
/* Get access to Channel b'10, Node b'1101, Register 0x10.
* Write 24-bit value 0xEEFFDD to register. Setting
* eee_TrKp1Long_1000, eee_TrKp2Long_1000, eee_TrKp3Long_1000,
* eee_TrKp1Short_1000,eee_TrKp2Short_1000, eee_TrKp3Short_1000 bits.
*/
err = lan88xx_TR_reg_set(phydev, 0x16A0, 0xEEFFDD);
if (err < 0)
pr_warn("Failed to Set Register[0x16A0]\n");
/* Get access to Channel b'10, Node b'1101, Register 0x13.
* Write 24-bit value 0x071448 to register. Setting
* slv_lpi_tr_tmr_val1, slv_lpi_tr_tmr_val2 bits.
*/
err = lan88xx_TR_reg_set(phydev, 0x16A6, 0x071448);
if (err < 0)
pr_warn("Failed to Set Register[0x16A6]\n");
/* Get access to Channel b'10, Node b'1101, Register 0x12.
* Write 24-bit value 0x13132F to register. Setting
* slv_sigdet_timer_val1, slv_sigdet_timer_val2 bits.
*/
err = lan88xx_TR_reg_set(phydev, 0x16A4, 0x13132F);
if (err < 0)
pr_warn("Failed to Set Register[0x16A4]\n");
/* Get access to Channel b'10, Node b'1101, Register 0x14.
* Write 24-bit value 0x0 to register. Setting eee_3level_delay,
* eee_TrKf_freeze_delay bits.
*/
err = lan88xx_TR_reg_set(phydev, 0x16A8, 0x0);
if (err < 0)
pr_warn("Failed to Set Register[0x16A8]\n");
/* Get access to Channel b'01, Node b'1111, Register 0x34.
* Write 24-bit value 0x91B06C to register. Setting
* FastMseSearchThreshLong1000, FastMseSearchThreshShort1000,
* FastMseSearchUpdGain1000 bits.
*/
err = lan88xx_TR_reg_set(phydev, 0x0FE8, 0x91B06C);
if (err < 0)
pr_warn("Failed to Set Register[0x0FE8]\n");
/* Get access to Channel b'01, Node b'1111, Register 0x3E.
* Write 24-bit value 0xC0A028 to register. Setting
* FastMseKp2ThreshLong1000, FastMseKp2ThreshShort1000,
* FastMseKp2UpdGain1000, FastMseKp2ExitEn1000 bits.
*/
err = lan88xx_TR_reg_set(phydev, 0x0FFC, 0xC0A028);
if (err < 0)
pr_warn("Failed to Set Register[0x0FFC]\n");
/* Get access to Channel b'01, Node b'1111, Register 0x35.
* Write 24-bit value 0x041600 to register. Setting
* FastMseSearchPhShNum1000, FastMseSearchClksPerPh1000,
* FastMsePhChangeDelay1000 bits.
*/
err = lan88xx_TR_reg_set(phydev, 0x0FEA, 0x041600);
if (err < 0)
pr_warn("Failed to Set Register[0x0FEA]\n");
/* Get access to Channel b'10, Node b'1101, Register 0x03.
* Write 24-bit value 0x000004 to register. Setting TrFreeze bits.
*/
err = lan88xx_TR_reg_set(phydev, 0x1686, 0x000004);
if (err < 0)
pr_warn("Failed to Set Register[0x1686]\n");
}
static int lan88xx_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
@ -132,6 +287,25 @@ static void lan88xx_set_mdix(struct phy_device *phydev)
phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
}
static int lan88xx_config_init(struct phy_device *phydev)
{
int val;
genphy_config_init(phydev);
/*Zerodetect delay enable */
val = phy_read_mmd(phydev, MDIO_MMD_PCS,
PHY_ARDENNES_MMD_DEV_3_PHY_CFG);
val |= PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_;
phy_write_mmd(phydev, MDIO_MMD_PCS, PHY_ARDENNES_MMD_DEV_3_PHY_CFG,
val);
/* Config DSP registers */
lan88xx_config_TR_regs(phydev);
return 0;
}
static int lan88xx_config_aneg(struct phy_device *phydev)
{
lan88xx_set_mdix(phydev);
@ -151,7 +325,7 @@ static struct phy_driver microchip_phy_driver[] = {
.probe = lan88xx_probe,
.remove = lan88xx_remove,
.config_init = genphy_config_init,
.config_init = lan88xx_config_init,
.config_aneg = lan88xx_config_aneg,
.ack_interrupt = lan88xx_phy_ack_interrupt,
@ -160,6 +334,8 @@ static struct phy_driver microchip_phy_driver[] = {
.suspend = lan88xx_suspend,
.resume = genphy_resume,
.set_wol = lan88xx_set_wol,
.read_page = lan88xx_read_page,
.write_page = lan88xx_write_page,
} };
module_phy_driver(microchip_phy_driver);


@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
}
}
static bool __team_option_inst_tmp_find(const struct list_head *opts,
const struct team_option_inst *needle)
{
struct team_option_inst *opt_inst;
list_for_each_entry(opt_inst, opts, tmp_list)
if (opt_inst == needle)
return true;
return false;
}
static int __team_options_register(struct team *team,
const struct team_option *option,
size_t option_count)
@ -2568,6 +2579,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
if (err)
goto team_put;
opt_inst->changed = true;
/* dumb/evil user-space can send us duplicate opt,
* keep only the last one
*/
if (__team_option_inst_tmp_find(&opt_inst_list,
opt_inst))
continue;
list_add(&opt_inst->tmp_list, &opt_inst_list);
}
if (!opt_found) {


@ -1102,12 +1102,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
len = run_ebpf_filter(tun, skb, len);
/* Trim extra bytes since we may insert vlan proto & TCI
* in tun_put_user().
*/
len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0;
if (len <= 0 || pskb_trim(skb, len))
if (len == 0 || pskb_trim(skb, len))
goto drop;
if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))


@ -1107,6 +1107,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
{QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
{QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
{QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */


@ -147,6 +147,17 @@ struct receive_queue {
struct xdp_rxq_info xdp_rxq;
};
/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
struct virtio_net_ctrl_hdr hdr;
virtio_net_ctrl_ack status;
struct virtio_net_ctrl_mq mq;
u8 promisc;
u8 allmulti;
__virtio16 vid;
__virtio64 offloads;
};
struct virtnet_info {
struct virtio_device *vdev;
struct virtqueue *cvq;
@ -192,14 +203,7 @@ struct virtnet_info {
struct hlist_node node;
struct hlist_node node_dead;
/* Control VQ buffers: protected by the rtnl lock */
struct virtio_net_ctrl_hdr ctrl_hdr;
virtio_net_ctrl_ack ctrl_status;
struct virtio_net_ctrl_mq ctrl_mq;
u8 ctrl_promisc;
u8 ctrl_allmulti;
u16 ctrl_vid;
u64 ctrl_offloads;
struct control_buf *ctrl;
/* Ethtool settings */
u8 duplex;
@ -1269,7 +1273,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct receive_queue *rq =
container_of(napi, struct receive_queue, napi);
unsigned int received;
struct virtnet_info *vi = rq->vq->vdev->priv;
struct send_queue *sq;
unsigned int received, qp;
bool xdp_xmit = false;
virtnet_poll_cleantx(rq);
@ -1280,8 +1286,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
if (received < budget)
virtqueue_napi_complete(napi, rq->vq, received);
if (xdp_xmit)
if (xdp_xmit) {
qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
smp_processor_id();
sq = &vi->sq[qp];
virtqueue_kick(sq->vq);
xdp_do_flush_map();
}
return received;
}
@ -1454,25 +1465,25 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
/* Caller should know better */
BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
vi->ctrl_status = ~0;
vi->ctrl_hdr.class = class;
vi->ctrl_hdr.cmd = cmd;
vi->ctrl->status = ~0;
vi->ctrl->hdr.class = class;
vi->ctrl->hdr.cmd = cmd;
/* Add header */
sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
sgs[out_num++] = &hdr;
if (out)
sgs[out_num++] = out;
/* Add return status. */
sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
sgs[out_num] = &stat;
BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
if (unlikely(!virtqueue_kick(vi->cvq)))
return vi->ctrl_status == VIRTIO_NET_OK;
return vi->ctrl->status == VIRTIO_NET_OK;
/* Spin for a response, the kick causes an ioport write, trapping
* into the hypervisor, so the request should be handled immediately.
@ -1481,7 +1492,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
!virtqueue_is_broken(vi->cvq))
cpu_relax();
return vi->ctrl_status == VIRTIO_NET_OK;
return vi->ctrl->status == VIRTIO_NET_OK;
}
static int virtnet_set_mac_address(struct net_device *dev, void *p)
@ -1593,8 +1604,8 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
return 0;
vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq));
vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
@ -1653,22 +1664,22 @@ static void virtnet_set_rx_mode(struct net_device *dev)
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
return;
vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_PROMISC, sg))
dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
vi->ctrl_promisc ? "en" : "dis");
vi->ctrl->promisc ? "en" : "dis");
sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
vi->ctrl_allmulti ? "en" : "dis");
vi->ctrl->allmulti ? "en" : "dis");
uc_count = netdev_uc_count(dev);
mc_count = netdev_mc_count(dev);
@ -1714,8 +1725,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
vi->ctrl_vid = vid;
sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD, &sg))
@ -1729,8 +1740,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
vi->ctrl_vid = vid;
sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_DEL, &sg))
@ -2126,9 +2137,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{
struct scatterlist sg;
vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads);
vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads));
sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
@ -2351,6 +2362,7 @@ static void virtnet_free_queues(struct virtnet_info *vi)
kfree(vi->rq);
kfree(vi->sq);
kfree(vi->ctrl);
}
static void _free_receive_bufs(struct virtnet_info *vi)
@ -2543,6 +2555,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
{
int i;
vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
if (!vi->ctrl)
goto err_ctrl;
vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
if (!vi->sq)
goto err_sq;
@ -2571,6 +2586,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
err_rq:
kfree(vi->sq);
err_sq:
kfree(vi->ctrl);
err_ctrl:
return -ENOMEM;
}


@ -1218,6 +1218,7 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
union {
void *ptr;
struct ethhdr *eth;
struct vlan_ethhdr *veth;
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
struct tcphdr *tcp;
@ -1228,16 +1229,24 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
return 0;
if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
skb->protocol == cpu_to_be16(ETH_P_8021AD))
hlen = sizeof(struct vlan_ethhdr);
else
hlen = sizeof(struct ethhdr);
hdr.eth = eth_hdr(skb);
if (gdesc->rcd.v4) {
BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP));
hdr.ptr += sizeof(struct ethhdr);
BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
hdr.ptr += hlen;
BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
hlen = hdr.ipv4->ihl << 2;
hdr.ptr += hdr.ipv4->ihl << 2;
} else if (gdesc->rcd.v6) {
BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6));
hdr.ptr += sizeof(struct ethhdr);
BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
hdr.ptr += hlen;
/* Use an estimated value, since we also need to handle
* TSO case.
*/


@ -69,10 +69,10 @@
/*
* Version numbers
*/
#define VMXNET3_DRIVER_VERSION_STRING "1.4.13.0-k"
#define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k"
/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
#define VMXNET3_DRIVER_VERSION_NUM 0x01040d00
#define VMXNET3_DRIVER_VERSION_NUM 0x01040e00
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */


@ -663,7 +663,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
* Returns true if the skb is tagged with multiple vlan headers, regardless
* of whether it is hardware accelerated or not.
*/
static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
@ -673,6 +673,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
if (likely(!eth_type_vlan(protocol)))
return false;
if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
return false;
veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
}
@ -690,7 +693,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
*
* Returns features without unsafe ones if the skb has multiple tags.
*/
static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
netdev_features_t features)
{
if (skb_vlan_tagged_multi(skb)) {


@ -70,4 +70,12 @@
#define LAN88XX_MMD3_CHIP_ID (32877)
#define LAN88XX_MMD3_CHIP_REV (32878)
/* DSP registers */
#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG (0x806A)
#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_ (0x2000)
#define LAN88XX_EXT_PAGE_ACCESS_TR (0x52B5)
#define LAN88XX_EXT_PAGE_TR_CR 16
#define LAN88XX_EXT_PAGE_TR_LOW_DATA 17
#define LAN88XX_EXT_PAGE_TR_HIGH_DATA 18
#endif /* _MICROCHIPPHY_H */


@ -62,7 +62,7 @@ struct ts_config
int flags;
/**
* get_next_block - fetch next block of data
* @get_next_block: fetch next block of data
* @consumed: number of bytes consumed by the caller
* @dst: destination buffer
* @conf: search configuration
@ -79,7 +79,7 @@ struct ts_config
struct ts_state *state);
/**
* finish - finalize/clean a series of get_next_block() calls
* @finish: finalize/clean a series of get_next_block() calls
* @conf: search configuration
* @state: search state
*


@ -10,7 +10,10 @@
* Pablo Neira Ayuso <pablo@netfilter.org>
*
* ==========================================================================
*
*/
/**
* DOC: ts_intro
* INTRODUCTION
*
* The textsearch infrastructure provides text searching facilities for
@ -19,7 +22,9 @@
*
* ARCHITECTURE
*
* User
* .. code-block:: none
*
* User
* +----------------+
* | finish()|<--------------(6)-----------------+
* |get_next_block()|<--------------(5)---------------+ |
@ -33,21 +38,21 @@
* | (3)|----->| find()/next() |-----------+ |
* | (7)|----->| destroy() |----------------------+
* +----------------+ +---------------+
*
* (1) User configures a search by calling _prepare() specifying the
* search parameters such as the pattern and algorithm name.
*
* (1) User configures a search by calling textsearch_prepare() specifying
* the search parameters such as the pattern and algorithm name.
* (2) Core requests the algorithm to allocate and initialize a search
* configuration according to the specified parameters.
* (3) User starts the search(es) by calling _find() or _next() to
* fetch subsequent occurrences. A state variable is provided
* to the algorithm to store persistent variables.
* (3) User starts the search(es) by calling textsearch_find() or
* textsearch_next() to fetch subsequent occurrences. A state variable
* is provided to the algorithm to store persistent variables.
* (4) Core eventually resets the search offset and forwards the find()
* request to the algorithm.
* (5) Algorithm calls get_next_block() provided by the user continuously
* to fetch the data to be searched in block by block.
* (6) Algorithm invokes finish() after the last call to get_next_block
* to clean up any leftovers from get_next_block. (Optional)
* (7) User destroys the configuration by calling _destroy().
* (7) User destroys the configuration by calling textsearch_destroy().
* (8) Core notifies the algorithm to destroy algorithm specific
* allocations. (Optional)
*
@ -62,9 +67,10 @@
* amount of times and even in parallel as long as a separate struct
* ts_state variable is provided to every instance.
*
* The actual search is performed by either calling textsearch_find_-
* continuous() for linear data or by providing an own get_next_block()
* implementation and calling textsearch_find(). Both functions return
* The actual search is performed by either calling
* textsearch_find_continuous() for linear data or by providing
* an own get_next_block() implementation and
* calling textsearch_find(). Both functions return
* the position of the first occurrence of the pattern or UINT_MAX if
* no match was found. Subsequent occurrences can be found by calling
* textsearch_next() regardless of the linearity of the data.
@ -72,7 +78,7 @@
* Once you're done using a configuration it must be given back via
* textsearch_destroy.
*
* EXAMPLE
* EXAMPLE::
*
* int pos;
* struct ts_config *conf;
@ -87,13 +93,13 @@
* goto errout;
* }
*
* pos = textsearch_find_continuous(conf, &state, example, strlen(example));
* pos = textsearch_find_continuous(conf, \&state, example, strlen(example));
* if (pos != UINT_MAX)
* panic("Oh my god, dancing chickens at %d\n", pos);
* panic("Oh my god, dancing chickens at \%d\n", pos);
*
* textsearch_destroy(conf);
* ==========================================================================
*/
/* ========================================================================== */
#include <linux/module.h>
#include <linux/types.h>
@ -225,7 +231,7 @@ static unsigned int get_linear_data(unsigned int consumed, const u8 **dst,
*
* Returns the position of first occurrence of the pattern or
* %UINT_MAX if no occurrence was found.
*/
*/
unsigned int textsearch_find_continuous(struct ts_config *conf,
struct ts_state *state,
const void *data, unsigned int len)


@ -174,7 +174,7 @@ static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" :
flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" :
flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ?
"REMOTE_SHUTDOWN" : "UKNOWN CTRL COMMAND");
"REMOTE_SHUTDOWN" : "UNKNOWN CTRL COMMAND");


@ -2969,7 +2969,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb,
}
EXPORT_SYMBOL(passthru_features_check);
static netdev_features_t dflt_features_check(const struct sk_buff *skb,
static netdev_features_t dflt_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{


@ -839,7 +839,7 @@ void dev_mc_flush(struct net_device *dev)
EXPORT_SYMBOL(dev_mc_flush);
/**
* dev_mc_flush - Init multicast address list
* dev_mc_init - Init multicast address list
* @dev: device
*
* Init multicast address list.


@ -55,7 +55,8 @@ static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
struct net_device *dev);
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
@ -291,8 +292,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
write_lock_bh(&tbl->lock);
neigh_flush_dev(tbl, dev);
pneigh_ifdown(tbl, dev);
write_unlock_bh(&tbl->lock);
pneigh_ifdown_and_unlock(tbl, dev);
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
@ -681,9 +681,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
return -ENOENT;
}
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
struct net_device *dev)
{
struct pneigh_entry *n, **np;
struct pneigh_entry *n, **np, *freelist = NULL;
u32 h;
for (h = 0; h <= PNEIGH_HASHMASK; h++) {
@ -691,16 +692,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
while ((n = *np) != NULL) {
if (!dev || n->dev == dev) {
*np = n->next;
if (tbl->pdestructor)
tbl->pdestructor(n);
if (n->dev)
dev_put(n->dev);
kfree(n);
n->next = freelist;
freelist = n;
continue;
}
np = &n->next;
}
}
write_unlock_bh(&tbl->lock);
while ((n = freelist)) {
freelist = n->next;
n->next = NULL;
if (tbl->pdestructor)
tbl->pdestructor(n);
if (n->dev)
dev_put(n->dev);
kfree(n);
}
return -ENOENT;
}
@ -2323,12 +2331,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL);
if (!err) {
if (tb[NDA_IFINDEX])
if (tb[NDA_IFINDEX]) {
if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
return -EINVAL;
filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
if (tb[NDA_MASTER])
}
if (tb[NDA_MASTER]) {
if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
return -EINVAL;
filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
}
if (filter_idx || filter_master_idx)
flags |= NLM_F_DUMP_FILTERED;
}


@ -91,9 +91,9 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
next_opt = memchr(opt, '#', end - opt) ?: end;
opt_len = next_opt - opt;
if (!opt_len) {
printk(KERN_WARNING
"Empty option to dns_resolver key\n");
if (opt_len <= 0 || opt_len > 128) {
pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
opt_len);
return -EINVAL;
}
@ -127,10 +127,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
}
bad_option_value:
printk(KERN_WARNING
"Option '%*.*s' to dns_resolver key:"
" bad/missing value\n",
opt_nlen, opt_nlen, opt);
pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
opt_nlen, opt_nlen, opt);
return -EINVAL;
} while (opt = next_opt + 1, opt < end);
}
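
Two things change here: the log calls become rate-limited, so a user feeding bad keys cannot flood the log, and option lengths are bounded to (0, 128]. A sketch of the '#'-separated option walk with the same bound (the 128 cap mirrors the patch; the parser itself is illustrative):

#include <string.h>
#include <errno.h>

#define MAX_OPT_LEN 128	/* upper bound the patch enforces */

static int parse_options(const char *opt, const char *end)
{
	while (opt < end) {
		const char *next_opt = memchr(opt, '#', end - opt);
		size_t opt_len;

		if (!next_opt)
			next_opt = end;
		opt_len = next_opt - opt;

		/* Reject empty or oversized options before touching them. */
		if (opt_len == 0 || opt_len > MAX_OPT_LEN)
			return -EINVAL;

		/* ... handle one option of opt_len bytes at opt ... */
		opt = next_opt + 1;
	}
	return 0;
}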

View File

@ -1109,6 +1109,10 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
struct ip_options_rcu *opt;
struct rtable *rt;
rt = *rtp;
if (unlikely(!rt))
return -EFAULT;
/*
* setup for corking.
*/
@ -1124,9 +1128,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
cork->flags |= IPCORK_OPT;
cork->addr = ipc->addr;
}
rt = *rtp;
if (unlikely(!rt))
return -EFAULT;
/*
* We steal reference to this route, caller should not release it
*/

View File

@ -2368,6 +2368,7 @@ void tcp_write_queue_purge(struct sock *sk)
INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
sk_mem_reclaim(sk);
tcp_clear_all_retrans_hints(tcp_sk(sk));
tcp_sk(sk)->packets_out = 0;
}
int tcp_disconnect(struct sock *sk, int flags)
@ -2417,7 +2418,6 @@ int tcp_disconnect(struct sock *sk, int flags)
icsk->icsk_backoff = 0;
tp->snd_cwnd = 2;
icsk->icsk_probes_out = 0;
tp->packets_out = 0;
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd_cnt = 0;
tp->window_clamp = 0;
@ -2813,8 +2813,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
#ifdef CONFIG_TCP_MD5SIG
case TCP_MD5SIG:
case TCP_MD5SIG_EXT:
/* Read the IP->Key mappings from userspace */
err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
else
err = -EINVAL;
break;
#endif
case TCP_USER_TIMEOUT:
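
The guard relies on the kernel's TCPF_* convention: each flag is 1 << TCP_<state>, so testing whether sk_state belongs to a set of states is a single shift and mask. A standalone illustration of the idiom (the state numbers match the kernel's tcp_states.h, but the point is the bitmask test):

#include <stdio.h>

/* Same numbering as the kernel's tcp_states.h. */
enum { TCP_ESTABLISHED = 1, TCP_CLOSE = 7, TCP_LISTEN = 10 };

#define TCPF(state) (1u << (state))

static int md5sig_allowed(int state)
{
	/* Key mappings may only change before a connection exists. */
	return (TCPF(state) & (TCPF(TCP_CLOSE) | TCPF(TCP_LISTEN))) != 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       md5sig_allowed(TCP_CLOSE),		/* 1 */
	       md5sig_allowed(TCP_LISTEN),		/* 1 */
	       md5sig_allowed(TCP_ESTABLISHED));	/* 0 */
	return 0;
}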

View File

@ -183,6 +183,26 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
{
const struct l2tp_net *pn = l2tp_pernet(net);
struct l2tp_tunnel *tunnel;
int count = 0;
rcu_read_lock_bh();
list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
if (++count > nth) {
l2tp_tunnel_inc_refcount(tunnel);
rcu_read_unlock_bh();
return tunnel;
}
}
rcu_read_unlock_bh();
return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
/* Lookup a session. A new reference is held on the returned session. */
struct l2tp_session *l2tp_session_get(const struct net *net,
struct l2tp_tunnel *tunnel,
@ -335,26 +355,6 @@ err_tlock:
}
EXPORT_SYMBOL_GPL(l2tp_session_register);
struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth)
{
struct l2tp_net *pn = l2tp_pernet(net);
struct l2tp_tunnel *tunnel;
int count = 0;
rcu_read_lock_bh();
list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
if (++count > nth) {
rcu_read_unlock_bh();
return tunnel;
}
}
rcu_read_unlock_bh();
return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);
/*****************************************************************************
* Receive data handling
*****************************************************************************/
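
The find-to-get rename encodes the actual fix: the reference is now taken inside the RCU read-side section, so the tunnel cannot be freed between lookup and use, and every caller becomes responsible for dropping it. A userspace sketch of the pattern with a mutex standing in for RCU (names illustrative):

#include <pthread.h>
#include <stddef.h>

struct tunnel {
	struct tunnel *next;
	int refcount;		/* protected by list_lock in this sketch */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tunnel *tunnel_list;

/* Return the nth tunnel with an extra reference held, or NULL. Taking
 * the reference before dropping the lock is the point of the fix: the
 * object stays valid for the caller, who must later drop the ref. */
static struct tunnel *tunnel_get_nth(int nth)
{
	struct tunnel *t;
	int count = 0;

	pthread_mutex_lock(&list_lock);
	for (t = tunnel_list; t; t = t->next) {
		if (++count > nth) {
			t->refcount++;
			pthread_mutex_unlock(&list_lock);
			return t;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return NULL;
}

The following l2tp hunks are the other half of the contract: each exit path in the netlink dump and seq-file code now drops the reference it received, including the previously empty ->stop() callbacks.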

View File

@ -212,6 +212,8 @@ static inline void *l2tp_session_priv(struct l2tp_session *session)
}
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth);
void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
struct l2tp_session *l2tp_session_get(const struct net *net,
@ -220,7 +222,6 @@ struct l2tp_session *l2tp_session_get(const struct net *net,
struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth);
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
const char *ifname);
struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth);
int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,

View File

@ -47,7 +47,11 @@ struct l2tp_dfs_seq_data {
static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
{
pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx);
/* Drop reference taken during previous invocation */
if (pd->tunnel)
l2tp_tunnel_dec_refcount(pd->tunnel);
pd->tunnel = l2tp_tunnel_get_nth(pd->net, pd->tunnel_idx);
pd->tunnel_idx++;
}
@ -96,7 +100,14 @@ static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos)
static void l2tp_dfs_seq_stop(struct seq_file *p, void *v)
{
/* nothing to do */
struct l2tp_dfs_seq_data *pd = v;
if (!pd || pd == SEQ_START_TOKEN)
return;
/* Drop reference taken by last invocation of l2tp_dfs_next_tunnel() */
if (pd->tunnel)
l2tp_tunnel_dec_refcount(pd->tunnel);
}
static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)

View File

@ -487,14 +487,17 @@ static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback
struct net *net = sock_net(skb->sk);
for (;;) {
tunnel = l2tp_tunnel_find_nth(net, ti);
tunnel = l2tp_tunnel_get_nth(net, ti);
if (tunnel == NULL)
goto out;
if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
tunnel, L2TP_CMD_TUNNEL_GET) < 0)
tunnel, L2TP_CMD_TUNNEL_GET) < 0) {
l2tp_tunnel_dec_refcount(tunnel);
goto out;
}
l2tp_tunnel_dec_refcount(tunnel);
ti++;
}
@ -848,7 +851,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
for (;;) {
if (tunnel == NULL) {
tunnel = l2tp_tunnel_find_nth(net, ti);
tunnel = l2tp_tunnel_get_nth(net, ti);
if (tunnel == NULL)
goto out;
}
@ -856,6 +859,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
session = l2tp_session_get_nth(tunnel, si);
if (session == NULL) {
ti++;
l2tp_tunnel_dec_refcount(tunnel);
tunnel = NULL;
si = 0;
continue;
@ -865,6 +869,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
cb->nlh->nlmsg_seq, NLM_F_MULTI,
session, L2TP_CMD_SESSION_GET) < 0) {
l2tp_session_dec_refcount(session);
l2tp_tunnel_dec_refcount(tunnel);
break;
}
l2tp_session_dec_refcount(session);

View File

@ -1551,16 +1551,19 @@ struct pppol2tp_seq_data {
static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
{
/* Drop reference taken during previous invocation */
if (pd->tunnel)
l2tp_tunnel_dec_refcount(pd->tunnel);
for (;;) {
pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx);
pd->tunnel = l2tp_tunnel_get_nth(net, pd->tunnel_idx);
pd->tunnel_idx++;
if (pd->tunnel == NULL)
break;
/* Only accept L2TPv2 tunnels */
if (!pd->tunnel || pd->tunnel->version == 2)
return;
/* Ignore L2TPv3 tunnels */
if (pd->tunnel->version < 3)
break;
l2tp_tunnel_dec_refcount(pd->tunnel);
}
}
@ -1609,7 +1612,14 @@ static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
static void pppol2tp_seq_stop(struct seq_file *p, void *v)
{
/* nothing to do */
struct pppol2tp_seq_data *pd = v;
if (!pd || pd == SEQ_START_TOKEN)
return;
/* Drop reference taken by last invocation of pppol2tp_next_tunnel() */
if (pd->tunnel)
l2tp_tunnel_dec_refcount(pd->tunnel);
}
static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)

View File

@ -189,6 +189,7 @@ static int llc_ui_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct llc_sock *llc;
struct llc_sap *sap;
if (unlikely(sk == NULL))
goto out;
@ -199,9 +200,15 @@ static int llc_ui_release(struct socket *sock)
llc->laddr.lsap, llc->daddr.lsap);
if (!llc_send_disc(sk))
llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
sap = llc->sap;
/* Hold this for release_sock(), so that llc_backlog_rcv() could still
* use it.
*/
llc_sap_hold(sap);
if (!sock_flag(sk, SOCK_ZAPPED))
llc_sap_remove_socket(llc->sap, sk);
release_sock(sk);
llc_sap_put(sap);
if (llc->dev)
dev_put(llc->dev);
sock_put(sk);

View File

@ -3008,6 +3008,7 @@ static int packet_release(struct socket *sock)
packet_flush_mclist(sk);
lock_sock(sk);
if (po->rx_ring.pg_vec) {
memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 0);
@ -3017,6 +3018,7 @@ static int packet_release(struct socket *sock)
memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 1);
}
release_sock(sk);
f = fanout_release(sk);
@ -3643,6 +3645,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
union tpacket_req_u req_u;
int len;
lock_sock(sk);
switch (po->tp_version) {
case TPACKET_V1:
case TPACKET_V2:
@ -3653,12 +3656,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
len = sizeof(req_u.req3);
break;
}
if (optlen < len)
return -EINVAL;
if (copy_from_user(&req_u.req, optval, len))
return -EFAULT;
return packet_set_ring(sk, &req_u, 0,
optname == PACKET_TX_RING);
if (optlen < len) {
ret = -EINVAL;
} else {
if (copy_from_user(&req_u.req, optval, len))
ret = -EFAULT;
else
ret = packet_set_ring(sk, &req_u, 0,
optname == PACKET_TX_RING);
}
release_sock(sk);
return ret;
}
case PACKET_COPY_THRESH:
{
@ -4208,8 +4216,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
/* Added to avoid minimal code churn */
struct tpacket_req *req = &req_u->req;
lock_sock(sk);
rb = tx_ring ? &po->tx_ring : &po->rx_ring;
rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
@ -4347,7 +4353,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
if (pg_vec)
free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
release_sock(sk);
return err;
}
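
With the locking hoisted out of packet_set_ring(), the early returns in packet_setsockopt() had to become assignments so that release_sock() runs on every path. The same single-exit discipline in plain C (a sketch, with a stub standing in for packet_set_ring()):

#include <pthread.h>
#include <string.h>
#include <errno.h>

static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_set_ring(const void *req, size_t len)
{
	(void)req; (void)len;
	return 0;	/* stub standing in for packet_set_ring() */
}

static int set_ring_opt(const void *optval, size_t optlen, size_t need)
{
	char req[64];
	int ret;

	pthread_mutex_lock(&sock_lock);
	if (optlen < need || need > sizeof(req)) {
		ret = -EINVAL;	/* no early return: fall through to unlock */
	} else {
		memcpy(req, optval, need);	/* copy_from_user() stand-in */
		ret = do_set_ring(req, need);
	}
	pthread_mutex_unlock(&sock_lock);
	return ret;
}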

View File

@ -1135,3 +1135,4 @@ module_exit(qrtr_proto_fini);
MODULE_DESCRIPTION("Qualcomm IPC-router driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_NETPROTO(PF_QIPCRTR);

View File

@ -556,44 +556,47 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
addr->v6.sin6_scope_id = 0;
}
static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
const union sctp_addr *addr2)
{
if (addr1->sa.sa_family != addr2->sa.sa_family) {
if (addr1->sa.sa_family == AF_INET &&
addr2->sa.sa_family == AF_INET6 &&
ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
addr2->v6.sin6_addr.s6_addr32[3] ==
addr1->v4.sin_addr.s_addr)
return 1;
if (addr2->sa.sa_family == AF_INET &&
addr1->sa.sa_family == AF_INET6 &&
ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
addr1->v6.sin6_addr.s6_addr32[3] ==
addr2->v4.sin_addr.s_addr)
return 1;
return 0;
}
if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
return 0;
/* If this is a linklocal address, compare the scope_id. */
if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
return 0;
return 1;
}
/* Compare addresses exactly.
* v4-mapped-v6 is also in consideration.
*/
static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
const union sctp_addr *addr2)
{
if (addr1->sa.sa_family != addr2->sa.sa_family) {
if (addr1->sa.sa_family == AF_INET &&
addr2->sa.sa_family == AF_INET6 &&
ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) {
if (addr2->v6.sin6_port == addr1->v4.sin_port &&
addr2->v6.sin6_addr.s6_addr32[3] ==
addr1->v4.sin_addr.s_addr)
return 1;
}
if (addr2->sa.sa_family == AF_INET &&
addr1->sa.sa_family == AF_INET6 &&
ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) {
if (addr1->v6.sin6_port == addr2->v4.sin_port &&
addr1->v6.sin6_addr.s6_addr32[3] ==
addr2->v4.sin_addr.s_addr)
return 1;
}
return 0;
}
if (addr1->v6.sin6_port != addr2->v6.sin6_port)
return 0;
if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
return 0;
/* If this is a linklocal address, compare the scope_id. */
if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
(addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) {
return 0;
}
}
return 1;
return __sctp_v6_cmp_addr(addr1, addr2) &&
addr1->v6.sin6_port == addr2->v6.sin6_port;
}
/* Initialize addr struct to INADDR_ANY. */
@ -875,8 +878,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
const union sctp_addr *addr2,
struct sctp_sock *opt)
{
struct sctp_af *af1, *af2;
struct sock *sk = sctp_opt2sk(opt);
struct sctp_af *af1, *af2;
af1 = sctp_get_af_specific(addr1->sa.sa_family);
af2 = sctp_get_af_specific(addr2->sa.sa_family);
@ -892,10 +895,7 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
return 1;
if (addr1->sa.sa_family != addr2->sa.sa_family)
return 0;
return af1->cmp_addr(addr1, addr2);
return __sctp_v6_cmp_addr(addr1, addr2);
}
/* Verify that the provided sockaddr looks bindable. Common verification,
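
The bug was that sctp_inet6_cmp_addr() bailed out whenever the two families differed, so an IPv4 address never matched its IPv4-mapped IPv6 form; the refactor funnels both callers through __sctp_v6_cmp_addr(), which handles the mapped case, and keeps the port comparison only where it belongs. A small sketch of the mapped-address comparison using standard socket types:

#include <netinet/in.h>
#include <string.h>

/* Return 1 if addr6 is the IPv4-mapped form (::ffff:a.b.c.d) of addr4. */
static int v4_matches_mapped_v6(const struct sockaddr_in *a4,
				const struct sockaddr_in6 *a6)
{
	if (!IN6_IS_ADDR_V4MAPPED(&a6->sin6_addr))
		return 0;
	/* The embedded IPv4 address occupies the last four bytes. */
	return memcmp(&a6->sin6_addr.s6_addr[12],
		      &a4->sin_addr.s_addr, 4) == 0;
}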

View File

@ -1259,14 +1259,12 @@ static int smc_shutdown(struct socket *sock, int how)
rc = smc_close_shutdown_write(smc);
break;
case SHUT_RD:
if (sk->sk_state == SMC_LISTEN)
rc = smc_close_active(smc);
else
rc = 0;
/* nothing more to do because peer is not involved */
rc = 0;
/* nothing more to do because peer is not involved */
break;
}
rc1 = kernel_sock_shutdown(smc->clcsock, how);
if (smc->clcsock)
rc1 = kernel_sock_shutdown(smc->clcsock, how);
/* map sock_shutdown_cmd constants to sk_shutdown value range */
sk->sk_shutdown |= how + 1;

View File

@ -296,9 +296,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
strp_start_timer(strp, timeo);
}
stm->accum_len += cand_len;
strp->need_bytes = stm->strp.full_len -
stm->accum_len;
stm->accum_len += cand_len;
stm->early_eaten = cand_len;
STRP_STATS_ADD(strp->stats.bytes, cand_len);
desc->count = 0; /* Stop reading socket */
@ -321,6 +321,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
/* Hurray, we have a new message! */
cancel_delayed_work(&strp->msg_timer_work);
strp->skb_head = NULL;
strp->need_bytes = 0;
STRP_STATS_INCR(strp->stats.msgs);
/* Give skb to upper layer */
@ -410,9 +411,7 @@ void strp_data_ready(struct strparser *strp)
return;
if (strp->need_bytes) {
if (strp_peek_len(strp) >= strp->need_bytes)
strp->need_bytes = 0;
else
if (strp_peek_len(strp) < strp->need_bytes)
return;
}

View File

@ -777,7 +777,7 @@ int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
ret = tipc_bearer_get_name(net, bearer_name, bearer_id);
if (ret || !mon)
return -EINVAL;
return 0;
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_MON_GET);

View File

@ -241,7 +241,8 @@ err:
static struct publication *tipc_service_remove_publ(struct net *net,
struct tipc_service *sc,
u32 lower, u32 upper,
u32 node, u32 key)
u32 node, u32 key,
struct service_range **rng)
{
struct tipc_subscription *sub, *tmp;
struct service_range *sr;
@ -275,19 +276,15 @@ static struct publication *tipc_service_remove_publ(struct net *net,
list_del(&p->all_publ);
list_del(&p->local_publ);
/* Remove service range item if this was its last publication */
if (list_empty(&sr->all_publ)) {
if (list_empty(&sr->all_publ))
last = true;
rb_erase(&sr->tree_node, &sc->ranges);
kfree(sr);
}
/* Notify any waiting subscriptions */
list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) {
tipc_sub_report_overlap(sub, p->lower, p->upper, TIPC_WITHDRAWN,
p->port, p->node, p->scope, last);
}
*rng = sr;
return p;
}
@ -379,13 +376,20 @@ struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
u32 node, u32 key)
{
struct tipc_service *sc = tipc_service_find(net, type);
struct service_range *sr = NULL;
struct publication *p = NULL;
if (!sc)
return NULL;
spin_lock_bh(&sc->lock);
p = tipc_service_remove_publ(net, sc, lower, upper, node, key);
p = tipc_service_remove_publ(net, sc, lower, upper, node, key, &sr);
/* Remove service range item if this was its last publication */
if (sr && list_empty(&sr->all_publ)) {
rb_erase(&sr->tree_node, &sc->ranges);
kfree(sr);
}
/* Delete service item if there are no more publications and subscriptions */
if (RB_EMPTY_ROOT(&sc->ranges) && list_empty(&sc->subscriptions)) {
@ -665,13 +669,14 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower,
/**
* tipc_nametbl_subscribe - add a subscription object to the name table
*/
void tipc_nametbl_subscribe(struct tipc_subscription *sub)
bool tipc_nametbl_subscribe(struct tipc_subscription *sub)
{
struct name_table *nt = tipc_name_table(sub->net);
struct tipc_net *tn = tipc_net(sub->net);
struct tipc_subscr *s = &sub->evt.s;
u32 type = tipc_sub_read(s, seq.type);
struct tipc_service *sc;
bool res = true;
spin_lock_bh(&tn->nametbl_lock);
sc = tipc_service_find(sub->net, type);
@ -685,8 +690,10 @@ void tipc_nametbl_subscribe(struct tipc_subscription *sub)
pr_warn("Failed to subscribe for {%u,%u,%u}\n", type,
tipc_sub_read(s, seq.lower),
tipc_sub_read(s, seq.upper));
res = false;
}
spin_unlock_bh(&tn->nametbl_lock);
return res;
}
/**
@ -744,16 +751,17 @@ int tipc_nametbl_init(struct net *net)
static void tipc_service_delete(struct net *net, struct tipc_service *sc)
{
struct service_range *sr, *tmpr;
struct publication *p, *tmpb;
struct publication *p, *tmp;
spin_lock_bh(&sc->lock);
rbtree_postorder_for_each_entry_safe(sr, tmpr, &sc->ranges, tree_node) {
list_for_each_entry_safe(p, tmpb,
&sr->all_publ, all_publ) {
list_for_each_entry_safe(p, tmp, &sr->all_publ, all_publ) {
tipc_service_remove_publ(net, sc, p->lower, p->upper,
p->node, p->key);
p->node, p->key, &sr);
kfree_rcu(p, rcu);
}
rb_erase(&sr->tree_node, &sc->ranges);
kfree(sr);
}
hlist_del_init_rcu(&sc->service_list);
spin_unlock_bh(&sc->lock);

View File

@ -126,7 +126,7 @@ struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
u32 lower, u32 upper,
u32 node, u32 key);
void tipc_nametbl_subscribe(struct tipc_subscription *s);
bool tipc_nametbl_subscribe(struct tipc_subscription *s);
void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
int tipc_nametbl_init(struct net *net);
void tipc_nametbl_stop(struct net *net);

View File

@ -252,6 +252,8 @@ int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
u64 *w0 = (u64 *)&node_id[0];
u64 *w1 = (u64 *)&node_id[8];
if (!attrs[TIPC_NLA_NET_NODEID_W1])
return -EINVAL;
*w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
*w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
tipc_net_init(net, node_id, 0);

View File

@ -79,7 +79,10 @@ const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
[TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
[TIPC_NLA_NET_ID] = { .type = NLA_U32 }
[TIPC_NLA_NET_ID] = { .type = NLA_U32 },
[TIPC_NLA_NET_ADDR] = { .type = NLA_U32 },
[TIPC_NLA_NET_NODEID] = { .type = NLA_U64 },
[TIPC_NLA_NET_NODEID_W1] = { .type = NLA_U64 },
};
const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {

View File

@ -2232,8 +2232,8 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
struct net *net = sock_net(skb->sk);
u32 prev_bearer = cb->args[0];
struct tipc_nl_msg msg;
int bearer_id;
int err;
int i;
if (prev_bearer == MAX_BEARERS)
return 0;
@ -2243,16 +2243,13 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
msg.seq = cb->nlh->nlmsg_seq;
rtnl_lock();
for (i = prev_bearer; i < MAX_BEARERS; i++) {
prev_bearer = i;
for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
err = __tipc_nl_add_monitor(net, &msg, prev_bearer);
if (err)
goto out;
break;
}
out:
rtnl_unlock();
cb->args[0] = prev_bearer;
cb->args[0] = bearer_id;
return skb->len;
}
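
Two defects fed the infinite loop: prev_bearer was updated before the error check, and the stale value was passed to __tipc_nl_add_monitor(), so a failing bearer made the dump restart at the same index forever. The rewrite iterates bearer_id directly and stores the stopping point back into cb->args[0], the standard resumable-dump cursor. A generic sketch of that cursor contract (values illustrative):

#include <stdio.h>

#define MAX_ITEMS 8

static int emit_item(int i)
{
	return i == 5 ? -1 : 0;	/* pretend the message buffer fills at item 5 */
}

/* Walk items from *cursor; on failure leave *cursor at the failing
 * item so the next invocation resumes there instead of restarting. */
static void dump(int *cursor)
{
	int i;

	for (i = *cursor; i < MAX_ITEMS; i++) {
		if (emit_item(i) < 0)
			break;
	}
	*cursor = i;
}

int main(void)
{
	int cursor = 0;

	dump(&cursor);
	printf("resume point after first pass: %d\n", cursor);	/* 5 */
	return 0;
}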

View File

@ -1278,7 +1278,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
struct tipc_msg *hdr = &tsk->phdr;
struct tipc_name_seq *seq;
struct sk_buff_head pkts;
u32 dnode, dport;
u32 dport, dnode = 0;
u32 type, inst;
int mtu, rc;
@ -1348,6 +1348,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
msg_set_destnode(hdr, dnode);
msg_set_destport(hdr, dest->addr.id.ref);
msg_set_hdr_sz(hdr, BASIC_H_SIZE);
} else {
return -EINVAL;
}
/* Block or return if destination link is congested */

View File

@ -153,7 +153,10 @@ struct tipc_subscription *tipc_sub_subscribe(struct net *net,
memcpy(&sub->evt.s, s, sizeof(*s));
spin_lock_init(&sub->lock);
kref_init(&sub->kref);
tipc_nametbl_subscribe(sub);
if (!tipc_nametbl_subscribe(sub)) {
kfree(sub);
return NULL;
}
timer_setup(&sub->timer, tipc_sub_timeout, 0);
timeout = tipc_sub_read(&sub->evt.s, timeout);
if (timeout != TIPC_WAIT_FOREVER)
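
tipc_nametbl_subscribe() now reports failure, and the caller frees the half-built subscription instead of arming a timer on an object that was never registered. The generic constructor-unwind shape, as a sketch:

#include <stdlib.h>

struct sub {
	int armed;
};

static int register_sub(struct sub *s)
{
	(void)s;
	return 0;	/* stub: the real registration may fail */
}

/* Allocate, then register; if registration fails, undo the partial
 * construction and report failure -- never arm timers on a dead object. */
static struct sub *sub_create(void)
{
	struct sub *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	if (register_sub(s) != 0) {
		free(s);
		return NULL;
	}
	s->armed = 1;	/* follow-on work starts only after success */
	return s;
}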

View File

@ -41,6 +41,8 @@
#include <net/strparser.h>
#include <net/tls.h>
#define MAX_IV_SIZE TLS_CIPHER_AES_GCM_128_IV_SIZE
static int tls_do_decryption(struct sock *sk,
struct scatterlist *sgin,
struct scatterlist *sgout,
@ -673,7 +675,7 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + tls_ctx->rx.iv_size];
char iv[TLS_CIPHER_AES_GCM_128_SALT_SIZE + MAX_IV_SIZE];
struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2];
struct scatterlist *sgin = &sgin_arr[0];
struct strp_msg *rxm = strp_msg(skb);
@ -1094,6 +1096,12 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
goto free_priv;
}
/* Sanity-check the IV size for stack allocations. */
if (iv_size > MAX_IV_SIZE) {
rc = -EINVAL;
goto free_priv;
}
cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
cctx->tag_size = tag_size;
cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
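
The decrypt path used to size iv[] with tls_ctx->rx.iv_size, a variable-length array fed by configuration; the fix pins the stack buffer to a compile-time maximum and rejects larger IVs once at setup. A sketch of that pairing (the 8-byte cap mirrors TLS_CIPHER_AES_GCM_128_IV_SIZE; the structs are illustrative):

#include <string.h>
#include <errno.h>

#define MAX_IV_SIZE 8	/* compile-time cap replacing the VLA */

struct cipher_state {
	size_t iv_size;
};

/* Validate once at setup so later stack buffers can use the maximum. */
static int setup_cipher(struct cipher_state *st, size_t iv_size)
{
	if (iv_size > MAX_IV_SIZE)
		return -EINVAL;
	st->iv_size = iv_size;
	return 0;
}

static void build_iv(const struct cipher_state *st,
		     const unsigned char *salt, size_t salt_len)
{
	unsigned char iv[4 + MAX_IV_SIZE];	/* fixed size, never a VLA */

	if (salt_len > sizeof(iv))
		salt_len = sizeof(iv);
	memcpy(iv, salt, salt_len);
	/* ... append the st->iv_size bytes of per-record IV here ... */
	(void)st;
}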

View File

@ -2018,7 +2018,13 @@ const struct vsock_transport *vsock_core_get_transport(void)
}
EXPORT_SYMBOL_GPL(vsock_core_get_transport);
static void __exit vsock_exit(void)
{
/* Do nothing. This function makes this module removable. */
}
module_init(vsock_init_tables);
module_exit(vsock_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Socket Family");
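
A module that never calls module_exit() is marked permanent and rmmod refuses to unload it, which is how af_vsock.ko accidentally became unremovable; an intentionally empty handler is enough to restore removability. The minimal skeleton of the idiom (a toy module, not the vsock code):

#include <linux/module.h>
#include <linux/init.h>

static int __init demo_init(void)
{
	return 0;
}

/* Even an empty exit handler matters: without module_exit() the
 * module counts as permanent and cannot be unloaded. */
static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");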

View File

@ -5,7 +5,7 @@ CFLAGS = -Wall -Wl,--no-as-needed -O2 -g
CFLAGS += -I../../../../usr/include/
TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh
TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh
TEST_PROGS += fib_tests.sh fib-onlink-tests.sh in_netns.sh pmtu.sh
TEST_GEN_FILES = socket
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa