
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull more networking fixes from David Miller:

 1) Fix brcmfmac build with older gcc, from Arend van Spriel.

 2) IRQ values unintentionally truncated to u8 in mlx5 driver, from
    Doron Tsur.

 3) Fix build warnings wrt tcp cgroup changes, from Geert Uytterhoeven.

 4) Limit deep recursion in ovs stack, from Hannes Frederic Sowa.

 5) at803x phy driver bug fixes, from Martin Blumenstingl.

 6) Fix TSO handling in hns driver, from Daode Huang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (22 commits)
  ovs: limit ovs recursions in ovs_execute_actions to not corrupt stack
  team: Replace rcu_read_lock with a mutex in team_vlan_rx_kill_vid
  net: hns: bug fix about hisilicon TSO BD mode
  brcmfmac: fix BRCMF_FW_NVRAM_DEF macro for older gcc compilers
  net: phy: at803x: Add the interrupt register bit definitions
  net: phy: at803x: Clean up duplicate register definitions
  net: phy: at803x: Allow specifying the RGMII RX clock delay via phy mode
  net: phy: at803x: Don't set gbit features for the AR8030 phy
  arm64: bpf: add extra pass to handle faulty codegen
  arm64: insn: remove BUG_ON from codegen
  sctp: the temp asoc's transports should not be hashed/unhashed
  net/mlx5_core: Fix trimming down IRQ number
  tcp_memcontrol: Forward declare cgroup_subsys and mem_cgroup stucts
  batman-adv: Drop immediate orig_node free function
  batman-adv: Drop immediate batadv_hard_iface free function
  batman-adv: Drop immediate neigh_ifinfo free function
  batman-adv: Drop immediate batadv_hardif_neigh_node free function
  batman-adv: Drop immediate batadv_neigh_node free function
  batman-adv: Drop immediate batadv_orig_ifinfo free function
  batman-adv: Avoid recursive call_rcu for batadv_nc_node
  ...
Linus Torvalds 2016-01-18 12:35:14 -08:00
commit 48f58ba9cb
24 changed files with 405 additions and 295 deletions


@@ -2,7 +2,7 @@
* Copyright (C) 2013 Huawei Ltd.
* Author: Jiang Liu <liuj97@gmail.com>
*
* Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
* Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -363,6 +363,9 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
u32 immlo, immhi, mask;
int shift;
if (insn == AARCH64_BREAK_FAULT)
return AARCH64_BREAK_FAULT;
switch (type) {
case AARCH64_INSN_IMM_ADR:
shift = 0;
@@ -377,7 +380,7 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
type);
return 0;
return AARCH64_BREAK_FAULT;
}
}
@@ -394,9 +397,12 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
{
int shift;
if (insn == AARCH64_BREAK_FAULT)
return AARCH64_BREAK_FAULT;
if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
pr_err("%s: unknown register encoding %d\n", __func__, reg);
return 0;
return AARCH64_BREAK_FAULT;
}
switch (type) {
@@ -417,7 +423,7 @@ static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
default:
pr_err("%s: unknown register type encoding %d\n", __func__,
type);
return 0;
return AARCH64_BREAK_FAULT;
}
insn &= ~(GENMASK(4, 0) << shift);
@@ -446,7 +452,7 @@ static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
break;
default:
pr_err("%s: unknown size encoding %d\n", __func__, type);
return 0;
return AARCH64_BREAK_FAULT;
}
insn &= ~GENMASK(31, 30);
@@ -460,14 +466,17 @@ static inline long branch_imm_common(unsigned long pc, unsigned long addr,
{
long offset;
/*
* PC: A 64-bit Program Counter holding the address of the current
* instruction. A64 instructions must be word-aligned.
*/
BUG_ON((pc & 0x3) || (addr & 0x3));
if ((pc & 0x3) || (addr & 0x3)) {
pr_err("%s: A64 instructions must be word aligned\n", __func__);
return range;
}
offset = ((long)addr - (long)pc);
BUG_ON(offset < -range || offset >= range);
if (offset < -range || offset >= range) {
pr_err("%s: offset out of range\n", __func__);
return range;
}
return offset;
}
@@ -484,6 +493,8 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
* texts are within +/-128M.
*/
offset = branch_imm_common(pc, addr, SZ_128M);
if (offset >= SZ_128M)
return AARCH64_BREAK_FAULT;
switch (type) {
case AARCH64_INSN_BRANCH_LINK:
@@ -493,7 +504,7 @@ u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
insn = aarch64_insn_get_b_value();
break;
default:
BUG_ON(1);
pr_err("%s: unknown branch encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -510,6 +521,8 @@ u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
long offset;
offset = branch_imm_common(pc, addr, SZ_1M);
if (offset >= SZ_1M)
return AARCH64_BREAK_FAULT;
switch (type) {
case AARCH64_INSN_BRANCH_COMP_ZERO:
@@ -519,7 +532,7 @@ u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
insn = aarch64_insn_get_cbnz_value();
break;
default:
BUG_ON(1);
pr_err("%s: unknown branch encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -530,7 +543,7 @@ u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
insn |= AARCH64_INSN_SF_BIT;
break;
default:
BUG_ON(1);
pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
@@ -550,7 +563,10 @@ u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
insn = aarch64_insn_get_bcond_value();
BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL);
if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
pr_err("%s: unknown condition encoding %d\n", __func__, cond);
return AARCH64_BREAK_FAULT;
}
insn |= cond;
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
@@ -583,7 +599,7 @@ u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
insn = aarch64_insn_get_ret_value();
break;
default:
BUG_ON(1);
pr_err("%s: unknown branch encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -606,7 +622,7 @@ u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
insn = aarch64_insn_get_str_reg_value();
break;
default:
BUG_ON(1);
pr_err("%s: unknown load/store encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -645,26 +661,30 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
insn = aarch64_insn_get_stp_post_value();
break;
default:
BUG_ON(1);
pr_err("%s: unknown load/store encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
/* offset must be multiples of 4 in the range [-256, 252] */
BUG_ON(offset & 0x3);
BUG_ON(offset < -256 || offset > 252);
if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
__func__, offset);
return AARCH64_BREAK_FAULT;
}
shift = 2;
break;
case AARCH64_INSN_VARIANT_64BIT:
/* offset must be multiples of 8 in the range [-512, 504] */
BUG_ON(offset & 0x7);
BUG_ON(offset < -512 || offset > 504);
if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
__func__, offset);
return AARCH64_BREAK_FAULT;
}
shift = 3;
insn |= AARCH64_INSN_SF_BIT;
break;
default:
BUG_ON(1);
pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
@@ -702,7 +722,7 @@ u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
insn = aarch64_insn_get_subs_imm_value();
break;
default:
BUG_ON(1);
pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -713,11 +733,14 @@ u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
insn |= AARCH64_INSN_SF_BIT;
break;
default:
BUG_ON(1);
pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
BUG_ON(imm & ~(SZ_4K - 1));
if (imm & ~(SZ_4K - 1)) {
pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
return AARCH64_BREAK_FAULT;
}
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
@@ -746,7 +769,7 @@ u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
insn = aarch64_insn_get_sbfm_value();
break;
default:
BUG_ON(1);
pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -759,12 +782,18 @@ u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
mask = GENMASK(5, 0);
break;
default:
BUG_ON(1);
pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
BUG_ON(immr & ~mask);
BUG_ON(imms & ~mask);
if (immr & ~mask) {
pr_err("%s: invalid immr encoding %d\n", __func__, immr);
return AARCH64_BREAK_FAULT;
}
if (imms & ~mask) {
pr_err("%s: invalid imms encoding %d\n", __func__, imms);
return AARCH64_BREAK_FAULT;
}
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
@@ -793,23 +822,33 @@ u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
insn = aarch64_insn_get_movn_value();
break;
default:
BUG_ON(1);
pr_err("%s: unknown movewide encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
BUG_ON(imm & ~(SZ_64K - 1));
if (imm & ~(SZ_64K - 1)) {
pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
return AARCH64_BREAK_FAULT;
}
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
BUG_ON(shift != 0 && shift != 16);
if (shift != 0 && shift != 16) {
pr_err("%s: invalid shift encoding %d\n", __func__,
shift);
return AARCH64_BREAK_FAULT;
}
break;
case AARCH64_INSN_VARIANT_64BIT:
insn |= AARCH64_INSN_SF_BIT;
BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
shift != 48);
if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
pr_err("%s: invalid shift encoding %d\n", __func__,
shift);
return AARCH64_BREAK_FAULT;
}
break;
default:
BUG_ON(1);
pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
@@ -843,20 +882,28 @@ u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
insn = aarch64_insn_get_subs_value();
break;
default:
BUG_ON(1);
pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
BUG_ON(shift & ~(SZ_32 - 1));
if (shift & ~(SZ_32 - 1)) {
pr_err("%s: invalid shift encoding %d\n", __func__,
shift);
return AARCH64_BREAK_FAULT;
}
break;
case AARCH64_INSN_VARIANT_64BIT:
insn |= AARCH64_INSN_SF_BIT;
BUG_ON(shift & ~(SZ_64 - 1));
if (shift & ~(SZ_64 - 1)) {
pr_err("%s: invalid shift encoding %d\n", __func__,
shift);
return AARCH64_BREAK_FAULT;
}
break;
default:
BUG_ON(1);
pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
@@ -885,11 +932,15 @@ u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
insn = aarch64_insn_get_rev32_value();
break;
case AARCH64_INSN_DATA1_REVERSE_64:
BUG_ON(variant != AARCH64_INSN_VARIANT_64BIT);
if (variant != AARCH64_INSN_VARIANT_64BIT) {
pr_err("%s: invalid variant for reverse64 %d\n",
__func__, variant);
return AARCH64_BREAK_FAULT;
}
insn = aarch64_insn_get_rev64_value();
break;
default:
BUG_ON(1);
pr_err("%s: unknown data1 encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -900,7 +951,7 @@ u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
insn |= AARCH64_INSN_SF_BIT;
break;
default:
BUG_ON(1);
pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
@@ -937,7 +988,7 @@ u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
insn = aarch64_insn_get_rorv_value();
break;
default:
BUG_ON(1);
pr_err("%s: unknown data2 encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -948,7 +999,7 @@ u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
insn |= AARCH64_INSN_SF_BIT;
break;
default:
BUG_ON(1);
pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
@@ -976,7 +1027,7 @@ u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
insn = aarch64_insn_get_msub_value();
break;
default:
BUG_ON(1);
pr_err("%s: unknown data3 encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
@@ -987,7 +1038,7 @@ u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
insn |= AARCH64_INSN_SF_BIT;
break;
default:
BUG_ON(1);
pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
@@ -1037,20 +1088,28 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
insn = aarch64_insn_get_bics_value();
break;
default:
BUG_ON(1);
pr_err("%s: unknown logical encoding %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
switch (variant) {
case AARCH64_INSN_VARIANT_32BIT:
BUG_ON(shift & ~(SZ_32 - 1));
if (shift & ~(SZ_32 - 1)) {
pr_err("%s: invalid shift encoding %d\n", __func__,
shift);
return AARCH64_BREAK_FAULT;
}
break;
case AARCH64_INSN_VARIANT_64BIT:
insn |= AARCH64_INSN_SF_BIT;
BUG_ON(shift & ~(SZ_64 - 1));
if (shift & ~(SZ_64 - 1)) {
pr_err("%s: invalid shift encoding %d\n", __func__,
shift);
return AARCH64_BREAK_FAULT;
}
break;
default:
BUG_ON(1);
pr_err("%s: unknown variant encoding %d\n", __func__, variant);
return AARCH64_BREAK_FAULT;
}
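
Net effect of the insn.c changes above: every BUG_ON() in the instruction encoders becomes a pr_err() plus an AARCH64_BREAK_FAULT return, and the low-level encode helpers now pass an already-poisoned AARCH64_BREAK_FAULT value straight through, so one bad operand anywhere in a chain of encode calls poisons the final instruction word instead of panicking the machine.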


@@ -1,7 +1,7 @@
/*
* BPF JIT compiler for ARM64
*
* Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
* Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -737,6 +737,20 @@ static int build_body(struct jit_ctx *ctx)
return 0;
}
static int validate_code(struct jit_ctx *ctx)
{
int i;
for (i = 0; i < ctx->idx; i++) {
u32 a64_insn = le32_to_cpu(ctx->image[i]);
if (a64_insn == AARCH64_BREAK_FAULT)
return -1;
}
return 0;
}
static inline void bpf_flush_icache(void *start, void *end)
{
flush_icache_range((unsigned long)start, (unsigned long)end);
@@ -799,6 +813,12 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
build_epilogue(&ctx);
/* 3. Extra pass to validate JITed code. */
if (validate_code(&ctx)) {
bpf_jit_binary_free(header);
goto out;
}
/* And we're done. */
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, image_size, 2, ctx.image);
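
The companion bpf_jit_comp.c change gives the poison somewhere to go: validate_code() scans the finished image once, and on failure the JIT frees the image and bails out (the program then simply runs in the interpreter). A minimal userspace sketch of the same poison-and-validate pattern, with illustrative names rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

#define INSN_FAULT 0xbad0bad0u		/* stand-in for AARCH64_BREAK_FAULT */

/* Encoder: reports bad operands and propagates an already-poisoned insn. */
static uint32_t encode_field(uint32_t insn, unsigned int field)
{
	if (insn == INSN_FAULT)		/* propagate an earlier failure */
		return INSN_FAULT;
	if (field > 31) {		/* report instead of BUG_ON() */
		fprintf(stderr, "encode_field: bad operand %u\n", field);
		return INSN_FAULT;
	}
	return insn | field;
}

/* Extra pass: one scan over the finished image catches any failure. */
static int validate_code(const uint32_t *image, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		if (image[i] == INSN_FAULT)
			return -1;
	return 0;
}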


@@ -756,7 +756,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
int uninitialized_var(index);
int uninitialized_var(inlen);
int cqe_size;
int irqn;
unsigned int irqn;
int eqn;
int err;


@@ -542,39 +542,50 @@ bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
}
static void
bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
u32 sop_ci, u32 nvecs, u32 last_fraglen)
bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
{
struct bna_rcb *rcb;
struct bnad *bnad;
u32 ci, vec, len, totlen = 0;
struct bnad_rx_unmap_q *unmap_q;
struct bnad_rx_unmap *unmap;
struct bna_cq_entry *cq, *cmpl;
u32 ci, pi, totlen = 0;
cq = ccb->sw_q;
pi = ccb->producer_index;
cmpl = &cq[pi];
rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
unmap_q = rcb->unmap_q;
bnad = rcb->bnad;
ci = rcb->consumer_index;
/* prefetch header */
prefetch(page_address(unmap_q->unmap[sop_ci].page) +
unmap_q->unmap[sop_ci].page_offset);
prefetch(page_address(unmap_q->unmap[ci].page) +
unmap_q->unmap[ci].page_offset);
while (nvecs--) {
struct bnad_rx_unmap *unmap;
u32 len;
for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
unmap = &unmap_q->unmap[ci];
BNA_QE_INDX_INC(ci, rcb->q_depth);
dma_unmap_page(&bnad->pcidev->dev,
dma_unmap_addr(&unmap->vector, dma_addr),
unmap->vector.len, DMA_FROM_DEVICE);
dma_unmap_addr(&unmap->vector, dma_addr),
unmap->vector.len, DMA_FROM_DEVICE);
len = (vec == nvecs) ?
last_fraglen : unmap->vector.len;
len = ntohs(cmpl->length);
skb->truesize += unmap->vector.len;
totlen += len;
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
unmap->page, unmap->page_offset, len);
unmap->page, unmap->page_offset, len);
unmap->page = NULL;
unmap->vector.len = 0;
BNA_QE_INDX_INC(pi, ccb->q_depth);
cmpl = &cq[pi];
}
skb->len += totlen;
@@ -704,7 +715,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
bnad_cq_setup_skb(bnad, skb, unmap, len);
else
bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
bnad_cq_setup_skb_frags(ccb, skb, nvecs);
rcb->rxq->rx_packets++;
rcb->rxq->rx_bytes += totlen;


@@ -369,8 +369,17 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
HNS_RCB_COMMON_ENDIAN);
dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0);
dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1);
if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0);
dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1);
} else {
dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
RCB_COM_CFG_FNA_B, false);
dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
RCB_COM_CFG_FA_B, true);
dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_TSO_MODE_REG,
RCB_COM_TSO_MODE_B, HNS_TSO_MODE_8BD_32K);
}
return 0;
}


@@ -54,6 +54,9 @@ struct rcb_common_cb;
#define HNS_DUMP_REG_NUM 500
#define HNS_STATIC_REG_NUM 12
#define HNS_TSO_MODE_8BD_32K 1
#define HNS_TSO_MDOE_4BD_16K 0
enum rcb_int_flag {
RCB_INT_FLAG_TX = 0x1,
RCB_INT_FLAG_RX = (0x1 << 1),


@@ -363,6 +363,8 @@
#define RCB_COM_CFG_FA_REG 0x3C
#define RCB_COM_CFG_PKT_TC_BP_REG 0x40
#define RCB_COM_CFG_PPE_TNL_CLKEN_REG 0x44
#define RCBV2_COM_CFG_USER_REG 0x30
#define RCBV2_COM_CFG_TSO_MODE_REG 0x50
#define RCB_COM_INTMSK_TX_PKT_REG 0x3A0
#define RCB_COM_RINT_TX_PKT_REG 0x3A8
@@ -860,6 +862,9 @@
#define PPE_COMMON_CNT_CLR_CE_B 0
#define PPE_COMMON_CNT_CLR_SNAP_EN_B 1
#define RCB_COM_TSO_MODE_B 0
#define RCB_COM_CFG_FNA_B 1
#define RCB_COM_CFG_FA_B 0
#define GMAC_DUPLEX_TYPE_B 0


@@ -752,7 +752,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
int eqn_not_used;
int irqn;
unsigned int irqn;
int err;
u32 i;
@@ -806,7 +806,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
void *in;
void *cqc;
int inlen;
int irqn_not_used;
unsigned int irqn_not_used;
int eqn;
int err;
@@ -1517,7 +1517,7 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
int eqn_not_used;
int irqn;
unsigned int irqn;
int err;
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,


@@ -585,7 +585,8 @@ static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
mlx5_irq_clear_affinity_hint(mdev, i);
}
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
struct mlx5_eq *eq, *n;


@@ -20,10 +20,21 @@
#include <linux/gpio/consumer.h>
#define AT803X_INTR_ENABLE 0x12
#define AT803X_INTR_ENABLE_AUTONEG_ERR BIT(15)
#define AT803X_INTR_ENABLE_SPEED_CHANGED BIT(14)
#define AT803X_INTR_ENABLE_DUPLEX_CHANGED BIT(13)
#define AT803X_INTR_ENABLE_PAGE_RECEIVED BIT(12)
#define AT803X_INTR_ENABLE_LINK_FAIL BIT(11)
#define AT803X_INTR_ENABLE_LINK_SUCCESS BIT(10)
#define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE BIT(5)
#define AT803X_INTR_ENABLE_POLARITY_CHANGED BIT(1)
#define AT803X_INTR_ENABLE_WOL BIT(0)
#define AT803X_INTR_STATUS 0x13
#define AT803X_SMART_SPEED 0x14
#define AT803X_LED_CONTROL 0x18
#define AT803X_WOL_ENABLE 0x01
#define AT803X_DEVICE_ADDR 0x03
#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
@@ -31,13 +42,15 @@
#define AT803X_MMD_ACCESS_CONTROL 0x0D
#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
#define AT803X_FUNC_DATA 0x4003
#define AT803X_INER 0x0012
#define AT803X_INER_INIT 0xec00
#define AT803X_INSR 0x0013
#define AT803X_DEBUG_ADDR 0x1D
#define AT803X_DEBUG_DATA 0x1E
#define AT803X_DEBUG_SYSTEM_MODE_CTRL 0x05
#define AT803X_DEBUG_RGMII_TX_CLK_DLY BIT(8)
#define AT803X_DEBUG_REG_0 0x00
#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
#define AT803X_DEBUG_REG_5 0x05
#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
@@ -61,6 +74,46 @@ struct at803x_context {
u16 led_control;
};
static int at803x_debug_reg_read(struct phy_device *phydev, u16 reg)
{
int ret;
ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
if (ret < 0)
return ret;
return phy_read(phydev, AT803X_DEBUG_DATA);
}
static int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
u16 clear, u16 set)
{
u16 val;
int ret;
ret = at803x_debug_reg_read(phydev, reg);
if (ret < 0)
return ret;
val = ret & 0xffff;
val &= ~clear;
val |= set;
return phy_write(phydev, AT803X_DEBUG_DATA, val);
}
static inline int at803x_enable_rx_delay(struct phy_device *phydev)
{
return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, 0,
AT803X_DEBUG_RX_CLK_DLY_EN);
}
static inline int at803x_enable_tx_delay(struct phy_device *phydev)
{
return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, 0,
AT803X_DEBUG_TX_CLK_DLY_EN);
}
/* save relevant PHY registers to private copy */
static void at803x_context_save(struct phy_device *phydev,
struct at803x_context *context)
@@ -119,14 +172,14 @@ static int at803x_set_wol(struct phy_device *phydev,
}
value = phy_read(phydev, AT803X_INTR_ENABLE);
value |= AT803X_WOL_ENABLE;
value |= AT803X_INTR_ENABLE_WOL;
ret = phy_write(phydev, AT803X_INTR_ENABLE, value);
if (ret)
return ret;
value = phy_read(phydev, AT803X_INTR_STATUS);
} else {
value = phy_read(phydev, AT803X_INTR_ENABLE);
value &= (~AT803X_WOL_ENABLE);
value &= (~AT803X_INTR_ENABLE_WOL);
ret = phy_write(phydev, AT803X_INTR_ENABLE, value);
if (ret)
return ret;
@@ -145,7 +198,7 @@ static void at803x_get_wol(struct phy_device *phydev,
wol->wolopts = 0;
value = phy_read(phydev, AT803X_INTR_ENABLE);
if (value & AT803X_WOL_ENABLE)
if (value & AT803X_INTR_ENABLE_WOL)
wol->wolopts |= WAKE_MAGIC;
}
@@ -157,7 +210,7 @@ static int at803x_suspend(struct phy_device *phydev)
mutex_lock(&phydev->lock);
value = phy_read(phydev, AT803X_INTR_ENABLE);
wol_enabled = value & AT803X_WOL_ENABLE;
wol_enabled = value & AT803X_INTR_ENABLE_WOL;
value = phy_read(phydev, MII_BMCR);
@@ -217,14 +270,17 @@ static int at803x_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
ret = phy_write(phydev, AT803X_DEBUG_ADDR,
AT803X_DEBUG_SYSTEM_MODE_CTRL);
if (ret)
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
ret = at803x_enable_rx_delay(phydev);
if (ret < 0)
return ret;
ret = phy_write(phydev, AT803X_DEBUG_DATA,
AT803X_DEBUG_RGMII_TX_CLK_DLY);
if (ret)
}
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
ret = at803x_enable_tx_delay(phydev);
if (ret < 0)
return ret;
}
@@ -235,7 +291,7 @@ static int at803x_ack_interrupt(struct phy_device *phydev)
{
int err;
err = phy_read(phydev, AT803X_INSR);
err = phy_read(phydev, AT803X_INTR_STATUS);
return (err < 0) ? err : 0;
}
@@ -245,13 +301,19 @@ static int at803x_config_intr(struct phy_device *phydev)
int err;
int value;
value = phy_read(phydev, AT803X_INER);
value = phy_read(phydev, AT803X_INTR_ENABLE);
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, AT803X_INER,
value | AT803X_INER_INIT);
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
value |= AT803X_INTR_ENABLE_AUTONEG_ERR;
value |= AT803X_INTR_ENABLE_SPEED_CHANGED;
value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
value |= AT803X_INTR_ENABLE_LINK_FAIL;
value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
err = phy_write(phydev, AT803X_INTR_ENABLE, value);
}
else
err = phy_write(phydev, AT803X_INER, 0);
err = phy_write(phydev, AT803X_INTR_ENABLE, 0);
return err;
}
@@ -322,7 +384,7 @@ static struct phy_driver at803x_driver[] = {
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
.resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
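
For reference, the delay handling implemented by the rewritten at803x_config_init() above follows the standard phylib meaning of the RGMII interface modes:

/*
 * PHY_INTERFACE_MODE_RGMII_RXID -> enable the RGMII RX clock delay
 * PHY_INTERFACE_MODE_RGMII_TXID -> enable the RGMII TX clock delay
 * PHY_INTERFACE_MODE_RGMII_ID   -> enable both delays
 * PHY_INTERFACE_MODE_RGMII      -> neither delay is enabled by this code
 */

Each enable is an indirect read-modify-write: the debug register index is first written to AT803X_DEBUG_ADDR (0x1D), then the value is accessed through AT803X_DEBUG_DATA (0x1E), as the new at803x_debug_reg_read()/at803x_debug_reg_mask() helpers show.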


@@ -1872,10 +1872,10 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
struct team *team = netdev_priv(dev);
struct team_port *port;
rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list)
mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list)
vlan_vid_del(port->dev, proto, vid);
rcu_read_unlock();
mutex_unlock(&team->lock);
return 0;
}
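
The team fix trades RCU for the team mutex because vlan_vid_del() can sleep, which is not allowed inside an rcu_read_lock() section; team->lock already serializes changes to team->port_list, so the plain list iterator is safe under it.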


@@ -47,8 +47,7 @@ static const char BRCM_ ## fw_nvram_name ## _FIRMWARE_NAME[] = \
BRCMF_FW_DEFAULT_PATH fw; \
static const char BRCM_ ## fw_nvram_name ## _NVRAM_NAME[] = \
BRCMF_FW_DEFAULT_PATH nvram; \
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH fw); \
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH nvram)
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH fw);
#define BRCMF_FW_DEF(fw_name, fw) \
static const char BRCM_ ## fw_name ## _FIRMWARE_NAME[] = \
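
The dropped second MODULE_FIRMWARE() is what tripped older gcc: MODULE_FIRMWARE() creates an internal symbol via __UNIQUE_ID(), and on compilers without __COUNTER__ support the unique name falls back to __LINE__, so two expansions inside one macro (and therefore on one source line) collide. Keeping one MODULE_FIRMWARE() per macro sidesteps that. (This is the usual mechanism behind this class of failure; the shortlog above only records the older-gcc build fix.)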


@@ -45,7 +45,7 @@ struct mlx5_core_cq {
atomic_t refcount;
struct completion free;
unsigned vector;
int irqn;
unsigned int irqn;
void (*comp) (struct mlx5_core_cq *);
void (*event) (struct mlx5_core_cq *, enum mlx5_event);
struct mlx5_uar *uar;


@@ -303,7 +303,7 @@ struct mlx5_eq {
u32 cons_index;
struct mlx5_buf buf;
int size;
u8 irqn;
unsigned int irqn;
u8 eqn;
int nent;
u64 mask;
@@ -783,7 +783,8 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
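
The underlying bug in this group of mlx5 hunks is plain integer truncation: Linux IRQ numbers are full-width ints, and the old u8 field silently kept only the low byte once the system handed out vectors above 255. A two-line userspace illustration:

#include <stdio.h>

int main(void)
{
	unsigned int irqn = 320;	/* plausible on a large multi-queue system */
	unsigned char truncated = irqn;	/* the old u8 field keeps 320 % 256 = 64 */

	printf("allocated IRQ %u, stored IRQ %u\n", irqn, truncated);
	return 0;
}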


@@ -1,6 +1,9 @@
#ifndef _TCP_MEMCG_H
#define _TCP_MEMCG_H
struct cgroup_subsys;
struct mem_cgroup;
int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
void tcp_destroy_cgroup(struct mem_cgroup *memcg);
#endif /* _TCP_MEMCG_H */


@@ -127,21 +127,17 @@ batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
}
/* finally deinitialize the claim */
static void batadv_claim_free_rcu(struct rcu_head *rcu)
static void batadv_claim_release(struct batadv_bla_claim *claim)
{
struct batadv_bla_claim *claim;
claim = container_of(rcu, struct batadv_bla_claim, rcu);
batadv_backbone_gw_free_ref(claim->backbone_gw);
kfree(claim);
kfree_rcu(claim, rcu);
}
/* free a claim, call claim_free_rcu if its the last reference */
static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
{
if (atomic_dec_and_test(&claim->refcount))
call_rcu(&claim->rcu, batadv_claim_free_rcu);
batadv_claim_release(claim);
}
/**
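
The same conversion repeats across the batman-adv patches that follow: teardown work that used to run inside a call_rcu() callback moves into a synchronous *_release() function, and only the final free is deferred with kfree_rcu(). In sketch form, assuming an illustrative struct obj and a hypothetical put_dep() helper (none of these names are batman-adv's):

/* Illustrative type only. */
struct obj {
	struct dep *dep;	/* counted dependency, dropped via put_dep() */
	atomic_t refcount;
	struct rcu_head rcu;
};

/* Before: everything deferred into the RCU callback. */
static void obj_free_rcu(struct rcu_head *rcu)
{
	struct obj *obj = container_of(rcu, struct obj, rcu);

	put_dep(obj->dep);	/* may itself call_rcu(): a chained callback */
	kfree(obj);
}

static void obj_free_ref_old(struct obj *obj)
{
	if (atomic_dec_and_test(&obj->refcount))
		call_rcu(&obj->rcu, obj_free_rcu);
}

/* After: drop references immediately, defer only the kfree. */
static void obj_release(struct obj *obj)
{
	put_dep(obj->dep);	/* runs in the caller's context, may take locks */
	kfree_rcu(obj, rcu);	/* rcu_barrier() waits for this */
}

static void obj_free_ref(struct obj *obj)
{
	if (atomic_dec_and_test(&obj->refcount))
		obj_release(obj);
}

The motivation is the comment deleted from translation-table.c further down: rcu_barrier() waits for callbacks already queued, not for callbacks that those callbacks queue, so recursive call_rcu() chains could still be in flight at teardown.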


@@ -75,18 +75,6 @@ batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
call_rcu(&hard_iface->rcu, batadv_hardif_free_rcu);
}
/**
* batadv_hardif_free_ref_now - decrement the hard interface refcounter and
* possibly free it (without rcu callback)
* @hard_iface: the hard interface to free
*/
static inline void
batadv_hardif_free_ref_now(struct batadv_hard_iface *hard_iface)
{
if (atomic_dec_and_test(&hard_iface->refcount))
batadv_hardif_free_rcu(&hard_iface->rcu);
}
static inline struct batadv_hard_iface *
batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
{


@@ -203,28 +203,25 @@ void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
}
/**
* batadv_nc_node_free_rcu - rcu callback to free an nc node and remove
* its refcount on the orig_node
* @rcu: rcu pointer of the nc node
* batadv_nc_node_release - release nc_node from lists and queue for free after
* rcu grace period
* @nc_node: the nc node to free
*/
static void batadv_nc_node_free_rcu(struct rcu_head *rcu)
static void batadv_nc_node_release(struct batadv_nc_node *nc_node)
{
struct batadv_nc_node *nc_node;
nc_node = container_of(rcu, struct batadv_nc_node, rcu);
batadv_orig_node_free_ref(nc_node->orig_node);
kfree(nc_node);
kfree_rcu(nc_node, rcu);
}
/**
* batadv_nc_node_free_ref - decrements the nc node refcounter and possibly
* frees it
* batadv_nc_node_free_ref - decrement the nc node refcounter and possibly
* release it
* @nc_node: the nc node to free
*/
static void batadv_nc_node_free_ref(struct batadv_nc_node *nc_node)
{
if (atomic_dec_and_test(&nc_node->refcount))
call_rcu(&nc_node->rcu, batadv_nc_node_free_rcu);
batadv_nc_node_release(nc_node);
}
/**


@@ -163,148 +163,101 @@ err:
}
/**
* batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
* @rcu: rcu pointer of the neigh_ifinfo object
*/
static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
{
struct batadv_neigh_ifinfo *neigh_ifinfo;
neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);
if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);
kfree(neigh_ifinfo);
}
/**
* batadv_neigh_ifinfo_free_now - decrement the refcounter and possibly free
* the neigh_ifinfo (without rcu callback)
* batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
* free after rcu grace period
* @neigh_ifinfo: the neigh_ifinfo object to release
*/
static void
batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
batadv_neigh_ifinfo_release(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
if (atomic_dec_and_test(&neigh_ifinfo->refcount))
batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);
kfree_rcu(neigh_ifinfo, rcu);
}
/**
* batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
* batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
* the neigh_ifinfo
* @neigh_ifinfo: the neigh_ifinfo object to release
*/
void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
if (atomic_dec_and_test(&neigh_ifinfo->refcount))
call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
batadv_neigh_ifinfo_release(neigh_ifinfo);
}
/**
* batadv_hardif_neigh_free_rcu - free the hardif neigh_node
* @rcu: rcu pointer of the neigh_node
*/
static void batadv_hardif_neigh_free_rcu(struct rcu_head *rcu)
{
struct batadv_hardif_neigh_node *hardif_neigh;
hardif_neigh = container_of(rcu, struct batadv_hardif_neigh_node, rcu);
batadv_hardif_free_ref_now(hardif_neigh->if_incoming);
kfree(hardif_neigh);
}
/**
* batadv_hardif_neigh_free_now - decrement the hardif neighbors refcounter
* and possibly free it (without rcu callback)
* batadv_hardif_neigh_release - release hardif neigh node from lists and
* queue for free after rcu grace period
* @hardif_neigh: hardif neigh neighbor to free
*/
static void
batadv_hardif_neigh_free_now(struct batadv_hardif_neigh_node *hardif_neigh)
batadv_hardif_neigh_release(struct batadv_hardif_neigh_node *hardif_neigh)
{
if (atomic_dec_and_test(&hardif_neigh->refcount)) {
spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
hlist_del_init_rcu(&hardif_neigh->list);
spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
hlist_del_init_rcu(&hardif_neigh->list);
spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
batadv_hardif_neigh_free_rcu(&hardif_neigh->rcu);
}
batadv_hardif_free_ref(hardif_neigh->if_incoming);
kfree_rcu(hardif_neigh, rcu);
}
/**
* batadv_hardif_neigh_free_ref - decrement the hardif neighbors refcounter
* and possibly free it
* and possibly release it
* @hardif_neigh: hardif neigh neighbor to free
*/
void batadv_hardif_neigh_free_ref(struct batadv_hardif_neigh_node *hardif_neigh)
{
if (atomic_dec_and_test(&hardif_neigh->refcount)) {
spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
hlist_del_init_rcu(&hardif_neigh->list);
spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
call_rcu(&hardif_neigh->rcu, batadv_hardif_neigh_free_rcu);
}
if (atomic_dec_and_test(&hardif_neigh->refcount))
batadv_hardif_neigh_release(hardif_neigh);
}
/**
* batadv_neigh_node_free_rcu - free the neigh_node
* @rcu: rcu pointer of the neigh_node
* batadv_neigh_node_release - release neigh_node from lists and queue for
* free after rcu grace period
* @neigh_node: neigh neighbor to free
*/
static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node)
{
struct hlist_node *node_tmp;
struct batadv_neigh_node *neigh_node;
struct batadv_hardif_neigh_node *hardif_neigh;
struct batadv_neigh_ifinfo *neigh_ifinfo;
struct batadv_algo_ops *bao;
neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
&neigh_node->ifinfo_list, list) {
batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
}
hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
neigh_node->addr);
if (hardif_neigh) {
/* batadv_hardif_neigh_get() increases refcount too */
batadv_hardif_neigh_free_now(hardif_neigh);
batadv_hardif_neigh_free_now(hardif_neigh);
batadv_hardif_neigh_free_ref(hardif_neigh);
batadv_hardif_neigh_free_ref(hardif_neigh);
}
if (bao->bat_neigh_free)
bao->bat_neigh_free(neigh_node);
batadv_hardif_free_ref_now(neigh_node->if_incoming);
batadv_hardif_free_ref(neigh_node->if_incoming);
kfree(neigh_node);
}
/**
* batadv_neigh_node_free_ref_now - decrement the neighbors refcounter
* and possibly free it (without rcu callback)
* @neigh_node: neigh neighbor to free
*/
static void
batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
{
if (atomic_dec_and_test(&neigh_node->refcount))
batadv_neigh_node_free_rcu(&neigh_node->rcu);
kfree_rcu(neigh_node, rcu);
}
/**
* batadv_neigh_node_free_ref - decrement the neighbors refcounter
* and possibly free it
* and possibly release it
* @neigh_node: neigh neighbor to free
*/
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
if (atomic_dec_and_test(&neigh_node->refcount))
call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
batadv_neigh_node_release(neigh_node);
}
/**
@@ -733,79 +686,48 @@ int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
}
/**
* batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
* @rcu: rcu pointer of the orig_ifinfo object
* batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
* free after rcu grace period
* @orig_ifinfo: the orig_ifinfo object to release
*/
static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
{
struct batadv_orig_ifinfo *orig_ifinfo;
struct batadv_neigh_node *router;
orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
batadv_hardif_free_ref(orig_ifinfo->if_outgoing);
/* this is the last reference to this object */
router = rcu_dereference_protected(orig_ifinfo->router, true);
if (router)
batadv_neigh_node_free_ref_now(router);
kfree(orig_ifinfo);
batadv_neigh_node_free_ref(router);
kfree_rcu(orig_ifinfo, rcu);
}
/**
* batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
* the orig_ifinfo (without rcu callback)
* @orig_ifinfo: the orig_ifinfo object to release
*/
static void
batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
{
if (atomic_dec_and_test(&orig_ifinfo->refcount))
batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
}
/**
* batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
* batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
* the orig_ifinfo
* @orig_ifinfo: the orig_ifinfo object to release
*/
void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
{
if (atomic_dec_and_test(&orig_ifinfo->refcount))
call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
batadv_orig_ifinfo_release(orig_ifinfo);
}
/**
* batadv_orig_node_free_rcu - free the orig_node
* @rcu: rcu pointer of the orig_node
*/
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
struct hlist_node *node_tmp;
struct batadv_neigh_node *neigh_node;
struct batadv_orig_node *orig_node;
struct batadv_orig_ifinfo *orig_ifinfo;
orig_node = container_of(rcu, struct batadv_orig_node, rcu);
spin_lock_bh(&orig_node->neigh_list_lock);
/* for all neighbors towards this originator ... */
hlist_for_each_entry_safe(neigh_node, node_tmp,
&orig_node->neigh_list, list) {
hlist_del_rcu(&neigh_node->list);
batadv_neigh_node_free_ref_now(neigh_node);
}
hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
&orig_node->ifinfo_list, list) {
hlist_del_rcu(&orig_ifinfo->list);
batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
}
spin_unlock_bh(&orig_node->neigh_list_lock);
batadv_mcast_purge_orig(orig_node);
/* Free nc_nodes */
batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
batadv_frag_purge_orig(orig_node, NULL);
if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
@@ -815,26 +737,48 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
kfree(orig_node);
}
/**
* batadv_orig_node_release - release orig_node from lists and queue for
* free after rcu grace period
* @orig_node: the orig node to free
*/
static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
{
struct hlist_node *node_tmp;
struct batadv_neigh_node *neigh_node;
struct batadv_orig_ifinfo *orig_ifinfo;
spin_lock_bh(&orig_node->neigh_list_lock);
/* for all neighbors towards this originator ... */
hlist_for_each_entry_safe(neigh_node, node_tmp,
&orig_node->neigh_list, list) {
hlist_del_rcu(&neigh_node->list);
batadv_neigh_node_free_ref(neigh_node);
}
hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
&orig_node->ifinfo_list, list) {
hlist_del_rcu(&orig_ifinfo->list);
batadv_orig_ifinfo_free_ref(orig_ifinfo);
}
spin_unlock_bh(&orig_node->neigh_list_lock);
/* Free nc_nodes */
batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}
/**
* batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
* schedule an rcu callback for freeing it
* release it
* @orig_node: the orig node to free
*/
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
if (atomic_dec_and_test(&orig_node->refcount))
call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}
/**
* batadv_orig_node_free_ref_now - decrement the orig node refcounter and
* possibly free it (without rcu callback)
* @orig_node: the orig node to free
*/
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
{
if (atomic_dec_and_test(&orig_node->refcount))
batadv_orig_node_free_rcu(&orig_node->rcu);
batadv_orig_node_release(orig_node);
}
void batadv_originator_free(struct batadv_priv *bat_priv)


@@ -38,7 +38,6 @@ int batadv_originator_init(struct batadv_priv *bat_priv);
void batadv_originator_free(struct batadv_priv *bat_priv);
void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
const u8 *addr);
struct batadv_hardif_neigh_node *


@@ -240,20 +240,6 @@ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
return count;
}
static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
{
struct batadv_tt_orig_list_entry *orig_entry;
orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
/* We are in an rcu callback here, therefore we cannot use
* batadv_orig_node_free_ref() and its call_rcu():
* An rcu_barrier() wouldn't wait for that to finish
*/
batadv_orig_node_free_ref_now(orig_entry->orig_node);
kfree(orig_entry);
}
/**
* batadv_tt_local_size_mod - change the size by v of the local table identified
* by vid
@@ -349,13 +335,25 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
batadv_tt_global_size_mod(orig_node, vid, -1);
}
/**
* batadv_tt_orig_list_entry_release - release tt orig entry from lists and
* queue for free after rcu grace period
* @orig_entry: tt orig entry to be free'd
*/
static void
batadv_tt_orig_list_entry_release(struct batadv_tt_orig_list_entry *orig_entry)
{
batadv_orig_node_free_ref(orig_entry->orig_node);
kfree_rcu(orig_entry, rcu);
}
static void
batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
{
if (!atomic_dec_and_test(&orig_entry->refcount))
return;
call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
batadv_tt_orig_list_entry_release(orig_entry);
}
/**


@@ -1160,17 +1160,26 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
const struct sw_flow_actions *acts,
struct sw_flow_key *key)
{
int level = this_cpu_read(exec_actions_level);
int err;
static const int ovs_recursion_limit = 5;
int err, level;
level = __this_cpu_inc_return(exec_actions_level);
if (unlikely(level > ovs_recursion_limit)) {
net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
ovs_dp_name(dp));
kfree_skb(skb);
err = -ENETDOWN;
goto out;
}
this_cpu_inc(exec_actions_level);
err = do_execute_actions(dp, skb, key,
acts->actions, acts->actions_len);
if (!level)
if (level == 1)
process_deferred_actions(dp);
this_cpu_dec(exec_actions_level);
out:
__this_cpu_dec(exec_actions_level);
return err;
}
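
Two details worth noting in the ovs hunk: the counter is now bumped with __this_cpu_inc_return() before do_execute_actions() runs, so the outermost invocation sees level == 1 (hence the changed process_deferred_actions() condition, which previously tested !level), and a depth beyond five drops the packet with -ENETDOWN rather than letting nested action execution chew through the kernel stack.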


@@ -333,7 +333,7 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
if (!ep->base.bind_addr.port)
goto out;
t = sctp_epaddr_lookup_transport(ep, paddr);
if (!t || t->asoc->temp)
if (!t)
goto out;
*transport = t;


@@ -874,6 +874,9 @@ void sctp_hash_transport(struct sctp_transport *t)
{
struct sctp_hash_cmp_arg arg;
if (t->asoc->temp)
return;
arg.ep = t->asoc->ep;
arg.paddr = &t->ipaddr;
arg.net = sock_net(t->asoc->base.sk);
@@ -886,6 +889,9 @@ reinsert:
void sctp_unhash_transport(struct sctp_transport *t)
{
if (t->asoc->temp)
return;
rhashtable_remove_fast(&sctp_transport_hashtable, &t->node,
sctp_hash_params);
}
@@ -931,7 +937,7 @@ static struct sctp_association *__sctp_lookup_association(
struct sctp_transport *t;
t = sctp_addrs_lookup_transport(net, local, peer);
if (!t || t->dead || t->asoc->temp)
if (!t || t->dead)
return NULL;
sctp_association_hold(t->asoc);
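
Together, the two sctp hunks move the temporary-association policy to hash time: sctp_hash_transport() and sctp_unhash_transport() now return early for transports whose association is temporary, so those transports never enter the rhashtable at all, and the t->asoc->temp filters on the lookup side become redundant and are removed.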