This is the 5.4.85 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl/glK0ACgkQONu9yGCS
 aT5W7A/9Fosi7yNmr9UI/IjjcvDzy2ecA/8Je/WHx9Q5AGUvPhaO9hiciuic3vIQ
 hh8TgKl/8+ZHuiKGXi89G1S7VIeZrCqz55wBpWSgaPsZ9zJzHFh65aSIsoN+Rlmy
 FLHr7BKxchmNgvtVcoO8gpgV2NsxRmJ8+NKWXIRmVzBe5oyyLLOtVsN96htq8jrx
 IrqqtpjVSzlP2enEVPsC0Xw6piK5xaWriQS5W8S1y5awP6Dets+T8CWlBvuoaBCH
 KzISleJF/R5sP4U4+4j6OwEzzPUqxMnCsYrATUOcu/pGGqwRZCVtY4JhxccMCZw6
 Q5gsvbcVTrz3IFyhMI7KL28+YvAOeQ67zOim6ucztgNXDYCMbc+uTxCDRERPr5Pl
 oiCfW42X8Z3Alt8KugDhjh6XYmMVS3u5tOQEaNoPC1mv+WGMJy6Szsq+NgJfbeqC
 8Fszz/1MQtPBBk/wVOvtNybNy+0W4sBGaUq98TtrfnfZtsuM/FrJiCgXTB9pqGxH
 Bq6R8BUsGeFEWzDDe29BoFJBXGpZ8Ox0/LKmBM7z2jWBZldH6DC54LhWaQgsLAMO
 UPGVCtfKNQXnJ/0bO5RSgikD9iTQyFBs5nhreJcXMsTPxI/5wXEkw6MDrvYXLTHE
 C6H6TarzGwlUcicc+UYCVplSTy+4D1sGbH95Rz0cMJHRyojKl8E=
 =9HIQ
 -----END PGP SIGNATURE-----
gpgsig -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEdQaENiSDAlGTDEbB7G51OISzHs0FAl/gyMIACgkQ7G51OISz
 Hs0J4g//Y3R4XyO45gq0EtLAQy5rwTb+qO58phDIwC+puIedDTJnLo9IaDB3kyTa
 98++yv4VoWcgLLCsCtgqDube2Csyvx2Dl0JgDMR7L998VXxnVuht3vhUkcd081XR
 EJoNfpBva8uIeqB6LiGg9+jGEc0s5vUCXH3Ijt3ync8vZxrz8PSBruP8AKSOjjbo
 zvRUsCJugYyOrbg2bTswBNTgAOMB+SbtQkOPzQ19/uVzHSGFAUKp41XTVTsbaX62
 9UQQLDfUACvCyGXze50QTC4lRJZNl0PuUhQdz6WSu4DDF1I4QRf5onf39fffPHtU
 XwiCoa0mDwOJLr2An1xITdjQzudWRrp7Bt7JbfjTmcfAa5JPXVnXP12fdJeSm0w2
 ZJoH5A3o2iEq1Q+Fk6FOIEhPLm4ktGHegxo7eZOuBzPGtWTvqqE/ngzyfw/NovGP
 579YtIbY6+2pw3FF4Havv/adRKPcrFZNbiqGgOWpeCB+i7BfJQ4tSNJigtQwLvrf
 yVWJ2e+I9B3z+/HqPKU8OtCWf3R3vT4CvN1bInRFodeSsXUxQraXwKPP/bxOI57C
 TrS5/1pKStntol1IKSCVgXIh66GRt11hPKQHEOcOjG4u0VUSBb9uHoyBo0BTuhXW
 5EOj2hzc/DXye5HmGEQlqnMzSHVg6qn+eKhZKkxCqQLMIjKeAM8=
 =xf26
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.85' into 5.4-2.2.x-imx

This is the 5.4.85 stable release

Conflicts (corrected auto-resolve):
- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c:
Upstream commit 318d90218b ("net: stmmac: free tx skb buffer in
stmmac_resume()") overlapped NXP commit dd7c2b79a9 ("MLK-24217 net:
ethernet: stmmac: free tx skb buffer in stmmac_resume()"), leaving a
duplicate definition of the function in the code. Resolved by replacing
the NXP commit with the upstream one.

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
5.4-rM2-2.2.x-imx-squashed
Andrey Zhizhikin 2020-12-21 14:00:33 +00:00
commit 5552df2bf5
44 changed files with 340 additions and 97 deletions

View File

@@ -5151,6 +5151,7 @@
 				device);
 			j = NO_REPORT_LUNS (don't use report luns
 				command, uas only);
+			k = NO_SAME (do not use WRITE_SAME, uas only)
 			l = NOT_LOCKABLE (don't try to lock and
 				unlock ejectable media, not on uas);
 			m = MAX_SECTORS_64 (don't transfer more

View File

@@ -420,7 +420,7 @@ If the generation number of the spte does not equal the global generation
 number, it will ignore the cached MMIO information and handle the page
 fault through the slow path.
 
-Since only 19 bits are used to store generation-number on mmio spte, all
+Since only 18 bits are used to store generation-number on mmio spte, all
 pages are zapped when there is an overflow.
 
 Unfortunately, a single memory access might access kvm_memslots(kvm) multiple

View File

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 84
+SUBLEVEL = 85
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus

View File

@@ -276,7 +276,6 @@ struct rftype {
  * struct mbm_state - status for each MBM counter in each domain
  * @chunks:	Total data moved (multiply by rdt_group.mon_scale to get bytes)
  * @prev_msr	Value of IA32_QM_CTR for this RMID last time we read it
- * @chunks_bw	Total local data moved. Used for bandwidth calculation
  * @prev_bw_msr:Value of previous IA32_QM_CTR for bandwidth counting
  * @prev_bw	The most recent bandwidth in MBps
  * @delta_bw	Difference between the current and previous bandwidth
@@ -285,7 +284,6 @@ struct rftype {
 struct mbm_state {
 	u64	chunks;
 	u64	prev_msr;
-	u64	chunks_bw;
 	u64	prev_bw_msr;
 	u32	prev_bw;
 	u32	delta_bw;

View File

@@ -280,8 +280,6 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
 		return;
 
 	chunks = mbm_overflow_count(m->prev_bw_msr, tval);
-	m->chunks_bw += chunks;
-	m->chunks = m->chunks_bw;
 	cur_bw = (chunks * r->mon_scale) >> 20;
 
 	if (m->delta_comp)
@@ -451,15 +449,14 @@ static void mbm_update(struct rdt_domain *d, int rmid)
 	}
 	if (is_mbm_local_enabled()) {
 		rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
+		__mon_event_count(rmid, &rr);
 
 		/*
 		 * Call the MBA software controller only for the
 		 * control groups and when user has enabled
 		 * the software controller explicitly.
 		 */
-		if (!is_mba_sc(NULL))
-			__mon_event_count(rmid, &rr);
-		else
+		if (is_mba_sc(NULL))
 			mbm_bw_count(rmid, &rr);
 	}
 }
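
As a sense check on the units in mbm_bw_count() above: chunks is the one-second delta of the raw MBM counter and r->mon_scale is the bytes-per-chunk scaling factor, so the right shift by 20 turns bytes into MBps. A worked example with hypothetical values:

    chunks    = 16384 counts          (delta over the one-second window)
    mon_scale = 32768 bytes per count
    cur_bw    = (16384 * 32768) >> 20 = 2^29 >> 20 = 512 MBps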

View File

@@ -407,11 +407,11 @@ static inline bool is_access_track_spte(u64 spte)
 }
 
 /*
- * Due to limited space in PTEs, the MMIO generation is a 19 bit subset of
+ * Due to limited space in PTEs, the MMIO generation is a 18 bit subset of
  * the memslots generation and is derived as follows:
  *
  * Bits 0-8 of the MMIO generation are propagated to spte bits 3-11
- * Bits 9-18 of the MMIO generation are propagated to spte bits 52-61
+ * Bits 9-17 of the MMIO generation are propagated to spte bits 54-62
  *
  * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in
  * the MMIO generation number, as doing so would require stealing a bit from
@@ -420,18 +420,29 @@ static inline bool is_access_track_spte(u64 spte)
  * requires a full MMU zap).  The flag is instead explicitly queried when
  * checking for MMIO spte cache hits.
  */
-#define MMIO_SPTE_GEN_MASK		GENMASK_ULL(17, 0)
-
 #define MMIO_SPTE_GEN_LOW_START		3
 #define MMIO_SPTE_GEN_LOW_END		11
-#define MMIO_SPTE_GEN_LOW_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
-						    MMIO_SPTE_GEN_LOW_START)
 
 #define MMIO_SPTE_GEN_HIGH_START	PT64_SECOND_AVAIL_BITS_SHIFT
 #define MMIO_SPTE_GEN_HIGH_END		62
+
+#define MMIO_SPTE_GEN_LOW_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
+						    MMIO_SPTE_GEN_LOW_START)
 #define MMIO_SPTE_GEN_HIGH_MASK		GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
 						    MMIO_SPTE_GEN_HIGH_START)
+
+#define MMIO_SPTE_GEN_LOW_BITS		(MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1)
+#define MMIO_SPTE_GEN_HIGH_BITS		(MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1)
+
+/* remember to adjust the comment above as well if you change these */
+static_assert(MMIO_SPTE_GEN_LOW_BITS == 9 && MMIO_SPTE_GEN_HIGH_BITS == 9);
+
+#define MMIO_SPTE_GEN_LOW_SHIFT		(MMIO_SPTE_GEN_LOW_START - 0)
+#define MMIO_SPTE_GEN_HIGH_SHIFT	(MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS)
+
+#define MMIO_SPTE_GEN_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0)
+
 static u64 generation_mmio_spte_mask(u64 gen)
 {
 	u64 mask;
@@ -439,8 +450,8 @@ static u64 generation_mmio_spte_mask(u64 gen)
 	WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
 	BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK);
 
-	mask = (gen << MMIO_SPTE_GEN_LOW_START) & MMIO_SPTE_GEN_LOW_MASK;
-	mask |= (gen << MMIO_SPTE_GEN_HIGH_START) & MMIO_SPTE_GEN_HIGH_MASK;
+	mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
+	mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
 	return mask;
 }
@@ -448,8 +459,8 @@ static u64 get_mmio_spte_generation(u64 spte)
 {
 	u64 gen;
 
-	gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_START;
-	gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_START;
+	gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_SHIFT;
+	gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_SHIFT;
 	return gen;
 }
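
To see why the separate SHIFT macros fix the encoding, here is a minimal standalone sketch (userspace C, not kernel code; it assumes PT64_SECOND_AVAIL_BITS_SHIFT is 54, as the updated comment's "spte bits 54-62" implies) that round-trips every 18-bit generation through the two spte bit ranges:

    #include <assert.h>
    #include <stdint.h>

    #define GENMASK_ULL(h, l)  (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    #define GEN_LOW_START   3
    #define GEN_LOW_END     11
    #define GEN_HIGH_START  54      /* assumed PT64_SECOND_AVAIL_BITS_SHIFT */
    #define GEN_HIGH_END    62
    #define GEN_LOW_MASK    GENMASK_ULL(GEN_LOW_END, GEN_LOW_START)
    #define GEN_HIGH_MASK   GENMASK_ULL(GEN_HIGH_END, GEN_HIGH_START)
    #define GEN_LOW_BITS    (GEN_LOW_END - GEN_LOW_START + 1)   /* 9 */
    #define GEN_LOW_SHIFT   (GEN_LOW_START - 0)                 /* 3 */
    #define GEN_HIGH_SHIFT  (GEN_HIGH_START - GEN_LOW_BITS)     /* 45, not 54 */
    #define GEN_MASK        GENMASK_ULL(17, 0)                  /* 18 bits */

    static uint64_t encode(uint64_t gen)
    {
            uint64_t spte;

            spte  = (gen << GEN_LOW_SHIFT)  & GEN_LOW_MASK;
            spte |= (gen << GEN_HIGH_SHIFT) & GEN_HIGH_MASK;
            return spte;
    }

    static uint64_t decode(uint64_t spte)
    {
            uint64_t gen;

            gen  = (spte & GEN_LOW_MASK)  >> GEN_LOW_SHIFT;
            gen |= (spte & GEN_HIGH_MASK) >> GEN_HIGH_SHIFT;
            return gen;
    }

    int main(void)
    {
            uint64_t gen;

            /* The pre-fix code shifted by GEN_HIGH_START (54), pushing
             * generation bits 9-17 past bit 63, so the upper half of the
             * generation was silently dropped. With the distinct SHIFT
             * every value survives the round trip. */
            for (gen = 0; gen <= GEN_MASK; gen++)
                    assert(decode(encode(gen)) == gen);
            return 0;
    }

The assert loop covers all 2^18 generations; with the old single-START shifting it fails as soon as gen exceeds 511 (i.e. as soon as bit 9 is set).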

View File

@@ -141,8 +141,8 @@ static const struct {
 	{ ENETC_PM0_R255,   "MAC rx 128-255 byte packets" },
 	{ ENETC_PM0_R511,   "MAC rx 256-511 byte packets" },
 	{ ENETC_PM0_R1023,  "MAC rx 512-1023 byte packets" },
-	{ ENETC_PM0_R1518,  "MAC rx 1024-1518 byte packets" },
-	{ ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" },
+	{ ENETC_PM0_R1522,  "MAC rx 1024-1522 byte packets" },
+	{ ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" },
 	{ ENETC_PM0_ROVR,   "MAC rx oversized packets" },
 	{ ENETC_PM0_RJBR,   "MAC rx jabber packets" },
 	{ ENETC_PM0_RFRG,   "MAC rx fragment packets" },
@@ -161,9 +161,13 @@ static const struct {
 	{ ENETC_PM0_TBCA,   "MAC tx broadcast frames" },
 	{ ENETC_PM0_TPKT,   "MAC tx packets" },
 	{ ENETC_PM0_TUND,   "MAC tx undersized packets" },
+	{ ENETC_PM0_T64,    "MAC tx 64 byte packets" },
+	{ ENETC_PM0_T127,   "MAC tx 65-127 byte packets" },
+	{ ENETC_PM0_T255,   "MAC tx 128-255 byte packets" },
+	{ ENETC_PM0_T511,   "MAC tx 256-511 byte packets" },
+	{ ENETC_PM0_T1023,  "MAC tx 512-1023 byte packets" },
-	{ ENETC_PM0_T1518,  "MAC tx 1024-1518 byte packets" },
+	{ ENETC_PM0_T1522,  "MAC tx 1024-1522 byte packets" },
+	{ ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" },
 	{ ENETC_PM0_TCNP,   "MAC tx control packets" },
 	{ ENETC_PM0_TDFR,   "MAC tx deferred packets" },
 	{ ENETC_PM0_TMCOL,  "MAC tx multiple collisions" },

View File

@@ -278,8 +278,8 @@ enum enetc_bdr_type {TX, RX};
 #define ENETC_PM0_R255		0x8180
 #define ENETC_PM0_R511		0x8188
 #define ENETC_PM0_R1023		0x8190
-#define ENETC_PM0_R1518		0x8198
-#define ENETC_PM0_R1519X	0x81A0
+#define ENETC_PM0_R1522		0x8198
+#define ENETC_PM0_R1523X	0x81A0
 #define ENETC_PM0_ROVR		0x81A8
 #define ENETC_PM0_RJBR		0x81B0
 #define ENETC_PM0_RFRG		0x81B8
@@ -298,9 +298,13 @@ enum enetc_bdr_type {TX, RX};
 #define ENETC_PM0_TBCA		0x8250
 #define ENETC_PM0_TPKT		0x8260
 #define ENETC_PM0_TUND		0x8268
+#define ENETC_PM0_T64		0x8270
+#define ENETC_PM0_T127		0x8278
+#define ENETC_PM0_T255		0x8280
+#define ENETC_PM0_T511		0x8288
+#define ENETC_PM0_T1023		0x8290
-#define ENETC_PM0_T1518		0x8298
+#define ENETC_PM0_T1522		0x8298
+#define ENETC_PM0_T1523X	0x82A0
 #define ENETC_PM0_TCNP		0x82C0
 #define ENETC_PM0_TDFR		0x82D0
 #define ENETC_PM0_TMCOL		0x82D8

View File

@@ -35,8 +35,6 @@
 #define HCLGE_DBG_DFX_SSU_2_OFFSET	12
 
-#pragma pack(1)
-
 struct hclge_qos_pri_map_cmd {
 	u8 pri0_tc : 4,
 	   pri1_tc : 4;
@@ -85,8 +83,6 @@ struct hclge_dbg_reg_type_info {
 	struct hclge_dbg_reg_common_msg reg_msg;
 };
 
-#pragma pack()
-
 static struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
 	{false, "Reserved"},
 	{true, "BP_CPU_STATE"},

View File

@@ -1383,8 +1383,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 	}
 
 	priv->port_stats.tx_timeout++;
-	en_dbg(DRV, priv, "Scheduling watchdog\n");
-	queue_work(mdev->workqueue, &priv->watchdog_task);
+	if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) {
+		en_dbg(DRV, priv, "Scheduling port restart\n");
+		queue_work(mdev->workqueue, &priv->restart_task);
+	}
 }
@@ -1738,6 +1740,7 @@ int mlx4_en_start_port(struct net_device *dev)
 			mlx4_en_deactivate_cq(priv, cq);
 			goto tx_err;
 		}
+		clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state);
 		if (t != TX_XDP) {
 			tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
 			tx_ring->recycle_ring = NULL;
@@ -1834,6 +1837,7 @@ int mlx4_en_start_port(struct net_device *dev)
 		local_bh_enable();
 	}
 
+	clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state);
 	netif_tx_start_all_queues(dev);
 	netif_device_attach(dev);
@@ -2004,7 +2008,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 static void mlx4_en_restart(struct work_struct *work)
 {
 	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
-						 watchdog_task);
+						 restart_task);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct net_device *dev = priv->dev;
@@ -2386,7 +2390,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 	if (netif_running(dev)) {
 		mutex_lock(&mdev->state_lock);
 		if (!mdev->device_up) {
-			/* NIC is probably restarting - let watchdog task reset
+			/* NIC is probably restarting - let restart task reset
 			 * the port */
 			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
 		} else {
@@ -2395,7 +2399,9 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 			if (err) {
 				en_err(priv, "Failed restarting port:%d\n",
 				       priv->port);
-				queue_work(mdev->workqueue, &priv->watchdog_task);
+				if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING,
+						      &priv->state))
+					queue_work(mdev->workqueue, &priv->restart_task);
 			}
 		}
 		mutex_unlock(&mdev->state_lock);
@@ -2865,7 +2871,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
 		if (err) {
 			en_err(priv, "Failed starting port %d for XDP change\n",
 			       priv->port);
-			queue_work(mdev->workqueue, &priv->watchdog_task);
+			if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
+				queue_work(mdev->workqueue, &priv->restart_task);
 		}
 	}
@@ -3263,7 +3270,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
 	spin_lock_init(&priv->stats_lock);
 	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
-	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
+	INIT_WORK(&priv->restart_task, mlx4_en_restart);
 	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
 	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
 	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);

View File

@@ -392,6 +392,35 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 	return cnt;
 }
 
+static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe,
+				   u16 cqe_index, struct mlx4_en_tx_ring *ring)
+{
+	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_tx_info *tx_info;
+	struct mlx4_en_tx_desc *tx_desc;
+	u16 wqe_index;
+	int desc_size;
+
+	en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n",
+	       ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome);
+	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe),
+		       false);
+
+	wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask;
+	tx_info = &ring->tx_info[wqe_index];
+	desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE;
+	en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn,
+	       wqe_index, desc_size);
+	tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE);
+	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false);
+
+	if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
+		return;
+
+	en_err(priv, "Scheduling port restart\n");
+	queue_work(mdev->workqueue, &priv->restart_task);
+}
+
 bool mlx4_en_process_tx_cq(struct net_device *dev,
 			   struct mlx4_en_cq *cq, int napi_budget)
 {
@@ -438,13 +467,10 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
 		dma_rmb();
 
 		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
-			     MLX4_CQE_OPCODE_ERROR)) {
-			struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
-
-			en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
-			       cqe_err->vendor_err_syndrome,
-			       cqe_err->syndrome);
-		}
+			     MLX4_CQE_OPCODE_ERROR))
+			if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state))
+				mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index,
+						       ring);
 
 		/* Skip over last polled CQE */
 		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

View File

@@ -271,6 +271,10 @@ struct mlx4_en_page_cache {
 	} buf[MLX4_EN_CACHE_SIZE];
 };
 
+enum {
+	MLX4_EN_TX_RING_STATE_RECOVERING,
+};
+
 struct mlx4_en_priv;
 
 struct mlx4_en_tx_ring {
@@ -317,6 +321,7 @@ struct mlx4_en_tx_ring {
 	 * Only queue_stopped might be used if BQL is not properly working.
 	 */
 	unsigned long		queue_stopped;
+	unsigned long		state;
 	struct mlx4_hwq_resources sp_wqres;
 	struct mlx4_qp		sp_qp;
 	struct mlx4_qp_context	sp_context;
@@ -530,6 +535,10 @@ struct mlx4_en_stats_bitmap {
 	struct mutex		mutex; /* for mutual access to stats bitmap */
 };
 
+enum {
+	MLX4_EN_STATE_FLAG_RESTARTING,
+};
+
 struct mlx4_en_priv {
 	struct mlx4_en_dev *mdev;
 	struct mlx4_en_port_profile *prof;
@@ -595,7 +604,7 @@ struct mlx4_en_priv {
 	struct mlx4_en_cq	*rx_cq[MAX_RX_RINGS];
 	struct mlx4_qp		drop_qp;
 	struct work_struct	rx_mode_task;
-	struct work_struct	watchdog_task;
+	struct work_struct	restart_task;
 	struct work_struct	linkstate_task;
 	struct delayed_work	stats_task;
 	struct delayed_work	service_task;
@@ -643,6 +652,7 @@ struct mlx4_en_priv {
 	u32 pflags;
 	u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
 	u8 rss_hash_fn;
+	unsigned long state;
 };
 
 enum mlx4_en_wol {

View File

@@ -780,7 +780,9 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
 	wol->supported = 0;
 	wol->wolopts = 0;
-	phy_ethtool_get_wol(netdev->phydev, wol);
+
+	if (netdev->phydev)
+		phy_ethtool_get_wol(netdev->phydev, wol);
 
 	wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
 		WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
@@ -809,9 +811,8 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
 	device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
 
-	phy_ethtool_set_wol(netdev->phydev, wol);
-
-	return 0;
+	return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
+			: -ENETDOWN;
 }
 #endif /* CONFIG_PM */

View File

@@ -29,7 +29,6 @@
 #define PRG_ETH0_EXT_RMII_MODE		4
 
 /* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
-#define PRG_ETH0_CLK_M250_SEL_SHIFT	4
 #define PRG_ETH0_CLK_M250_SEL_MASK	GENMASK(4, 4)
 
 #define PRG_ETH0_TXDLY_SHIFT		5
@@ -143,8 +142,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
 	}
 
 	clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0;
-	clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT;
-	clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK;
+	clk_configs->m250_mux.shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK);
+	clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK >>
+				     clk_configs->m250_mux.shift;
 	clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parent_names,
 					 MUX_CLK_NUM_PARENTS, &clk_mux_ops,
 					 &clk_configs->m250_mux.hw);
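
The __ffs() trick above is easy to sanity-check in isolation; a standalone sketch (userspace stand-ins for the kernel helpers, with __ffs modeled by the GCC/Clang builtin):

    #include <assert.h>

    #define BITS_PER_LONG   (8 * (int)sizeof(long))
    #define GENMASK(h, l)   (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
    #define __ffs(x)        ((unsigned long)__builtin_ctzl(x)) /* lowest set bit */

    #define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)

    int main(void)
    {
            unsigned long shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK);
            unsigned long mask  = PRG_ETH0_CLK_M250_SEL_MASK >> shift;

            /* The clk mux framework expects the mask relative to the
             * shift, so a register field at bit 4 becomes shift 4 and
             * mask 0x1 -- which is what the fixed driver now passes. */
            assert(shift == 4 && mask == 0x1);
            return 0;
    }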

View File

@@ -1463,6 +1463,10 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
 		stmmac_free_tx_buffer(priv, queue, i);
 }
 
+/**
+ * stmmac_free_tx_skbufs - free TX skb buffers
+ * @priv: private structure
+ */
 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
 {
 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
@@ -2796,9 +2800,6 @@ static int stmmac_release(struct net_device *dev)
 	u32 chan;
 
-	if (priv->eee_enabled)
-		del_timer_sync(&priv->eee_ctrl_timer);
-
 	/* Stop and disconnect the PHY */
 	phylink_stop(priv->phylink);
 	phylink_disconnect_phy(priv->phylink);
@@ -2815,6 +2816,11 @@ static int stmmac_release(struct net_device *dev)
 	if (priv->lpi_irq > 0)
 		free_irq(priv->lpi_irq, dev);
 
+	if (priv->eee_enabled) {
+		priv->tx_path_in_lpi_mode = false;
+		del_timer_sync(&priv->eee_ctrl_timer);
+	}
+
 	/* Stop TX/RX DMA and clear the descriptors */
 	stmmac_stop_all_dma(priv);
@@ -4948,6 +4954,11 @@ int stmmac_suspend(struct device *dev)
 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
 		del_timer_sync(&priv->tx_queue[chan].txtimer);
 
+	if (priv->eee_enabled) {
+		priv->tx_path_in_lpi_mode = false;
+		del_timer_sync(&priv->eee_ctrl_timer);
+	}
+
 	/* Stop TX/RX DMA */
 	stmmac_stop_all_dma(priv);

View File

@@ -1425,9 +1425,7 @@ static int temac_probe(struct platform_device *pdev)
 		of_node_put(dma_np);
 	} else if (pdata) {
 		/* 2nd memory resource specifies DMA registers */
-		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-		lp->sdma_regs = devm_ioremap_nocache(&pdev->dev, res->start,
-						     resource_size(res));
+		lp->sdma_regs = devm_platform_ioremap_resource(pdev, 1);
 		if (IS_ERR(lp->sdma_regs)) {
 			dev_err(&pdev->dev,
 				"could not map DMA registers\n");

View File

@@ -1038,11 +1038,17 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
 	int orig_iif = skb->skb_iif;
 	bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
 	bool is_ndisc = ipv6_ndisc_frame(skb);
+	bool is_ll_src;
 
 	/* loopback, multicast & non-ND link-local traffic; do not push through
-	 * packet taps again. Reset pkt_type for upper layers to process skb
+	 * packet taps again. Reset pkt_type for upper layers to process skb.
+	 * for packets with lladdr src, however, skip so that the dst can be
+	 * determine at input using original ifindex in the case that daddr
+	 * needs strict
 	 */
-	if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
+	is_ll_src = ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL;
+
+	if (skb->pkt_type == PACKET_LOOPBACK ||
+	    (need_strict && !is_ndisc && !is_ll_src)) {
 		skb->dev = vrf_dev;
 		skb->skb_iif = vrf_dev->ifindex;
 		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;

View File

@@ -170,11 +170,6 @@ static void omap_8250_mdr1_errataset(struct uart_8250_port *up,
 				     struct omap8250_priv *priv)
 {
 	u8 timeout = 255;
-	u8 old_mdr1;
-
-	old_mdr1 = serial_in(up, UART_OMAP_MDR1);
-	if (old_mdr1 == priv->mdr1)
-		return;
 
 	serial_out(up, UART_OMAP_MDR1, priv->mdr1);
 	udelay(2);

View File

@@ -342,6 +342,9 @@ static const struct usb_device_id usb_quirk_list[] = {
 	{ USB_DEVICE(0x06a3, 0x0006), .driver_info =
 			USB_QUIRK_CONFIG_INTF_STRINGS },
 
+	/* Agfa SNAPSCAN 1212U */
+	{ USB_DEVICE(0x06bd, 0x0001), .driver_info = USB_QUIRK_RESET_RESUME },
+
 	/* Guillemot Webcam Hercules Dualpix Exchange (2nd ID) */
 	{ USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },

View File

@@ -2733,7 +2733,7 @@ static int __init init(void)
 {
 	int	retval = -ENOMEM;
 	int	i;
-	struct	dummy *dum[MAX_NUM_UDC];
+	struct	dummy *dum[MAX_NUM_UDC] = {};
 
 	if (usb_disabled())
 		return -ENODEV;

View File

@@ -1714,6 +1714,10 @@ retry:
 	hcd->state = HC_STATE_SUSPENDED;
 	bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
 	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	if (bus_state->bus_suspended)
+		usleep_range(5000, 10000);
+
 	return 0;
 }
 EXPORT_SYMBOL(xhci_bus_suspend);

View File

@@ -45,6 +45,7 @@
 #define PCI_DEVICE_ID_INTEL_DNV_XHCI			0x19d0
 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI	0x15b5
 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI	0x15b6
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_XHCI	0x15c1
 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_XHCI	0x15db
 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI	0x15d4
 #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI		0x15e9
@@ -220,6 +221,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
 	    (pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI ||
 	     pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI ||
+	     pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_XHCI ||
 	     pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_XHCI ||
 	     pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI ||
 	     pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI ||

View File

@@ -16,7 +16,7 @@ config USB_SISUSBVGA
 
 config USB_SISUSBVGA_CON
 	bool "Text console and mode switching support" if USB_SISUSBVGA
-	depends on VT
+	depends on VT && BROKEN
 	select FONT_8x16
 	---help---
 	  Say Y here if you want a VGA text console via the USB dongle or

View File

@@ -867,6 +867,9 @@ static int uas_slave_configure(struct scsi_device *sdev)
 	if (devinfo->flags & US_FL_NO_READ_CAPACITY_16)
 		sdev->no_read_capacity_16 = 1;
 
+	/* Some disks cannot handle WRITE_SAME */
+	if (devinfo->flags & US_FL_NO_SAME)
+		sdev->no_write_same = 1;
 	/*
 	 * Some disks return the total number of blocks in response
 	 * to READ CAPACITY rather than the highest block number.

View File

@@ -35,12 +35,15 @@ UNUSUAL_DEV(0x054c, 0x087d, 0x0000, 0x9999,
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_NO_REPORT_OPCODES),
 
-/* Reported-by: Julian Groß <julian.g@posteo.de> */
+/*
+ * Initially Reported-by: Julian Groß <julian.g@posteo.de>
+ * Further reports David C. Partridge <david.partridge@perdrix.co.uk>
+ */
 UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
 		"LaCie",
 		"2Big Quadra USB3",
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
-		US_FL_NO_REPORT_OPCODES),
+		US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
 
 /*
  * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
View File

@@ -541,6 +541,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
 		case 'j':
 			f |= US_FL_NO_REPORT_LUNS;
 			break;
+		case 'k':
+			f |= US_FL_NO_SAME;
+			break;
 		case 'l':
 			f |= US_FL_NOT_LOCKABLE;
 			break;
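
With the new 'k' case wired into the parser, the flag can be applied like any other usb-storage quirk letter using the VID:PID:flags syntax from kernel-parameters.txt (shown in the first hunk above). Illustrative usage, reusing the LaCie IDs from unusual_uas.h:

    usb-storage.quirks=059f:105f:k    (on the kernel command line)

    # or at runtime, if the module is loaded (takes effect on next probe):
    echo 059f:105f:k > /sys/module/usb_storage/parameters/quirks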

View File

@@ -84,6 +84,8 @@
 		/* Cannot handle REPORT_LUNS */			\
 	US_FLAG(ALWAYS_SYNC, 0x20000000)			\
 		/* lies about caching, so always sync */	\
+	US_FLAG(NO_SAME, 0x40000000)				\
+		/* Cannot handle WRITE_SAME */			\
 
 #define US_FLAG(name, value)	US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };

View File

@@ -81,7 +81,8 @@ struct seccomp_metadata {
 
 struct ptrace_syscall_info {
 	__u8 op;	/* PTRACE_SYSCALL_INFO_* */
-	__u32 arch __attribute__((__aligned__(sizeof(__u32))));
+	__u8 pad[3];
+	__u32 arch;
 	__u64 instruction_pointer;
 	__u64 stack_pointer;
 	union {

View File

@@ -30,6 +30,23 @@ static void ipi_mb(void *info)
 	smp_mb();	/* IPIs should be serializing but paranoid. */
 }
 
+static void ipi_sync_core(void *info)
+{
+	/*
+	 * The smp_mb() in membarrier after all the IPIs is supposed to
+	 * ensure that memory on remote CPUs that occur before the IPI
+	 * become visible to membarrier()'s caller -- see scenario B in
+	 * the big comment at the top of this file.
+	 *
+	 * A sync_core() would provide this guarantee, but
+	 * sync_core_before_usermode() might end up being deferred until
+	 * after membarrier()'s smp_mb().
+	 */
+	smp_mb();	/* IPIs should be serializing but paranoid. */
+	sync_core_before_usermode();
+}
+
 static void ipi_sync_rq_state(void *info)
 {
 	struct mm_struct *mm = (struct mm_struct *) info;
@@ -134,6 +151,7 @@ static int membarrier_private_expedited(int flags)
 	int cpu;
 	cpumask_var_t tmpmask;
 	struct mm_struct *mm = current->mm;
+	smp_call_func_t ipi_func = ipi_mb;
 
 	if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
 		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
@@ -141,6 +159,7 @@ static int membarrier_private_expedited(int flags)
 		if (!(atomic_read(&mm->membarrier_state) &
 		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
 			return -EPERM;
+		ipi_func = ipi_sync_core;
 	} else {
 		if (!(atomic_read(&mm->membarrier_state) &
 		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
@@ -181,7 +200,7 @@ static int membarrier_private_expedited(int flags)
 	rcu_read_unlock();
 
 	preempt_disable();
-	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
+	smp_call_function_many(tmpmask, ipi_func, NULL, 1);
 	preempt_enable();
 
 	free_cpumask_var(tmpmask);
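
For context, the path this patches is what a JIT-like process exercises through the membarrier() syscall; a minimal usage sketch (glibc provides no wrapper, so it goes through syscall(2); error handling trimmed):

    #include <linux/membarrier.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static long membarrier(int cmd, unsigned int flags)
    {
            return syscall(__NR_membarrier, cmd, flags);
    }

    int main(void)
    {
            /* A process must register before the expedited command works. */
            membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0);

            /* ... rewrite code in an executable mapping here ... */

            /* After the fix, this IPIs every CPU running the mm with
             * ipi_sync_core(), not just ipi_mb(), so all threads sync
             * their instruction streams before executing the freshly
             * written code. */
            membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0);
            return 0;
    }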

View File

@@ -168,6 +168,9 @@ static int br_dev_open(struct net_device *dev)
 	br_stp_enable_bridge(br);
 	br_multicast_open(br);
 
+	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
+		br_multicast_join_snoopers(br);
+
 	return 0;
 }
@@ -188,6 +191,9 @@ static int br_dev_stop(struct net_device *dev)
 	br_stp_disable_bridge(br);
 	br_multicast_stop(br);
 
+	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
+		br_multicast_leave_snoopers(br);
+
 	netif_stop_queue(dev);
 
 	return 0;

View File

@@ -1848,7 +1848,7 @@ static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
 }
 #endif
 
-static void br_multicast_join_snoopers(struct net_bridge *br)
+void br_multicast_join_snoopers(struct net_bridge *br)
 {
 	br_ip4_multicast_join_snoopers(br);
 	br_ip6_multicast_join_snoopers(br);
@@ -1879,7 +1879,7 @@ static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
 }
 #endif
 
-static void br_multicast_leave_snoopers(struct net_bridge *br)
+void br_multicast_leave_snoopers(struct net_bridge *br)
 {
 	br_ip4_multicast_leave_snoopers(br);
 	br_ip6_multicast_leave_snoopers(br);
@@ -1898,9 +1898,6 @@ static void __br_multicast_open(struct net_bridge *br,
 void br_multicast_open(struct net_bridge *br)
 {
-	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
-		br_multicast_join_snoopers(br);
-
 	__br_multicast_open(br, &br->ip4_own_query);
 #if IS_ENABLED(CONFIG_IPV6)
 	__br_multicast_open(br, &br->ip6_own_query);
@@ -1916,9 +1913,6 @@ void br_multicast_stop(struct net_bridge *br)
 	del_timer_sync(&br->ip6_other_query.timer);
 	del_timer_sync(&br->ip6_own_query.timer);
 #endif
-
-	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
-		br_multicast_leave_snoopers(br);
 }
 
 void br_multicast_dev_del(struct net_bridge *br)
@@ -2049,6 +2043,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
 {
 	struct net_bridge_port *port;
+	bool change_snoopers = false;
 
 	spin_lock_bh(&br->multicast_lock);
 	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
@@ -2057,7 +2052,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
 	br_mc_disabled_update(br->dev, val);
 	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
 	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
-		br_multicast_leave_snoopers(br);
+		change_snoopers = true;
 		goto unlock;
 	}
 
@@ -2068,9 +2063,30 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
 	list_for_each_entry(port, &br->port_list, list)
 		__br_multicast_enable_port(port);
 
+	change_snoopers = true;
+
 unlock:
 	spin_unlock_bh(&br->multicast_lock);
 
+	/* br_multicast_join_snoopers has the potential to cause
+	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
+	 * which would in turn call br_multicast_add_group, which would
+	 * attempt to acquire multicast_lock. This function should be
+	 * called after the lock has been released to avoid deadlocks on
+	 * multicast_lock.
+	 *
+	 * br_multicast_leave_snoopers does not have the problem since
+	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
+	 * returns without calling br_multicast_ipv4/6_rcv if it's not
+	 * enabled. Moved both functions out just for symmetry.
+	 */
+	if (change_snoopers) {
+		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
+			br_multicast_join_snoopers(br);
+		else
+			br_multicast_leave_snoopers(br);
+	}
+
 	return 0;
 }

View File

@@ -665,6 +665,8 @@ void br_multicast_del_port(struct net_bridge_port *port);
 void br_multicast_enable_port(struct net_bridge_port *port);
 void br_multicast_disable_port(struct net_bridge_port *port);
 void br_multicast_init(struct net_bridge *br);
+void br_multicast_join_snoopers(struct net_bridge *br);
+void br_multicast_leave_snoopers(struct net_bridge *br);
 void br_multicast_open(struct net_bridge *br);
 void br_multicast_stop(struct net_bridge *br);
 void br_multicast_dev_del(struct net_bridge *br);
@@ -792,6 +794,14 @@ static inline void br_multicast_init(struct net_bridge *br)
 {
 }
 
+static inline void br_multicast_join_snoopers(struct net_bridge *br)
+{
+}
+
+static inline void br_multicast_leave_snoopers(struct net_bridge *br)
+{
+}
+
 static inline void br_multicast_open(struct net_bridge *br)
 {
 }

View File

@@ -260,8 +260,10 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
 		}
 
 		masterv = br_vlan_get_master(br, v->vid, extack);
-		if (!masterv)
+		if (!masterv) {
+			err = -ENOMEM;
 			goto out_filt;
+		}
 		v->brvlan = masterv;
 		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
 			v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);

View File

@@ -835,7 +835,7 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
 	if (has_gw && has_via) {
 		NL_SET_ERR_MSG(extack,
 			       "Nexthop configuration can not contain both GATEWAY and VIA");
-		goto errout;
+		return -EINVAL;
 	}
 
 	return 0;

View File

@@ -446,7 +446,6 @@ void tcp_init_buffer_space(struct sock *sk)
 	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
 		tcp_sndbuf_expand(sk);
 
-	tp->rcvq_space.space = min_t(u32, tp->rcv_wnd, TCP_INIT_CWND * tp->advmss);
 	tcp_mstamp_refresh(tp);
 	tp->rcvq_space.time = tp->tcp_mstamp;
 	tp->rcvq_space.seq = tp->copied_seq;
@@ -470,6 +469,8 @@ void tcp_init_buffer_space(struct sock *sk)
 
 	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
 	tp->snd_cwnd_stamp = tcp_jiffies32;
+	tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd,
+				    (u32)TCP_INIT_CWND * tp->advmss);
 }
 
 /* 4. Recalculate window clamp after socket hit its memory bounds. */

View File

@@ -1654,7 +1654,8 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
 	 * window, and remember whether we were cwnd-limited then.
 	 */
 	if (!before(tp->snd_una, tp->max_packets_seq) ||
-	    tp->packets_out > tp->max_packets_out) {
+	    tp->packets_out > tp->max_packets_out ||
+	    is_cwnd_limited) {
 		tp->max_packets_out = tp->packets_out;
 		tp->max_packets_seq = tp->snd_nxt;
 		tp->is_cwnd_limited = is_cwnd_limited;
@@ -2476,6 +2477,10 @@ repair:
 		else
 			tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
 
+	is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
+	if (likely(sent_pkts || is_cwnd_limited))
+		tcp_cwnd_validate(sk, is_cwnd_limited);
+
 	if (likely(sent_pkts)) {
 		if (tcp_in_cwnd_reduction(sk))
 			tp->prr_out += sent_pkts;
@@ -2483,8 +2488,6 @@ repair:
 		/* Send one loss probe per tail loss episode. */
 		if (push_one != 2)
 			tcp_schedule_loss_probe(sk, false);
-		is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
-		tcp_cwnd_validate(sk, is_cwnd_limited);
 		return false;
 	}
 	return !tp->packets_out && !tcp_write_queue_empty(sk);

View File

@@ -2117,7 +2117,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		__skb_pull(skb, skb_transport_offset(skb));
 		ret = udp_queue_rcv_one_skb(sk, skb);
 		if (ret > 0)
-			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret);
+			ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret);
 	}
 	return 0;
 }

View File

@@ -60,6 +60,7 @@ static struct mesh_table *mesh_table_alloc(void)
 	atomic_set(&newtbl->entries,  0);
 	spin_lock_init(&newtbl->gates_lock);
 	spin_lock_init(&newtbl->walk_lock);
+	rhashtable_init(&newtbl->rhead, &mesh_rht_params);
 
 	return newtbl;
 }
@@ -775,9 +776,6 @@ int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
 		goto free_path;
 	}
 
-	rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
-	rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);
-
 	sdata->u.mesh.mesh_paths = tbl_path;
 	sdata->u.mesh.mpp_paths = tbl_mpp;

View File

@@ -1934,11 +1934,15 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int
 static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsigned int val)
 {
 	struct snd_pcm_runtime *runtime;
+	int fragshift;
 
 	runtime = substream->runtime;
 	if (runtime->oss.subdivision || runtime->oss.fragshift)
 		return -EINVAL;
-	runtime->oss.fragshift = val & 0xffff;
+	fragshift = val & 0xffff;
+	if (fragshift >= 31)
+		return -EINVAL;
+	runtime->oss.fragshift = fragshift;
 	runtime->oss.maxfrags = (val >> 16) & 0xffff;
 	if (runtime->oss.fragshift < 4)		/* < 16 */
 		runtime->oss.fragshift = 4;
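
The val checked above is the classic OSS fragment descriptor: the low 16 bits carry log2 of the fragment size and the high 16 bits the fragment count, which is why fragshift is masked with 0xffff before the new bounds check. An illustrative user-side request:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/soundcard.h>

    int main(void)
    {
            int fd = open("/dev/dsp", O_WRONLY);
            if (fd < 0)
                    return 1;

            /* Request 4 fragments of 2^12 = 4096 bytes: count in the
             * high half, log2(size) in the low half. A crafted value
             * with fragshift >= 31 is what the kernel now rejects with
             * -EINVAL instead of shifting out of bounds. */
            int frag = (4 << 16) | 12;
            ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &frag);
            return 0;
    }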

View File

@@ -40,6 +40,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
 	case UAC_VERSION_1:
 	default: {
 		struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
+		if (format >= 64)
+			return 0; /* invalid format */
 		sample_width = fmt->bBitResolution;
 		sample_bytes = fmt->bSubframeSize;
 		format = 1ULL << format;
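
The new guard matters because format is used as a shift count on a 64-bit value right below, and in C a shift by the operand's width or more is undefined behavior rather than zero. A minimal standalone illustration of the screened pattern:

    #include <stdint.h>

    /* Returns the format bit, or 0 for out-of-range indexes: "1ULL << 64"
     * is undefined, not zero, so the index must be screened first. */
    static uint64_t format_bit(unsigned int format)
    {
            if (format >= 64)
                    return 0; /* invalid format, same policy as the patch */
            return 1ULL << format;
    }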

View File

@@ -193,16 +193,16 @@ static int usb_chmap_ctl_get(struct snd_kcontrol *kcontrol,
 	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
 	struct snd_usb_substream *subs = info->private_data;
 	struct snd_pcm_chmap_elem *chmap = NULL;
-	int i;
+	int i = 0;
 
+	memset(ucontrol->value.integer.value, 0,
+	       sizeof(ucontrol->value.integer.value));
 	if (subs->cur_audiofmt)
 		chmap = subs->cur_audiofmt->chmap;
 
 	if (chmap) {
 		for (i = 0; i < chmap->channels; i++)
 			ucontrol->value.integer.value[i] = chmap->map[i];
 	}
-	for (; i < subs->channels_max; i++)
-		ucontrol->value.integer.value[i] = 0;
 
 	return 0;
 }

View File

@@ -4197,7 +4197,12 @@ sub do_send_mail {
 	$mail_command =~ s/\$SUBJECT/$subject/g;
 	$mail_command =~ s/\$MESSAGE/$message/g;
 
-	run_command $mail_command;
+	my $ret = run_command $mail_command;
+	if (!$ret && defined($file)) {
+	    # try again without the file
+	    $message .= "\n\n*** FAILED TO SEND LOG ***\n\n";
+	    do_send_email($subject, $message);
+	}
 }
 
 sub send_email {

View File

@@ -11,16 +11,12 @@ grep -A10 "fetcharg:" README | grep -q '\[u\]<offset>' || exit_unsupported
 
 :;: "user-memory access syntax and ustring working on user memory";:
 echo 'p:myevent do_sys_open path=+0($arg2):ustring path2=+u0($arg2):string' \
 	> kprobe_events
-echo 'p:myevent2 do_sys_openat2 path=+0($arg2):ustring path2=+u0($arg2):string' \
-	>> kprobe_events
 
 grep myevent kprobe_events | \
 	grep -q 'path=+0($arg2):ustring path2=+u0($arg2):string'
 echo 1 > events/kprobes/myevent/enable
-echo 1 > events/kprobes/myevent2/enable
 echo > /dev/null
 echo 0 > events/kprobes/myevent/enable
-echo 0 > events/kprobes/myevent2/enable
 
 grep myevent trace | grep -q 'path="/dev/null" path2="/dev/null"'

View File

@@ -239,6 +239,28 @@ setup_cmd_nsb()
 	fi
 }
 
+setup_cmd_nsc()
+{
+	local cmd="$*"
+	local rc
+
+	run_cmd_nsc ${cmd}
+	rc=$?
+	if [ $rc -ne 0 ]; then
+		# show user the command if not done so already
+		if [ "$VERBOSE" = "0" ]; then
+			echo "setup command: $cmd"
+		fi
+		echo "failed. stopping tests"
+		if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+			echo
+			echo "hit enter to continue"
+			read a
+		fi
+		exit $rc
+	fi
+}
+
 # set sysctl values in NS-A
 set_sysctl()
 {
@@ -447,6 +469,36 @@ setup()
 	sleep 1
 }
 
+setup_lla_only()
+{
+	# make sure we are starting with a clean slate
+	kill_procs
+	cleanup 2>/dev/null
+
+	log_debug "Configuring network namespaces"
+	set -e
+
+	create_ns ${NSA} "-" "-"
+	create_ns ${NSB} "-" "-"
+	create_ns ${NSC} "-" "-"
+	connect_ns ${NSA} ${NSA_DEV} "-" "-" \
+		   ${NSB} ${NSB_DEV} "-" "-"
+	connect_ns ${NSA} ${NSA_DEV2} "-" "-" \
+		   ${NSC} ${NSC_DEV} "-" "-"
+	NSA_LINKIP6=$(get_linklocal ${NSA} ${NSA_DEV})
+	NSB_LINKIP6=$(get_linklocal ${NSB} ${NSB_DEV})
+	NSC_LINKIP6=$(get_linklocal ${NSC} ${NSC_DEV})
+
+	create_vrf ${NSA} ${VRF} ${VRF_TABLE} "-" "-"
+	ip -netns ${NSA} link set dev ${NSA_DEV} vrf ${VRF}
+	ip -netns ${NSA} link set dev ${NSA_DEV2} vrf ${VRF}
+
+	set +e
+
+	sleep 1
+}
+
 ################################################################################
 # IPv4
@@ -3329,10 +3381,53 @@
 	setup_cmd_nsb ip li del vlan100 2>/dev/null
 }
 
+# VRF only.
+# ns-A device is connected to both ns-B and ns-C on a single VRF but only has
+# LLA on the interfaces
+use_case_ping_lla_multi()
+{
+	setup_lla_only
+	# only want reply from ns-A
+	setup_cmd_nsb sysctl -qw net.ipv6.icmp.echo_ignore_multicast=1
+	setup_cmd_nsc sysctl -qw net.ipv6.icmp.echo_ignore_multicast=1
+
+	log_start
+	run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV}
+	log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Pre cycle, ping out ns-B"
+
+	run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV}
+	log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Pre cycle, ping out ns-C"
+
+	# cycle/flap the first ns-A interface
+	setup_cmd ip link set ${NSA_DEV} down
+	setup_cmd ip link set ${NSA_DEV} up
+	sleep 1
+
+	log_start
+	run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV}
+	log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV}, ping out ns-B"
+	run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV}
+	log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV}, ping out ns-C"
+
+	# cycle/flap the second ns-A interface
+	setup_cmd ip link set ${NSA_DEV2} down
+	setup_cmd ip link set ${NSA_DEV2} up
+	sleep 1
+
+	log_start
+	run_cmd_nsb ping -c1 -w1 ${MCAST}%${NSB_DEV}
+	log_test_addr ${MCAST}%${NSB_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV2}, ping out ns-B"
+	run_cmd_nsc ping -c1 -w1 ${MCAST}%${NSC_DEV}
+	log_test_addr ${MCAST}%${NSC_DEV} $? 0 "Post cycle ${NSA} ${NSA_DEV2}, ping out ns-C"
+}
+
 use_cases()
 {
 	log_section "Use cases"
+
+	log_subsection "Device enslaved to bridge"
 	use_case_br
+
+	log_subsection "Ping LLA with multiple interfaces"
+	use_case_ping_lla_multi
 }
 
 ################################################################################